1/* tc-arm.c -- Assemble for the ARM
2   Copyright (C) 1994-2020 Free Software Foundation, Inc.
3   Contributed by Richard Earnshaw (rwe@pegasus.esprit.ec.org)
4	Modified by David Taylor (dtaylor@armltd.co.uk)
5	Cirrus coprocessor mods by Aldy Hernandez (aldyh@redhat.com)
6	Cirrus coprocessor fixes by Petko Manolov (petkan@nucleusys.com)
7	Cirrus coprocessor fixes by Vladimir Ivanov (vladitx@nucleusys.com)
8
9   This file is part of GAS, the GNU Assembler.
10
11   GAS is free software; you can redistribute it and/or modify
12   it under the terms of the GNU General Public License as published by
13   the Free Software Foundation; either version 3, or (at your option)
14   any later version.
15
16   GAS is distributed in the hope that it will be useful,
17   but WITHOUT ANY WARRANTY; without even the implied warranty of
18   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.	 See the
19   GNU General Public License for more details.
20
21   You should have received a copy of the GNU General Public License
22   along with GAS; see the file COPYING.  If not, write to the Free
23   Software Foundation, 51 Franklin Street - Fifth Floor, Boston, MA
24   02110-1301, USA.  */
25
26#include "as.h"
27#include <limits.h>
28#include <stdarg.h>
29#define	 NO_RELOC 0
30#include "safe-ctype.h"
31#include "subsegs.h"
32#include "obstack.h"
33#include "libiberty.h"
34#include "opcode/arm.h"
35#include "cpu-arm.h"
36
37#ifdef OBJ_ELF
38#include "elf/arm.h"
39#include "dw2gencfi.h"
40#endif
41
42#include "dwarf2dbg.h"
43
44#ifdef OBJ_ELF
45/* Must be at least the size of the largest unwind opcode (currently two).  */
46#define ARM_OPCODE_CHUNK_SIZE 8
47
/* This structure holds the unwinding state.  */

static struct
{
  /* Symbol at the start of the function being described.  NOTE(review):
     presumably set when the unwind region is opened (.fnstart) -- confirm
     against the directive handlers later in this file.  */
  symbolS *	  proc_start;
  /* Symbol for this function's entry in the unwind table, if any.  */
  symbolS *	  table_entry;
  /* Explicit personality routine symbol, when one was given.  */
  symbolS *	  personality_routine;
  /* Index of a predefined personality routine, used when no explicit
     routine symbol is recorded.  */
  int		  personality_index;
  /* The segment containing the function.  */
  segT		  saved_seg;
  subsegT	  saved_subseg;
  /* Opcodes generated from this function.  */
  unsigned char * opcodes;
  /* Bytes of OPCODES currently in use.  */
  int		  opcode_count;
  /* Bytes allocated for OPCODES.  */
  int		  opcode_alloc;
  /* The number of bytes pushed to the stack.  */
  offsetT	  frame_size;
  /* We don't add stack adjustment opcodes immediately so that we can merge
     multiple adjustments.  We can also omit the final adjustment
     when using a frame pointer.  */
  offsetT	  pending_offset;
  /* These two fields are set by both unwind_movsp and unwind_setfp.  They
     hold the reg+offset to use when restoring sp from a frame pointer.	 */
  offsetT	  fp_offset;
  int		  fp_reg;
  /* Nonzero if an unwind_setfp directive has been seen.  */
  unsigned	  fp_used:1;
  /* Nonzero if the last opcode restores sp from fp_reg.  */
  unsigned	  sp_restored:1;
} unwind;
78
79/* Whether --fdpic was given.  */
80static int arm_fdpic;
81
82#endif /* OBJ_ELF */
83
/* Results from operand parsing worker functions.  */

typedef enum
{
  PARSE_OPERAND_SUCCESS,		/* Operand parsed cleanly.  */
  PARSE_OPERAND_FAIL,			/* Parse failed; the caller may try an
					   alternative operand form.  */
  PARSE_OPERAND_FAIL_NO_BACKTRACK	/* Parse failed in a way that must not
					   be retried with another form (the
					   diagnostic should stand).  */
} parse_operand_result;
92
/* Floating-point ABI variants; see mfloat_abi_opt below.  NOTE(review):
   presumably selected by a -mfloat-abi style option with the standard ARM
   meanings (hard = FP registers for arguments, softfp = FP instructions
   with soft-float calling convention, soft = no FP instructions) -- confirm
   against the option handling code.  */
enum arm_float_abi
{
  ARM_FLOAT_ABI_HARD,
  ARM_FLOAT_ABI_SOFTFP,
  ARM_FLOAT_ABI_SOFT
};
99
100/* Types of processor to assemble for.	*/
101#ifndef CPU_DEFAULT
102/* The code that was here used to select a default CPU depending on compiler
103   pre-defines which were only present when doing native builds, thus
104   changing gas' default behaviour depending upon the build host.
105
   If you have a target that requires a default CPU option then you
   should define CPU_DEFAULT here.  */
108#endif
109
110/* Perform range checks on positive and negative overflows by checking if the
111   VALUE given fits within the range of an BITS sized immediate.  */
112static bfd_boolean out_of_range_p (offsetT value, offsetT bits)
113 {
114  gas_assert (bits < (offsetT)(sizeof (value) * 8));
115  return (value & ~((1 << bits)-1))
116	  && ((value & ~((1 << bits)-1)) != ~((1 << bits)-1));
117}
118
119#ifndef FPU_DEFAULT
120# ifdef TE_LINUX
121#  define FPU_DEFAULT FPU_ARCH_FPA
122# elif defined (TE_NetBSD)
123#  ifdef OBJ_ELF
124#   define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, but VFP order.  */
125#  else
126    /* Legacy a.out format.  */
127#   define FPU_DEFAULT FPU_ARCH_FPA	/* Soft-float, but FPA order.  */
128#  endif
129# elif defined (TE_VXWORKS)
130#  define FPU_DEFAULT FPU_ARCH_VFP	/* Soft-float, VFP order.  */
131# else
132   /* For backwards compatibility, default to FPA.  */
133#  define FPU_DEFAULT FPU_ARCH_FPA
134# endif
135#endif /* ifndef FPU_DEFAULT */
136
137#define streq(a, b)	      (strcmp (a, b) == 0)
138
139/* Current set of feature bits available (CPU+FPU).  Different from
140   selected_cpu + selected_fpu in case of autodetection since the CPU
141   feature bits are then all set.  */
142static arm_feature_set cpu_variant;
143/* Feature bits used in each execution state.  Used to set build attribute
144   (in particular Tag_*_ISA_use) in CPU autodetection mode.  */
145static arm_feature_set arm_arch_used;
146static arm_feature_set thumb_arch_used;
147
148/* Flags stored in private area of BFD structure.  */
149static int uses_apcs_26	     = FALSE;
150static int atpcs	     = FALSE;
151static int support_interwork = FALSE;
152static int uses_apcs_float   = FALSE;
153static int pic_code	     = FALSE;
154static int fix_v4bx	     = FALSE;
155/* Warn on using deprecated features.  */
156static int warn_on_deprecated = TRUE;
157static int warn_on_restrict_it = FALSE;
158
159/* Understand CodeComposer Studio assembly syntax.  */
160bfd_boolean codecomposer_syntax = FALSE;
161
162/* Variables that we set while parsing command-line options.  Once all
163   options have been read we re-process these values to set the real
164   assembly flags.  */
165
166/* CPU and FPU feature bits set for legacy CPU and FPU options (eg. -marm1
167   instead of -mcpu=arm1).  */
168static const arm_feature_set *legacy_cpu = NULL;
169static const arm_feature_set *legacy_fpu = NULL;
170
171/* CPU, extension and FPU feature bits selected by -mcpu.  */
172static const arm_feature_set *mcpu_cpu_opt = NULL;
173static arm_feature_set *mcpu_ext_opt = NULL;
174static const arm_feature_set *mcpu_fpu_opt = NULL;
175
176/* CPU, extension and FPU feature bits selected by -march.  */
177static const arm_feature_set *march_cpu_opt = NULL;
178static arm_feature_set *march_ext_opt = NULL;
179static const arm_feature_set *march_fpu_opt = NULL;
180
181/* Feature bits selected by -mfpu.  */
182static const arm_feature_set *mfpu_opt = NULL;
183
184/* Constants for known architecture features.  */
185static const arm_feature_set fpu_default = FPU_DEFAULT;
186static const arm_feature_set fpu_arch_vfp_v1 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V1;
187static const arm_feature_set fpu_arch_vfp_v2 = FPU_ARCH_VFP_V2;
188static const arm_feature_set fpu_arch_vfp_v3 ATTRIBUTE_UNUSED = FPU_ARCH_VFP_V3;
189static const arm_feature_set fpu_arch_neon_v1 ATTRIBUTE_UNUSED = FPU_ARCH_NEON_V1;
190static const arm_feature_set fpu_arch_fpa = FPU_ARCH_FPA;
191static const arm_feature_set fpu_any_hard = FPU_ANY_HARD;
192#ifdef OBJ_ELF
193static const arm_feature_set fpu_arch_maverick = FPU_ARCH_MAVERICK;
194#endif
195static const arm_feature_set fpu_endian_pure = FPU_ARCH_ENDIAN_PURE;
196
197#ifdef CPU_DEFAULT
198static const arm_feature_set cpu_default = CPU_DEFAULT;
199#endif
200
201static const arm_feature_set arm_ext_v1 = ARM_FEATURE_CORE_LOW (ARM_EXT_V1);
202static const arm_feature_set arm_ext_v2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V2);
203static const arm_feature_set arm_ext_v2s = ARM_FEATURE_CORE_LOW (ARM_EXT_V2S);
204static const arm_feature_set arm_ext_v3 = ARM_FEATURE_CORE_LOW (ARM_EXT_V3);
205static const arm_feature_set arm_ext_v3m = ARM_FEATURE_CORE_LOW (ARM_EXT_V3M);
206static const arm_feature_set arm_ext_v4 = ARM_FEATURE_CORE_LOW (ARM_EXT_V4);
207static const arm_feature_set arm_ext_v4t = ARM_FEATURE_CORE_LOW (ARM_EXT_V4T);
208static const arm_feature_set arm_ext_v5 = ARM_FEATURE_CORE_LOW (ARM_EXT_V5);
209static const arm_feature_set arm_ext_v4t_5 =
210  ARM_FEATURE_CORE_LOW (ARM_EXT_V4T | ARM_EXT_V5);
211static const arm_feature_set arm_ext_v5t = ARM_FEATURE_CORE_LOW (ARM_EXT_V5T);
212static const arm_feature_set arm_ext_v5e = ARM_FEATURE_CORE_LOW (ARM_EXT_V5E);
213static const arm_feature_set arm_ext_v5exp = ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP);
214static const arm_feature_set arm_ext_v5j = ARM_FEATURE_CORE_LOW (ARM_EXT_V5J);
215static const arm_feature_set arm_ext_v6 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6);
216static const arm_feature_set arm_ext_v6k = ARM_FEATURE_CORE_LOW (ARM_EXT_V6K);
217static const arm_feature_set arm_ext_v6t2 = ARM_FEATURE_CORE_LOW (ARM_EXT_V6T2);
/* Only for compatibility of hint instructions.  */
219static const arm_feature_set arm_ext_v6k_v6t2 =
220  ARM_FEATURE_CORE_LOW (ARM_EXT_V6K | ARM_EXT_V6T2);
221static const arm_feature_set arm_ext_v6_notm =
222  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_NOTM);
223static const arm_feature_set arm_ext_v6_dsp =
224  ARM_FEATURE_CORE_LOW (ARM_EXT_V6_DSP);
225static const arm_feature_set arm_ext_barrier =
226  ARM_FEATURE_CORE_LOW (ARM_EXT_BARRIER);
227static const arm_feature_set arm_ext_msr =
228  ARM_FEATURE_CORE_LOW (ARM_EXT_THUMB_MSR);
229static const arm_feature_set arm_ext_div = ARM_FEATURE_CORE_LOW (ARM_EXT_DIV);
230static const arm_feature_set arm_ext_v7 = ARM_FEATURE_CORE_LOW (ARM_EXT_V7);
231static const arm_feature_set arm_ext_v7a = ARM_FEATURE_CORE_LOW (ARM_EXT_V7A);
232static const arm_feature_set arm_ext_v7r = ARM_FEATURE_CORE_LOW (ARM_EXT_V7R);
233static const arm_feature_set arm_ext_v8r = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8R);
234#ifdef OBJ_ELF
235static const arm_feature_set ATTRIBUTE_UNUSED arm_ext_v7m = ARM_FEATURE_CORE_LOW (ARM_EXT_V7M);
236#endif
237static const arm_feature_set arm_ext_v8 = ARM_FEATURE_CORE_LOW (ARM_EXT_V8);
238static const arm_feature_set arm_ext_m =
239  ARM_FEATURE_CORE (ARM_EXT_V6M | ARM_EXT_V7M,
240		    ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
241static const arm_feature_set arm_ext_mp = ARM_FEATURE_CORE_LOW (ARM_EXT_MP);
242static const arm_feature_set arm_ext_sec = ARM_FEATURE_CORE_LOW (ARM_EXT_SEC);
243static const arm_feature_set arm_ext_os = ARM_FEATURE_CORE_LOW (ARM_EXT_OS);
244static const arm_feature_set arm_ext_adiv = ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV);
245static const arm_feature_set arm_ext_virt = ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT);
246static const arm_feature_set arm_ext_pan = ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN);
247static const arm_feature_set arm_ext_v8m = ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M);
248static const arm_feature_set arm_ext_v8m_main =
249  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M_MAIN);
250static const arm_feature_set arm_ext_v8_1m_main =
251ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_1M_MAIN);
252/* Instructions in ARMv8-M only found in M profile architectures.  */
253static const arm_feature_set arm_ext_v8m_m_only =
254  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8M | ARM_EXT2_V8M_MAIN);
255static const arm_feature_set arm_ext_v6t2_v8m =
256  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V6T2_V8M);
257/* Instructions shared between ARMv8-A and ARMv8-M.  */
258static const arm_feature_set arm_ext_atomics =
259  ARM_FEATURE_CORE_HIGH (ARM_EXT2_ATOMICS);
260#ifdef OBJ_ELF
261/* DSP instructions Tag_DSP_extension refers to.  */
262static const arm_feature_set arm_ext_dsp =
263  ARM_FEATURE_CORE_LOW (ARM_EXT_V5E | ARM_EXT_V5ExP | ARM_EXT_V6_DSP);
264#endif
265static const arm_feature_set arm_ext_ras =
266  ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS);
267/* FP16 instructions.  */
268static const arm_feature_set arm_ext_fp16 =
269  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST);
270static const arm_feature_set arm_ext_fp16_fml =
271  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_FML);
272static const arm_feature_set arm_ext_v8_2 =
273  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_2A);
274static const arm_feature_set arm_ext_v8_3 =
275  ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8_3A);
276static const arm_feature_set arm_ext_sb =
277  ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB);
278static const arm_feature_set arm_ext_predres =
279  ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES);
280static const arm_feature_set arm_ext_bf16 =
281  ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16);
282static const arm_feature_set arm_ext_i8mm =
283  ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM);
284static const arm_feature_set arm_ext_crc =
285  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC);
286static const arm_feature_set arm_ext_cde =
287  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE);
288static const arm_feature_set arm_ext_cde0 =
289  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE0);
290static const arm_feature_set arm_ext_cde1 =
291  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE1);
292static const arm_feature_set arm_ext_cde2 =
293  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE2);
294static const arm_feature_set arm_ext_cde3 =
295  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE3);
296static const arm_feature_set arm_ext_cde4 =
297  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE4);
298static const arm_feature_set arm_ext_cde5 =
299  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE5);
300static const arm_feature_set arm_ext_cde6 =
301  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE6);
302static const arm_feature_set arm_ext_cde7 =
303  ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE7);
304
305static const arm_feature_set arm_arch_any = ARM_ANY;
306static const arm_feature_set fpu_any = FPU_ANY;
307static const arm_feature_set arm_arch_full ATTRIBUTE_UNUSED = ARM_FEATURE (-1, -1, -1);
308static const arm_feature_set arm_arch_t2 = ARM_ARCH_THUMB2;
309static const arm_feature_set arm_arch_none = ARM_ARCH_NONE;
310
311static const arm_feature_set arm_cext_iwmmxt2 =
312  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2);
313static const arm_feature_set arm_cext_iwmmxt =
314  ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT);
315static const arm_feature_set arm_cext_xscale =
316  ARM_FEATURE_COPROC (ARM_CEXT_XSCALE);
317static const arm_feature_set arm_cext_maverick =
318  ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK);
319static const arm_feature_set fpu_fpa_ext_v1 =
320  ARM_FEATURE_COPROC (FPU_FPA_EXT_V1);
321static const arm_feature_set fpu_fpa_ext_v2 =
322  ARM_FEATURE_COPROC (FPU_FPA_EXT_V2);
323static const arm_feature_set fpu_vfp_ext_v1xd =
324  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1xD);
325static const arm_feature_set fpu_vfp_ext_v1 =
326  ARM_FEATURE_COPROC (FPU_VFP_EXT_V1);
327static const arm_feature_set fpu_vfp_ext_v2 =
328  ARM_FEATURE_COPROC (FPU_VFP_EXT_V2);
329static const arm_feature_set fpu_vfp_ext_v3xd =
330  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3xD);
331static const arm_feature_set fpu_vfp_ext_v3 =
332  ARM_FEATURE_COPROC (FPU_VFP_EXT_V3);
333static const arm_feature_set fpu_vfp_ext_d32 =
334  ARM_FEATURE_COPROC (FPU_VFP_EXT_D32);
335static const arm_feature_set fpu_neon_ext_v1 =
336  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1);
337static const arm_feature_set fpu_vfp_v3_or_neon_ext =
338  ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_VFP_EXT_V3);
339static const arm_feature_set mve_ext =
340  ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE);
341static const arm_feature_set mve_fp_ext =
342  ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE_FP);
343/* Note: This has more than one bit set, which means using it with
344   mark_feature_used (which returns if *any* of the bits are set in the current
345   cpu variant) can give surprising results.  */
346static const arm_feature_set armv8m_fp =
347  ARM_FEATURE_COPROC (FPU_VFP_V5_SP_D16);
348#ifdef OBJ_ELF
349static const arm_feature_set fpu_vfp_fp16 =
350  ARM_FEATURE_COPROC (FPU_VFP_EXT_FP16);
351static const arm_feature_set fpu_neon_ext_fma =
352  ARM_FEATURE_COPROC (FPU_NEON_EXT_FMA);
353#endif
354static const arm_feature_set fpu_vfp_ext_fma =
355  ARM_FEATURE_COPROC (FPU_VFP_EXT_FMA);
356static const arm_feature_set fpu_vfp_ext_armv8 =
357  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8);
358static const arm_feature_set fpu_vfp_ext_armv8xd =
359  ARM_FEATURE_COPROC (FPU_VFP_EXT_ARMV8xD);
360static const arm_feature_set fpu_neon_ext_armv8 =
361  ARM_FEATURE_COPROC (FPU_NEON_EXT_ARMV8);
362static const arm_feature_set fpu_crypto_ext_armv8 =
363  ARM_FEATURE_COPROC (FPU_CRYPTO_EXT_ARMV8);
364static const arm_feature_set fpu_neon_ext_v8_1 =
365  ARM_FEATURE_COPROC (FPU_NEON_EXT_RDMA);
366static const arm_feature_set fpu_neon_ext_dotprod =
367  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD);
368
369static int mfloat_abi_opt = -1;
370/* Architecture feature bits selected by the last -mcpu/-march or .cpu/.arch
371   directive.  */
372static arm_feature_set selected_arch = ARM_ARCH_NONE;
373/* Extension feature bits selected by the last -mcpu/-march or .arch_extension
374   directive.  */
375static arm_feature_set selected_ext = ARM_ARCH_NONE;
376/* Feature bits selected by the last -mcpu/-march or by the combination of the
377   last .cpu/.arch directive .arch_extension directives since that
378   directive.  */
379static arm_feature_set selected_cpu = ARM_ARCH_NONE;
380/* FPU feature bits selected by the last -mfpu or .fpu directive.  */
381static arm_feature_set selected_fpu = FPU_NONE;
382/* Feature bits selected by the last .object_arch directive.  */
383static arm_feature_set selected_object_arch = ARM_ARCH_NONE;
static const struct arm_ext_table * selected_ctx_ext_table = NULL;
/* Must be long enough to hold any of the names in arm_cpus.  */
static char selected_cpu_name[20];
387
388extern FLONUM_TYPE generic_floating_point_number;
389
390/* Return if no cpu was selected on command-line.  */
391static bfd_boolean
392no_cpu_selected (void)
393{
394  return ARM_FEATURE_EQUAL (selected_cpu, arm_arch_none);
395}
396
397#ifdef OBJ_ELF
398# ifdef EABI_DEFAULT
399static int meabi_flags = EABI_DEFAULT;
400# else
401static int meabi_flags = EF_ARM_EABI_UNKNOWN;
402# endif
403
404static int attributes_set_explicitly[NUM_KNOWN_OBJ_ATTRIBUTES];
405
406bfd_boolean
407arm_is_eabi (void)
408{
409  return (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4);
410}
411#endif
412
413#ifdef OBJ_ELF
414/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
415symbolS * GOT_symbol;
416#endif
417
418/* 0: assemble for ARM,
419   1: assemble for Thumb,
420   2: assemble for Thumb even though target CPU does not support thumb
421      instructions.  */
422static int thumb_mode = 0;
423/* A value distinct from the possible values for thumb_mode that we
424   can use to record whether thumb_mode has been copied into the
425   tc_frag_data field of a frag.  */
426#define MODE_RECORDED (1 << 4)
427
/* Specifies the intrinsic IT insn behavior mode.  NOTE(review): the value
   comments below are inferred from the constant names -- confirm against
   the IT-block generation code.  */
enum implicit_it_mode
{
  IMPLICIT_IT_MODE_NEVER  = 0x00,	/* Never generate implicit IT.  */
  IMPLICIT_IT_MODE_ARM    = 0x01,	/* Implicit IT for ARM-style input.  */
  IMPLICIT_IT_MODE_THUMB  = 0x02,	/* Implicit IT for Thumb-style input.  */
  IMPLICIT_IT_MODE_ALWAYS = (IMPLICIT_IT_MODE_ARM | IMPLICIT_IT_MODE_THUMB)
};
/* Current mode; defaults to ARM-style implicit IT.  */
static int implicit_it_mode = IMPLICIT_IT_MODE_ARM;
437
438/* If unified_syntax is true, we are processing the new unified
439   ARM/Thumb syntax.  Important differences from the old ARM mode:
440
441     - Immediate operands do not require a # prefix.
442     - Conditional affixes always appear at the end of the
443       instruction.  (For backward compatibility, those instructions
444       that formerly had them in the middle, continue to accept them
445       there.)
446     - The IT instruction may appear, and if it does is validated
447       against subsequent conditional affixes.  It does not generate
448       machine code.
449
450   Important differences from the old Thumb mode:
451
452     - Immediate operands do not require a # prefix.
453     - Most of the V6T2 instructions are only available in unified mode.
454     - The .N and .W suffixes are recognized and honored (it is an error
455       if they cannot be honored).
456     - All instructions set the flags if and only if they have an 's' affix.
457     - Conditional affixes may be used.  They are validated against
458       preceding IT instructions.  Unlike ARM mode, you cannot use a
459       conditional affix except in the scope of an IT instruction.  */
460
461static bfd_boolean unified_syntax = FALSE;
462
463/* An immediate operand can start with #, and ld*, st*, pld operands
464   can contain [ and ].  We need to tell APP not to elide whitespace
465   before a [, which can appear as the first operand for pld.
466   Likewise, a { can appear as the first operand for push, pop, vld*, etc.  */
467const char arm_symbol_chars[] = "#[]{}";
468
469enum neon_el_type
470{
471  NT_invtype,
472  NT_untyped,
473  NT_integer,
474  NT_float,
475  NT_poly,
476  NT_signed,
477  NT_bfloat,
478  NT_unsigned
479};
480
481struct neon_type_el
482{
483  enum neon_el_type type;
484  unsigned size;
485};
486
487#define NEON_MAX_TYPE_ELS 5
488
489struct neon_type
490{
491  struct neon_type_el el[NEON_MAX_TYPE_ELS];
492  unsigned elems;
493};
494
495enum pred_instruction_type
496{
497   OUTSIDE_PRED_INSN,
498   INSIDE_VPT_INSN,
499   INSIDE_IT_INSN,
500   INSIDE_IT_LAST_INSN,
501   IF_INSIDE_IT_LAST_INSN, /* Either outside or inside;
502			      if inside, should be the last one.  */
503   NEUTRAL_IT_INSN,        /* This could be either inside or outside,
504			      i.e. BKPT and NOP.  */
505   IT_INSN,		   /* The IT insn has been parsed.  */
506   VPT_INSN,		   /* The VPT/VPST insn has been parsed.  */
507   MVE_OUTSIDE_PRED_INSN , /* Instruction to indicate a MVE instruction without
508			      a predication code.  */
509   MVE_UNPREDICABLE_INSN,  /* MVE instruction that is non-predicable.  */
510};
511
512/* The maximum number of operands we need.  */
513#define ARM_IT_MAX_OPERANDS 6
514#define ARM_IT_MAX_RELOCS 3
515
516struct arm_it
517{
518  const char *	error;
519  unsigned long instruction;
520  unsigned int	size;
521  unsigned int	size_req;
522  unsigned int	cond;
523  /* "uncond_value" is set to the value in place of the conditional field in
524     unconditional versions of the instruction, or -1u if nothing is
525     appropriate.  */
526  unsigned int	uncond_value;
527  struct neon_type vectype;
528  /* This does not indicate an actual NEON instruction, only that
529     the mnemonic accepts neon-style type suffixes.  */
530  int		is_neon;
531  /* Set to the opcode if the instruction needs relaxation.
532     Zero if the instruction is not relaxed.  */
533  unsigned long	relax;
534  struct
535  {
536    bfd_reloc_code_real_type type;
537    expressionS		     exp;
538    int			     pc_rel;
539  } relocs[ARM_IT_MAX_RELOCS];
540
541  enum pred_instruction_type pred_insn_type;
542
543  struct
544  {
545    unsigned reg;
546    signed int imm;
547    struct neon_type_el vectype;
548    unsigned present	: 1;  /* Operand present.  */
549    unsigned isreg	: 1;  /* Operand was a register.  */
550    unsigned immisreg	: 2;  /* .imm field is a second register.
551				 0: imm, 1: gpr, 2: MVE Q-register.  */
552    unsigned isscalar   : 2;  /* Operand is a (SIMD) scalar:
553				 0) not scalar,
554				 1) Neon scalar,
555				 2) MVE scalar.  */
556    unsigned immisalign : 1;  /* Immediate is an alignment specifier.  */
557    unsigned immisfloat : 1;  /* Immediate was parsed as a float.  */
558    /* Note: we abuse "regisimm" to mean "is Neon register" in VMOV
559       instructions. This allows us to disambiguate ARM <-> vector insns.  */
560    unsigned regisimm   : 1;  /* 64-bit immediate, reg forms high 32 bits.  */
561    unsigned isvec      : 1;  /* Is a single, double or quad VFP/Neon reg.  */
562    unsigned isquad     : 1;  /* Operand is SIMD quad register.  */
563    unsigned issingle   : 1;  /* Operand is VFP single-precision register.  */
564    unsigned iszr	: 1;  /* Operand is ZR register.  */
565    unsigned hasreloc	: 1;  /* Operand has relocation suffix.  */
566    unsigned writeback	: 1;  /* Operand has trailing !  */
567    unsigned preind	: 1;  /* Preindexed address.  */
568    unsigned postind	: 1;  /* Postindexed address.  */
569    unsigned negative	: 1;  /* Index register was negated.  */
570    unsigned shifted	: 1;  /* Shift applied to operation.  */
571    unsigned shift_kind : 3;  /* Shift operation (enum shift_kind).  */
572  } operands[ARM_IT_MAX_OPERANDS];
573};
574
575static struct arm_it inst;
576
577#define NUM_FLOAT_VALS 8
578
579const char * fp_const[] =
580{
581  "0.0", "1.0", "2.0", "3.0", "4.0", "5.0", "0.5", "10.0", 0
582};
583
584LITTLENUM_TYPE fp_values[NUM_FLOAT_VALS][MAX_LITTLENUMS];
585
586#define FAIL	(-1)
587#define SUCCESS (0)
588
589#define SUFF_S 1
590#define SUFF_D 2
591#define SUFF_E 3
592#define SUFF_P 4
593
594#define CP_T_X	 0x00008000
595#define CP_T_Y	 0x00400000
596
597#define CONDS_BIT	 0x00100000
598#define LOAD_BIT	 0x00100000
599
600#define DOUBLE_LOAD_FLAG 0x00000001
601
602struct asm_cond
603{
604  const char *	 template_name;
605  unsigned long  value;
606};
607
608#define COND_ALWAYS 0xE
609
610struct asm_psr
611{
612  const char *   template_name;
613  unsigned long  field;
614};
615
616struct asm_barrier_opt
617{
618  const char *    template_name;
619  unsigned long   value;
620  const arm_feature_set arch;
621};
622
623/* The bit that distinguishes CPSR and SPSR.  */
624#define SPSR_BIT   (1 << 22)
625
626/* The individual PSR flag bits.  */
627#define PSR_c	(1 << 16)
628#define PSR_x	(1 << 17)
629#define PSR_s	(1 << 18)
630#define PSR_f	(1 << 19)
631
632struct reloc_entry
633{
634  const char *              name;
635  bfd_reloc_code_real_type  reloc;
636};
637
638enum vfp_reg_pos
639{
640  VFP_REG_Sd, VFP_REG_Sm, VFP_REG_Sn,
641  VFP_REG_Dd, VFP_REG_Dm, VFP_REG_Dn
642};
643
644enum vfp_ldstm_type
645{
646  VFP_LDSTMIA, VFP_LDSTMDB, VFP_LDSTMIAX, VFP_LDSTMDBX
647};
648
649/* Bits for DEFINED field in neon_typed_alias.  */
650#define NTA_HASTYPE  1
651#define NTA_HASINDEX 2
652
653struct neon_typed_alias
654{
655  unsigned char        defined;
656  unsigned char        index;
657  struct neon_type_el  eltype;
658};
659
660/* ARM register categories.  This includes coprocessor numbers and various
661   architecture extensions' registers.  Each entry should have an error message
662   in reg_expected_msgs below.  */
663enum arm_reg_type
664{
665  REG_TYPE_RN,
666  REG_TYPE_CP,
667  REG_TYPE_CN,
668  REG_TYPE_FN,
669  REG_TYPE_VFS,
670  REG_TYPE_VFD,
671  REG_TYPE_NQ,
672  REG_TYPE_VFSD,
673  REG_TYPE_NDQ,
674  REG_TYPE_NSD,
675  REG_TYPE_NSDQ,
676  REG_TYPE_VFC,
677  REG_TYPE_MVF,
678  REG_TYPE_MVD,
679  REG_TYPE_MVFX,
680  REG_TYPE_MVDX,
681  REG_TYPE_MVAX,
682  REG_TYPE_MQ,
683  REG_TYPE_DSPSC,
684  REG_TYPE_MMXWR,
685  REG_TYPE_MMXWC,
686  REG_TYPE_MMXWCG,
687  REG_TYPE_XSCALE,
688  REG_TYPE_RNB,
689  REG_TYPE_ZR
690};
691
692/* Structure for a hash table entry for a register.
693   If TYPE is REG_TYPE_VFD or REG_TYPE_NQ, the NEON field can point to extra
694   information which states whether a vector type or index is specified (for a
695   register alias created with .dn or .qn). Otherwise NEON should be NULL.  */
696struct reg_entry
697{
698  const char *               name;
699  unsigned int               number;
700  unsigned char              type;
701  unsigned char              builtin;
702  struct neon_typed_alias *  neon;
703};
704
705/* Diagnostics used when we don't get a register of the expected type.	*/
706const char * const reg_expected_msgs[] =
707{
708  [REG_TYPE_RN]	    = N_("ARM register expected"),
709  [REG_TYPE_CP]	    = N_("bad or missing co-processor number"),
710  [REG_TYPE_CN]	    = N_("co-processor register expected"),
711  [REG_TYPE_FN]	    = N_("FPA register expected"),
712  [REG_TYPE_VFS]    = N_("VFP single precision register expected"),
713  [REG_TYPE_VFD]    = N_("VFP/Neon double precision register expected"),
714  [REG_TYPE_NQ]	    = N_("Neon quad precision register expected"),
715  [REG_TYPE_VFSD]   = N_("VFP single or double precision register expected"),
716  [REG_TYPE_NDQ]    = N_("Neon double or quad precision register expected"),
717  [REG_TYPE_NSD]    = N_("Neon single or double precision register expected"),
718  [REG_TYPE_NSDQ]   = N_("VFP single, double or Neon quad precision register"
719			 " expected"),
720  [REG_TYPE_VFC]    = N_("VFP system register expected"),
721  [REG_TYPE_MVF]    = N_("Maverick MVF register expected"),
722  [REG_TYPE_MVD]    = N_("Maverick MVD register expected"),
723  [REG_TYPE_MVFX]   = N_("Maverick MVFX register expected"),
724  [REG_TYPE_MVDX]   = N_("Maverick MVDX register expected"),
725  [REG_TYPE_MVAX]   = N_("Maverick MVAX register expected"),
726  [REG_TYPE_DSPSC]  = N_("Maverick DSPSC register expected"),
727  [REG_TYPE_MMXWR]  = N_("iWMMXt data register expected"),
728  [REG_TYPE_MMXWC]  = N_("iWMMXt control register expected"),
729  [REG_TYPE_MMXWCG] = N_("iWMMXt scalar register expected"),
730  [REG_TYPE_XSCALE] = N_("XScale accumulator register expected"),
731  [REG_TYPE_MQ]	    = N_("MVE vector register expected"),
732  [REG_TYPE_RNB]    = ""
733};
734
735/* Some well known registers that we refer to directly elsewhere.  */
736#define REG_R12	12
737#define REG_SP	13
738#define REG_LR	14
739#define REG_PC	15
740
741/* ARM instructions take 4bytes in the object file, Thumb instructions
742   take 2:  */
743#define INSN_SIZE	4
744
745struct asm_opcode
746{
747  /* Basic string to match.  */
748  const char * template_name;
749
750  /* Parameters to instruction.	 */
751  unsigned int operands[8];
752
753  /* Conditional tag - see opcode_lookup.  */
754  unsigned int tag : 4;
755
756  /* Basic instruction code.  */
757  unsigned int avalue;
758
759  /* Thumb-format instruction code.  */
760  unsigned int tvalue;
761
762  /* Which architecture variant provides this instruction.  */
763  const arm_feature_set * avariant;
764  const arm_feature_set * tvariant;
765
766  /* Function to call to encode instruction in ARM format.  */
767  void (* aencode) (void);
768
769  /* Function to call to encode instruction in Thumb format.  */
770  void (* tencode) (void);
771
772  /* Indicates whether this instruction may be vector predicated.  */
773  unsigned int mayBeVecPred : 1;
774};
775
776/* Defines for various bits that we will want to toggle.  */
777#define INST_IMMEDIATE	0x02000000
778#define OFFSET_REG	0x02000000
779#define HWOFFSET_IMM	0x00400000
780#define SHIFT_BY_REG	0x00000010
781#define PRE_INDEX	0x01000000
782#define INDEX_UP	0x00800000
783#define WRITE_BACK	0x00200000
784#define LDM_TYPE_2_OR_3	0x00400000
785#define CPSI_MMOD	0x00020000
786
787#define LITERAL_MASK	0xf000f000
788#define OPCODE_MASK	0xfe1fffff
789#define V4_STR_BIT	0x00000020
790#define VLDR_VMOV_SAME	0x0040f000
791
792#define T2_SUBS_PC_LR	0xf3de8f00
793
794#define DATA_OP_SHIFT	21
795#define SBIT_SHIFT	20
796
797#define T2_OPCODE_MASK	0xfe1fffff
798#define T2_DATA_OP_SHIFT 21
799#define T2_SBIT_SHIFT	 20
800
801#define A_COND_MASK         0xf0000000
802#define A_PUSH_POP_OP_MASK  0x0fff0000
803
/* Opcodes for pushing/popping registers to/from the stack.  */
805#define A1_OPCODE_PUSH    0x092d0000
806#define A2_OPCODE_PUSH    0x052d0004
807#define A2_OPCODE_POP     0x049d0004
808
809/* Codes to distinguish the arithmetic instructions.  */
810#define OPCODE_AND	0
811#define OPCODE_EOR	1
812#define OPCODE_SUB	2
813#define OPCODE_RSB	3
814#define OPCODE_ADD	4
815#define OPCODE_ADC	5
816#define OPCODE_SBC	6
817#define OPCODE_RSC	7
818#define OPCODE_TST	8
819#define OPCODE_TEQ	9
820#define OPCODE_CMP	10
821#define OPCODE_CMN	11
822#define OPCODE_ORR	12
823#define OPCODE_MOV	13
824#define OPCODE_BIC	14
825#define OPCODE_MVN	15
826
827#define T2_OPCODE_AND	0
828#define T2_OPCODE_BIC	1
829#define T2_OPCODE_ORR	2
830#define T2_OPCODE_ORN	3
831#define T2_OPCODE_EOR	4
832#define T2_OPCODE_ADD	8
833#define T2_OPCODE_ADC	10
834#define T2_OPCODE_SBC	11
835#define T2_OPCODE_SUB	13
836#define T2_OPCODE_RSB	14
837
838#define T_OPCODE_MUL 0x4340
839#define T_OPCODE_TST 0x4200
840#define T_OPCODE_CMN 0x42c0
841#define T_OPCODE_NEG 0x4240
842#define T_OPCODE_MVN 0x43c0
843
844#define T_OPCODE_ADD_R3	0x1800
845#define T_OPCODE_SUB_R3 0x1a00
846#define T_OPCODE_ADD_HI 0x4400
847#define T_OPCODE_ADD_ST 0xb000
848#define T_OPCODE_SUB_ST 0xb080
849#define T_OPCODE_ADD_SP 0xa800
850#define T_OPCODE_ADD_PC 0xa000
851#define T_OPCODE_ADD_I8 0x3000
852#define T_OPCODE_SUB_I8 0x3800
853#define T_OPCODE_ADD_I3 0x1c00
854#define T_OPCODE_SUB_I3 0x1e00
855
856#define T_OPCODE_ASR_R	0x4100
857#define T_OPCODE_LSL_R	0x4080
858#define T_OPCODE_LSR_R	0x40c0
859#define T_OPCODE_ROR_R	0x41c0
860#define T_OPCODE_ASR_I	0x1000
861#define T_OPCODE_LSL_I	0x0000
862#define T_OPCODE_LSR_I	0x0800
863
864#define T_OPCODE_MOV_I8	0x2000
865#define T_OPCODE_CMP_I8 0x2800
866#define T_OPCODE_CMP_LR 0x4280
867#define T_OPCODE_MOV_HR 0x4600
868#define T_OPCODE_CMP_HR 0x4500
869
870#define T_OPCODE_LDR_PC 0x4800
871#define T_OPCODE_LDR_SP 0x9800
872#define T_OPCODE_STR_SP 0x9000
873#define T_OPCODE_LDR_IW 0x6800
874#define T_OPCODE_STR_IW 0x6000
875#define T_OPCODE_LDR_IH 0x8800
876#define T_OPCODE_STR_IH 0x8000
877#define T_OPCODE_LDR_IB 0x7800
878#define T_OPCODE_STR_IB 0x7000
879#define T_OPCODE_LDR_RW 0x5800
880#define T_OPCODE_STR_RW 0x5000
881#define T_OPCODE_LDR_RH 0x5a00
882#define T_OPCODE_STR_RH 0x5200
883#define T_OPCODE_LDR_RB 0x5c00
884#define T_OPCODE_STR_RB 0x5400
885
886#define T_OPCODE_PUSH	0xb400
887#define T_OPCODE_POP	0xbc00
888
889#define T_OPCODE_BRANCH 0xe000
890
891#define THUMB_SIZE	2	/* Size of thumb instruction.  */
892#define THUMB_PP_PC_LR 0x0100
893#define THUMB_LOAD_BIT 0x0800
894#define THUMB2_LOAD_BIT 0x00100000
895
/* Diagnostic messages used when rejecting invalid instructions or
   operand combinations.  Kept as macros so each message string is
   written (and translated) exactly once.  */
#define BAD_SYNTAX	_("syntax error")
#define BAD_ARGS	_("bad arguments to instruction")
#define BAD_SP          _("r13 not allowed here")
#define BAD_PC		_("r15 not allowed here")
#define BAD_ODD		_("Odd register not allowed here")
#define BAD_EVEN	_("Even register not allowed here")
#define BAD_COND	_("instruction cannot be conditional")
#define BAD_OVERLAP	_("registers may not be the same")
#define BAD_HIREG	_("lo register required")
#define BAD_THUMB32	_("instruction not supported in Thumb16 mode")
#define BAD_ADDR_MODE   _("instruction does not accept this addressing mode")
#define BAD_BRANCH	_("branch must be last instruction in IT block")
#define BAD_BRANCH_OFF	_("branch out of range or not a multiple of 2")
#define BAD_NO_VPT	_("instruction not allowed in VPT block")
#define BAD_NOT_IT	_("instruction not allowed in IT block")
#define BAD_NOT_VPT	_("instruction missing MVE vector predication code")
#define BAD_FPU		_("selected FPU does not support instruction")
#define BAD_OUT_IT 	_("thumb conditional instruction should be in IT block")
#define BAD_OUT_VPT	\
	_("vector predicated instruction should be in VPT/VPST block")
#define BAD_IT_COND	_("incorrect condition in IT block")
#define BAD_VPT_COND	_("incorrect condition in VPT/VPST block")
#define BAD_IT_IT 	_("IT falling in the range of a previous IT block")
#define MISSING_FNSTART	_("missing .fnstart before unwinding directive")
#define BAD_PC_ADDRESSING \
	_("cannot use register index with PC-relative addressing")
#define BAD_PC_WRITEBACK \
	_("cannot use writeback with PC-relative addressing")
#define BAD_RANGE	_("branch out of range")
#define BAD_FP16	_("selected processor does not support fp16 instruction")
#define BAD_BF16	_("selected processor does not support bf16 instruction")
#define BAD_CDE	_("selected processor does not support cde instruction")
#define BAD_CDE_COPROC	_("coprocessor for insn is not enabled for cde")
#define UNPRED_REG(R)	_("using " R " results in unpredictable behaviour")
#define THUMB1_RELOC_ONLY  _("relocation valid in thumb1 code only")
#define MVE_NOT_IT	_("Warning: instruction is UNPREDICTABLE in an IT " \
			  "block")
#define MVE_NOT_VPT	_("Warning: instruction is UNPREDICTABLE in a VPT " \
			  "block")
#define MVE_BAD_PC	_("Warning: instruction is UNPREDICTABLE with PC" \
			  " operand")
#define MVE_BAD_SP	_("Warning: instruction is UNPREDICTABLE with SP" \
			  " operand")
#define BAD_SIMD_TYPE	_("bad type in SIMD instruction")
#define BAD_MVE_AUTO	\
  _("GAS auto-detection mode and -march=all is deprecated for MVE, please" \
    " use a valid -march or -mcpu option.")
#define BAD_MVE_SRCDEST	_("Warning: 32-bit element size and same destination "\
			  "and source operands makes instruction UNPREDICTABLE")
#define BAD_EL_TYPE	_("bad element type for instruction")
#define MVE_BAD_QREG	_("MVE vector register Q[0..7] expected")
947
/* Hash tables used while parsing: opcodes, condition codes, shifts,
   PSR names, registers, relocation names and barrier options.
   NOTE(review): presumably populated during md_begin — confirm against
   the initialization code (not visible here).  */
static htab_t  arm_ops_hsh;
static htab_t  arm_cond_hsh;
static htab_t  arm_vcond_hsh;
static htab_t  arm_shift_hsh;
static htab_t  arm_psr_hsh;
static htab_t  arm_v7m_psr_hsh;
static htab_t  arm_reg_hsh;
static htab_t  arm_reloc_hsh;
static htab_t  arm_barrier_opt_hsh;
957
/* Stuff needed to resolve the label ambiguity
   As:
     ...
     label:   <insn>
   may differ from:
     ...
     label:
	      <insn>  */

/* The most recently seen label symbol.  */
symbolS *  last_label_seen;
/* Nonzero when the next label should be recorded as naming a Thumb
   function (presumably set by the .thumb_func directive — confirm
   against the directive handler, not visible here).  */
static int label_is_thumb_function_name = FALSE;
969
/* Literal pool structure.  Held on a per-section
   and per-sub-section basis.  */

#define MAX_LITERAL_POOL_SIZE 1024
typedef struct literal_pool
{
  /* Constants waiting to be emitted into the pool.  */
  expressionS	         literals [MAX_LITERAL_POOL_SIZE];
  /* Number of entries used in LITERALS; also the index of the next
     free slot.  */
  unsigned int	         next_free_entry;
  /* Pool identifier (NOTE(review): presumably used to name the pool's
     output symbol — confirm against the pool-emission code).  */
  unsigned int	         id;
  /* Symbol marking the start of this pool in the output.  */
  symbolS *	         symbol;
  /* Section and sub-section this pool belongs to.  */
  segT		         section;
  subsegT	         sub_section;
#ifdef OBJ_ELF
  /* Source location of each literal, one entry per LITERALS slot,
     for DWARF line information.  */
  struct dwarf2_line_info locs [MAX_LITERAL_POOL_SIZE];
#endif
  /* Next pool in the list_of_pools chain.  */
  struct literal_pool *  next;
  /* Alignment of the pool.  */
  unsigned int		 alignment;
} literal_pool;

/* Pointer to a linked list of literal pools.  */
literal_pool * list_of_pools = NULL;
991
/* State machine for the .asmfunc/.endasmfunc directive pair
   (NOTE(review): inferred from the enumerator names — confirm against
   the directive handlers, not visible here).  */
typedef enum asmfunc_states
{
  OUTSIDE_ASMFUNC,
  WAITING_ASMFUNC_NAME,
  WAITING_ENDASMFUNC
} asmfunc_states;

static asmfunc_states asmfunc_state = OUTSIDE_ASMFUNC;
1000
/* Current IT/VPT predication state.  For ELF this lives in the
   per-segment information (each section tracks its own block state);
   otherwise a single global suffices.  */
#ifdef OBJ_ELF
#  define now_pred seg_info (now_seg)->tc_segment_info_data.current_pred
#else
static struct current_pred now_pred;
#endif
1006
1007static inline int
1008now_pred_compatible (int cond)
1009{
1010  return (cond & ~1) == (now_pred.cc & ~1);
1011}
1012
1013static inline int
1014conditional_insn (void)
1015{
1016  return inst.cond != COND_ALWAYS;
1017}
1018
/* Forward declarations for the IT/VPT predication state machine,
   defined later in this file.  */
static int in_pred_block (void);

static int handle_pred_state (void);

static void force_automatic_it_block_close (void);

static void it_fsm_post_encode (void);
1026
/* Record the predication type of the current instruction and advance
   the IT/VPT state machine; on failure, return from the enclosing
   (void) function.  */
#define set_pred_insn_type(type)			\
  do						\
    {						\
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return;					\
    }						\
  while (0)

/* As set_pred_insn_type, but for use in functions that return a
   value: FAILRET is returned when the state machine rejects the
   instruction.  */
#define set_pred_insn_type_nonvoid(type, failret) \
  do						\
    {                                           \
      inst.pred_insn_type = type;			\
      if (handle_pred_state () == FAIL)		\
	return failret;				\
    }						\
  while(0)

/* Mark the current instruction as the last one permitted in a
   predication block, choosing the unconditional or conditional
   variant based on inst.cond.  */
#define set_pred_insn_type_last()				\
  do							\
    {							\
      if (inst.cond == COND_ALWAYS)			\
	set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);	\
      else						\
	set_pred_insn_type (INSIDE_IT_LAST_INSN);		\
    }							\
  while (0)
1054
/* Toggle value[pos].  Both arguments are fully parenthesized so the
   macro expands correctly for compound expressions; the original
   expansion (value ^ (1 << pos)) mis-associated for arguments such as
   TOGGLE_BIT (a & b, n) because `&' binds less tightly than `^'.  */
#define TOGGLE_BIT(value, pos) ((value) ^ (1 << (pos)))
1057
/* Pure syntax.	 */

/* This array holds the chars that always start a comment.  If the
   pre-processor is disabled, these aren't very useful.	 */
/* NOTE(review): deliberately non-const — presumably patched at startup
   for some targets; confirm against the option-handling code.  */
char arm_comment_chars[] = "@";

/* This array holds the chars that only start a comment at the beginning of
   a line.  If the line seems to have the form '# 123 filename'
   .line and .file directives will appear in the pre-processed output.	*/
/* Note that input_file.c hand checks for '#' at the beginning of the
   first line of the input file.  This is because the compiler outputs
   #NO_APP at the beginning of its output.  */
/* Also note that comments like this one will always work.  */
const char line_comment_chars[] = "#";

char arm_line_separator_chars[] = ";";

/* Chars that can be used to separate mant
   from exp in floating point numbers.	*/
const char EXP_CHARS[] = "eE";

/* Chars that mean this number is a floating point constant.  */
/* As in 0f12.456  */
/* or	 0d1.2345e12  */

const char FLT_CHARS[] = "rRsSfFdDxXeEpPHh";

/* Prefix characters that indicate the start of an immediate
   value.  */
#define is_immediate_prefix(C) ((C) == '#' || (C) == '$')

/* Separator character handling.  */

/* Skip at most ONE space (NOTE(review): presumably sufficient because
   the GAS pre-processor collapses whitespace runs — confirm).  */
#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)

/* Encoding formats recognized for 16-bit floating point.  */
enum fp_16bit_format
{
  ARM_FP16_FORMAT_IEEE		= 0x1,
  ARM_FP16_FORMAT_ALTERNATIVE	= 0x2,
  ARM_FP16_FORMAT_DEFAULT	= 0x3
};

static enum fp_16bit_format fp16_format = ARM_FP16_FORMAT_DEFAULT;
1101
1102
1103static inline int
1104skip_past_char (char ** str, char c)
1105{
1106  /* PR gas/14987: Allow for whitespace before the expected character.  */
1107  skip_whitespace (*str);
1108
1109  if (**str == c)
1110    {
1111      (*str)++;
1112      return SUCCESS;
1113    }
1114  else
1115    return FAIL;
1116}
1117
/* Consume an optional comma (and preceding whitespace); SUCCESS/FAIL.  */
#define skip_past_comma(str) skip_past_char (str, ',')
1119
1120/* Arithmetic expressions (possibly involving symbols).	 */
1121
1122/* Return TRUE if anything in the expression is a bignum.  */
1123
1124static bfd_boolean
1125walk_no_bignums (symbolS * sp)
1126{
1127  if (symbol_get_value_expression (sp)->X_op == O_big)
1128    return TRUE;
1129
1130  if (symbol_get_value_expression (sp)->X_add_symbol)
1131    {
1132      return (walk_no_bignums (symbol_get_value_expression (sp)->X_add_symbol)
1133	      || (symbol_get_value_expression (sp)->X_op_symbol
1134		  && walk_no_bignums (symbol_get_value_expression (sp)->X_op_symbol)));
1135    }
1136
1137  return FALSE;
1138}
1139
/* Nonzero while expression () is running on behalf of
   my_get_expression; md_operand uses this to flag bad operands.  */
static bfd_boolean in_my_get_expression = FALSE;

/* Third argument to my_get_expression.	 */
/* No immediate prefix ('#' or '$') is expected.  */
#define GE_NO_PREFIX 0
/* An immediate prefix is required.  */
#define GE_IMM_PREFIX 1
/* An immediate prefix is accepted but not required.  */
#define GE_OPT_PREFIX 2
/* This is a bit of a hack. Use an optional prefix, and also allow big (64-bit)
   immediates, as can be used in Neon VMVN and VMOV immediate instructions.  */
#define GE_OPT_PREFIX_BIG 3
1149
/* Parse an expression from *STR into EP, handling the immediate
   prefix according to PREFIX_MODE (one of the GE_* values above).
   On success, updates *STR past the expression and returns SUCCESS.
   On failure, sets inst.error (if not already set) and returns
   nonzero.  NOTE(review): the prefix check returns FAIL while the
   later error paths return 1 — callers appear to test only for
   nonzero; confirm before unifying.  */
static int
my_get_expression (expressionS * ep, char ** str, int prefix_mode)
{
  char * save_in;

  /* In unified syntax, all prefixes are optional.  */
  if (unified_syntax)
    prefix_mode = (prefix_mode == GE_OPT_PREFIX_BIG) ? prefix_mode
		  : GE_OPT_PREFIX;

  switch (prefix_mode)
    {
    case GE_NO_PREFIX: break;
    case GE_IMM_PREFIX:
      if (!is_immediate_prefix (**str))
	{
	  inst.error = _("immediate expression requires a # prefix");
	  return FAIL;
	}
      (*str)++;
      break;
    case GE_OPT_PREFIX:
    case GE_OPT_PREFIX_BIG:
      if (is_immediate_prefix (**str))
	(*str)++;
      break;
    default:
      abort ();
    }

  memset (ep, 0, sizeof (expressionS));

  /* expression () works on input_line_pointer; temporarily redirect it
     at our string, restoring it on every exit path below.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  in_my_get_expression = TRUE;
  expression (ep);
  in_my_get_expression = FALSE;

  if (ep->X_op == O_illegal || ep->X_op == O_absent)
    {
      /* We found a bad or missing expression in md_operand().  */
      *str = input_line_pointer;
      input_line_pointer = save_in;
      if (inst.error == NULL)
	inst.error = (ep->X_op == O_absent
		      ? _("missing expression") :_("bad expression"));
      return 1;
    }

  /* Get rid of any bignums now, so that we don't generate an error for which
     we can't establish a line number later on.	 Big numbers are never valid
     in instructions, which is where this routine is always called.  */
  if (prefix_mode != GE_OPT_PREFIX_BIG
      && (ep->X_op == O_big
	  || (ep->X_add_symbol
	      && (walk_no_bignums (ep->X_add_symbol)
		  || (ep->X_op_symbol
		      && walk_no_bignums (ep->X_op_symbol))))))
    {
      inst.error = _("invalid constant");
      *str = input_line_pointer;
      input_line_pointer = save_in;
      return 1;
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  return SUCCESS;
}
1219
1220/* Turn a string in input_line_pointer into a floating point constant
1221   of type TYPE, and store the appropriate bytes in *LITP.  The number
1222   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
1223   returned, or NULL on OK.
1224
   Note that fp constants aren't represented in the normal way on the ARM.
1226   In big endian mode, things are as expected.	However, in little endian
1227   mode fp constants are big-endian word-wise, and little-endian byte-wise
1228   within the words.  For example, (double) 1.1 in big endian mode is
1229   the byte sequence 3f f1 99 99 99 99 99 9a, and in little endian mode is
1230   the byte sequence 99 99 f1 3f 9a 99 99 99.
1231
1232   ??? The format of 12 byte floats is uncertain according to gcc's arm.h.  */
1233
const char *
md_atof (int type, char * litP, int * sizeP)
{
  /* PREC is the number of 16-bit littlenums in the result.  */
  int prec;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *t;
  int i;

  switch (type)
    {
    /* 16-bit half precision.  */
    case 'H':
    case 'h':
      prec = 1;
      break;

    /* If this is a bfloat16, then parse it slightly differently, as it
       does not follow the IEEE specification for floating point numbers
       exactly.  */
    case 'b':
      {
	FLONUM_TYPE generic_float;

	t = atof_ieee_detail (input_line_pointer, 1, 8, words, &generic_float);

	if (t)
	  input_line_pointer = t;
	else
	  return _("invalid floating point number");

	/* Infinities and NaNs need the canonical bfloat16 bit patterns
	   rather than whatever atof_ieee_detail produced.  */
	switch (generic_float.sign)
	  {
	  /* Is +Inf.  */
	  case 'P':
	    words[0] = 0x7f80;
	    break;

	  /* Is -Inf.  */
	  case 'N':
	    words[0] = 0xff80;
	    break;

	  /* Is NaN.  */
	  /* bfloat16 has two types of NaN - quiet and signalling.
	     Quiet NaN has bit[6] == 1 && fraction != 0, whereas
	     signalling NaN's have bit[6] == 0 && fraction != 0.
	     Chosen this specific encoding as it is the same form
	     as used by other IEEE 754 encodings in GAS.  */
	  case 0:
	    words[0] = 0x7fff;
	    break;

	  default:
	    break;
	  }

	*sizeP = 2;

	md_number_to_chars (litP, (valueT) words[0], sizeof (LITTLENUM_TYPE));

	return NULL;
      }
    /* 32-bit single precision.  */
    case 'f':
    case 'F':
    case 's':
    case 'S':
      prec = 2;
      break;

    /* 64-bit double precision.  */
    case 'd':
    case 'D':
    case 'r':
    case 'R':
      prec = 4;
      break;

    /* Extended precision.  */
    case 'x':
    case 'X':
      prec = 5;
      break;

    /* Packed decimal — NOTE(review): handled identically to extended
       precision here; confirm this matches FPA packed-format
       expectations.  */
    case 'p':
    case 'P':
      prec = 5;
      break;

    default:
      *sizeP = 0;
      return _("Unrecognized or unsupported floating point constant");
    }

  /* NOTE(review): if atof_ieee fails (returns NULL) no error is
     reported here; input_line_pointer is simply left unchanged.  */
  t = atof_ieee (input_line_pointer, type, words);
  if (t)
    input_line_pointer = t;
  *sizeP = prec * sizeof (LITTLENUM_TYPE);

  if (target_big_endian || prec == 1)
    /* Big-endian (or single littlenum): emit littlenums in order.  */
    for (i = 0; i < prec; i++)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
    /* Pure little-endian FP: emit littlenums in reverse order.  */
    for (i = prec - 1; i >= 0; i--)
      {
	md_number_to_chars (litP, (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += sizeof (LITTLENUM_TYPE);
      }
  else
    /* For a 4 byte float the order of elements in `words' is 1 0.
       For an 8 byte float the order is 1 0 3 2.  */
    for (i = 0; i < prec; i += 2)
      {
	md_number_to_chars (litP, (valueT) words[i + 1],
			    sizeof (LITTLENUM_TYPE));
	md_number_to_chars (litP + sizeof (LITTLENUM_TYPE),
			    (valueT) words[i], sizeof (LITTLENUM_TYPE));
	litP += 2 * sizeof (LITTLENUM_TYPE);
      }

  return NULL;
}
1355
1356/* We handle all bad expressions here, so that we can report the faulty
1357   instruction in the error message.  */
1358
1359void
1360md_operand (expressionS * exp)
1361{
1362  if (in_my_get_expression)
1363    exp->X_op = O_illegal;
1364}
1365
1366/* Immediate values.  */
1367
1368#ifdef OBJ_ELF
1369/* Generic immediate-value read function for use in directives.
1370   Accepts anything that 'expression' can fold to a constant.
1371   *val receives the number.  */
1372
1373static int
1374immediate_for_directive (int *val)
1375{
1376  expressionS exp;
1377  exp.X_op = O_illegal;
1378
1379  if (is_immediate_prefix (*input_line_pointer))
1380    {
1381      input_line_pointer++;
1382      expression (&exp);
1383    }
1384
1385  if (exp.X_op != O_constant)
1386    {
1387      as_bad (_("expected #constant"));
1388      ignore_rest_of_line ();
1389      return FAIL;
1390    }
1391  *val = exp.X_add_number;
1392  return SUCCESS;
1393}
1394#endif
1395
1396/* Register parsing.  */
1397
1398/* Generic register parser.  CCP points to what should be the
1399   beginning of a register name.  If it is indeed a valid register
1400   name, advance CCP over it and return the reg_entry structure;
1401   otherwise return NULL.  Does not issue diagnostics.	*/
1402
1403static struct reg_entry *
1404arm_reg_parse_multi (char **ccp)
1405{
1406  char *start = *ccp;
1407  char *p;
1408  struct reg_entry *reg;
1409
1410  skip_whitespace (start);
1411
1412#ifdef REGISTER_PREFIX
1413  if (*start != REGISTER_PREFIX)
1414    return NULL;
1415  start++;
1416#endif
1417#ifdef OPTIONAL_REGISTER_PREFIX
1418  if (*start == OPTIONAL_REGISTER_PREFIX)
1419    start++;
1420#endif
1421
1422  p = start;
1423  if (!ISALPHA (*p) || !is_name_beginner (*p))
1424    return NULL;
1425
1426  do
1427    p++;
1428  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
1429
1430  reg = (struct reg_entry *) str_hash_find_n (arm_reg_hsh, start, p - start);
1431
1432  if (!reg)
1433    return NULL;
1434
1435  *ccp = p;
1436  return reg;
1437}
1438
1439static int
1440arm_reg_alt_syntax (char **ccp, char *start, struct reg_entry *reg,
1441		    enum arm_reg_type type)
1442{
1443  /* Alternative syntaxes are accepted for a few register classes.  */
1444  switch (type)
1445    {
1446    case REG_TYPE_MVF:
1447    case REG_TYPE_MVD:
1448    case REG_TYPE_MVFX:
1449    case REG_TYPE_MVDX:
1450      /* Generic coprocessor register names are allowed for these.  */
1451      if (reg && reg->type == REG_TYPE_CN)
1452	return reg->number;
1453      break;
1454
1455    case REG_TYPE_CP:
1456      /* For backward compatibility, a bare number is valid here.  */
1457      {
1458	unsigned long processor = strtoul (start, ccp, 10);
1459	if (*ccp != start && processor <= 15)
1460	  return processor;
1461      }
1462      /* Fall through.  */
1463
1464    case REG_TYPE_MMXWC:
1465      /* WC includes WCG.  ??? I'm not sure this is true for all
1466	 instructions that take WC registers.  */
1467      if (reg && reg->type == REG_TYPE_MMXWCG)
1468	return reg->number;
1469      break;
1470
1471    default:
1472      break;
1473    }
1474
1475  return FAIL;
1476}
1477
1478/* As arm_reg_parse_multi, but the register must be of type TYPE, and the
1479   return value is the register number or FAIL.  */
1480
1481static int
1482arm_reg_parse (char **ccp, enum arm_reg_type type)
1483{
1484  char *start = *ccp;
1485  struct reg_entry *reg = arm_reg_parse_multi (ccp);
1486  int ret;
1487
1488  /* Do not allow a scalar (reg+index) to parse as a register.  */
1489  if (reg && reg->neon && (reg->neon->defined & NTA_HASINDEX))
1490    return FAIL;
1491
1492  if (reg && reg->type == type)
1493    return reg->number;
1494
1495  if ((ret = arm_reg_alt_syntax (ccp, start, reg, type)) != FAIL)
1496    return ret;
1497
1498  *ccp = start;
1499  return FAIL;
1500}
1501
1502/* Parse a Neon type specifier. *STR should point at the leading '.'
1503   character. Does no verification at this stage that the type fits the opcode
1504   properly. E.g.,
1505
1506     .i32.i32.s16
1507     .s32.f32
1508     .u16
1509
1510   Can all be legally parsed by this function.
1511
1512   Fills in neon_type struct pointer with parsed information, and updates STR
1513   to point after the parsed type specifier. Returns SUCCESS if this was a legal
1514   type, FAIL if not.  */
1515
1516static int
1517parse_neon_type (struct neon_type *type, char **str)
1518{
1519  char *ptr = *str;
1520
1521  if (type)
1522    type->elems = 0;
1523
1524  while (type->elems < NEON_MAX_TYPE_ELS)
1525    {
1526      enum neon_el_type thistype = NT_untyped;
1527      unsigned thissize = -1u;
1528
1529      if (*ptr != '.')
1530	break;
1531
1532      ptr++;
1533
1534      /* Just a size without an explicit type.  */
1535      if (ISDIGIT (*ptr))
1536	goto parsesize;
1537
1538      switch (TOLOWER (*ptr))
1539	{
1540	case 'i': thistype = NT_integer; break;
1541	case 'f': thistype = NT_float; break;
1542	case 'p': thistype = NT_poly; break;
1543	case 's': thistype = NT_signed; break;
1544	case 'u': thistype = NT_unsigned; break;
1545	case 'd':
1546	  thistype = NT_float;
1547	  thissize = 64;
1548	  ptr++;
1549	  goto done;
1550	case 'b':
1551	  thistype = NT_bfloat;
1552	  switch (TOLOWER (*(++ptr)))
1553	    {
1554	    case 'f':
1555	      ptr += 1;
1556	      thissize = strtoul (ptr, &ptr, 10);
1557	      if (thissize != 16)
1558		{
1559		  as_bad (_("bad size %d in type specifier"), thissize);
1560		  return FAIL;
1561		}
1562	      goto done;
1563	    case '0': case '1': case '2': case '3': case '4':
1564	    case '5': case '6': case '7': case '8': case '9':
1565	    case ' ': case '.':
1566	      as_bad (_("unexpected type character `b' -- did you mean `bf'?"));
1567	      return FAIL;
1568	    default:
1569	      break;
1570	    }
1571	  break;
1572	default:
1573	  as_bad (_("unexpected character `%c' in type specifier"), *ptr);
1574	  return FAIL;
1575	}
1576
1577      ptr++;
1578
1579      /* .f is an abbreviation for .f32.  */
1580      if (thistype == NT_float && !ISDIGIT (*ptr))
1581	thissize = 32;
1582      else
1583	{
1584	parsesize:
1585	  thissize = strtoul (ptr, &ptr, 10);
1586
1587	  if (thissize != 8 && thissize != 16 && thissize != 32
1588	      && thissize != 64)
1589	    {
1590	      as_bad (_("bad size %d in type specifier"), thissize);
1591	      return FAIL;
1592	    }
1593	}
1594
1595      done:
1596      if (type)
1597	{
1598	  type->el[type->elems].type = thistype;
1599	  type->el[type->elems].size = thissize;
1600	  type->elems++;
1601	}
1602    }
1603
1604  /* Empty/missing type is not a successful parse.  */
1605  if (type->elems == 0)
1606    return FAIL;
1607
1608  *str = ptr;
1609
1610  return SUCCESS;
1611}
1612
1613/* Errors may be set multiple times during parsing or bit encoding
1614   (particularly in the Neon bits), but usually the earliest error which is set
1615   will be the most meaningful. Avoid overwriting it with later (cascading)
1616   errors by calling this function.  */
1617
1618static void
1619first_error (const char *err)
1620{
1621  if (!inst.error)
1622    inst.error = err;
1623}
1624
1625/* Parse a single type, e.g. ".s32", leading period included.  */
1626static int
1627parse_neon_operand_type (struct neon_type_el *vectype, char **ccp)
1628{
1629  char *str = *ccp;
1630  struct neon_type optype;
1631
1632  if (*str == '.')
1633    {
1634      if (parse_neon_type (&optype, &str) == SUCCESS)
1635	{
1636	  if (optype.elems == 1)
1637	    *vectype = optype.el[0];
1638	  else
1639	    {
1640	      first_error (_("only one type should be specified for operand"));
1641	      return FAIL;
1642	    }
1643	}
1644      else
1645	{
1646	  first_error (_("vector type expected"));
1647	  return FAIL;
1648	}
1649    }
1650  else
1651    return FAIL;
1652
1653  *ccp = str;
1654
1655  return SUCCESS;
1656}
1657
/* Special meanings for indices (which have a range of 0-7), which will fit into
   a 4-bit integer.  */

/* Scalar index meaning "all lanes", e.g. d0[] in VLD1.  */
#define NEON_ALL_LANES		15
/* Scalar index meaning interleaved lanes.  */
#define NEON_INTERLEAVE_LANES	14
1663
/* Record a use of the given feature.  */
static void
record_feature_use (const arm_feature_set *feature)
{
  /* Used features are accumulated separately for ARM and Thumb state
     (NOTE(review): presumably consumed when emitting build attributes —
     confirm against the attribute-emission code, not visible here).  */
  if (thumb_mode)
    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used, *feature);
  else
    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, *feature);
}
1673
1674/* If the given feature available in the selected CPU, mark it as used.
1675   Returns TRUE iff feature is available.  */
1676static bfd_boolean
1677mark_feature_used (const arm_feature_set *feature)
1678{
1679
1680  /* Do not support the use of MVE only instructions when in auto-detection or
1681     -march=all.  */
1682  if (((feature == &mve_ext) || (feature == &mve_fp_ext))
1683      && ARM_CPU_IS_ANY (cpu_variant))
1684    {
1685      first_error (BAD_MVE_AUTO);
1686      return FALSE;
1687    }
1688  /* Ensure the option is valid on the current architecture.  */
1689  if (!ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
1690    return FALSE;
1691
1692  /* Add the appropriate architecture feature for the barrier option used.
1693     */
1694  record_feature_use (feature);
1695
1696  return TRUE;
1697}
1698
1699/* Parse either a register or a scalar, with an optional type. Return the
1700   register number, and optionally fill in the actual type of the register
1701   when multiple alternatives were given (NEON_TYPE_NDQ) in *RTYPE, and
1702   type/index information in *TYPEINFO.  */
1703
static int
parse_typed_reg_or_scalar (char **ccp, enum arm_reg_type type,
			   enum arm_reg_type *rtype,
			   struct neon_typed_alias *typeinfo)
{
  char *str = *ccp;
  struct reg_entry *reg = arm_reg_parse_multi (&str);
  struct neon_typed_alias atype;
  struct neon_type_el parsetype;

  /* Start with "no type, no index" so callers get a clean record even
     when no type/index syntax is present.  */
  atype.defined = 0;
  atype.index = -1;
  atype.eltype.type = NT_invtype;
  atype.eltype.size = -1;

  /* Try alternate syntax for some types of register. Note these are mutually
     exclusive with the Neon syntax extensions.  */
  if (reg == NULL)
    {
      int altreg = arm_reg_alt_syntax (&str, *ccp, reg, type);
      if (altreg != FAIL)
	*ccp = str;
      if (typeinfo)
	*typeinfo = atype;
      return altreg;
    }

  /* Undo polymorphism when a set of register types may be accepted.  */
  if ((type == REG_TYPE_NDQ
       && (reg->type == REG_TYPE_NQ || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_VFSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_NSDQ
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD
	      || reg->type == REG_TYPE_NQ))
      || (type == REG_TYPE_NSD
	  && (reg->type == REG_TYPE_VFS || reg->type == REG_TYPE_VFD))
      || (type == REG_TYPE_MMXWC
	  && (reg->type == REG_TYPE_MMXWCG)))
    type = (enum arm_reg_type) reg->type;

  /* MVE "MQ" registers are spelled like Neon Q registers but are
     restricted to q0..q7 unless the D32 extension is usable.  */
  if (type == REG_TYPE_MQ)
    {
      if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	return FAIL;

      /* NOTE(review): REG is already known non-NULL here (checked
	 above), so the !reg test is redundant.  */
      if (!reg || reg->type != REG_TYPE_NQ)
	return FAIL;

      if (reg->number > 14 && !mark_feature_used (&fpu_vfp_ext_d32))
	{
	  first_error (_("expected MVE register [q0..q7]"));
	  return FAIL;
	}
      type = REG_TYPE_NQ;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (type == REG_TYPE_NQ))
    return FAIL;


  if (type != reg->type)
    return FAIL;

  /* Registers defined with an alias may carry pre-set type/index
     information.  */
  if (reg->neon)
    atype = *reg->neon;

  /* An explicit ".type" suffix overrides nothing: it may only add type
     information that was not already present.  */
  if (parse_neon_operand_type (&parsetype, &str) == SUCCESS)
    {
      if ((atype.defined & NTA_HASTYPE) != 0)
	{
	  first_error (_("can't redefine type for operand"));
	  return FAIL;
	}
      atype.defined |= NTA_HASTYPE;
      atype.eltype = parsetype;
    }

  /* Optional "[index]" or "[]" scalar suffix.  */
  if (skip_past_char (&str, '[') == SUCCESS)
    {
      if (type != REG_TYPE_VFD
	  && !(type == REG_TYPE_VFS
	       && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_2))
	  && !(type == REG_TYPE_NQ
	       && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
	{
	  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    first_error (_("only D and Q registers may be indexed"));
	  else
	    first_error (_("only D registers may be indexed"));
	  return FAIL;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  first_error (_("can't change index for operand"));
	  return FAIL;
	}

      atype.defined |= NTA_HASINDEX;

      /* "[]" means all lanes; otherwise a constant index is required.  */
      if (skip_past_char (&str, ']') == SUCCESS)
	atype.index = NEON_ALL_LANES;
      else
	{
	  expressionS exp;

	  my_get_expression (&exp, &str, GE_NO_PREFIX);

	  if (exp.X_op != O_constant)
	    {
	      first_error (_("constant expression required"));
	      return FAIL;
	    }

	  if (skip_past_char (&str, ']') == FAIL)
	    return FAIL;

	  atype.index = exp.X_add_number;
	}
    }

  if (typeinfo)
    *typeinfo = atype;

  if (rtype)
    *rtype = type;

  *ccp = str;

  return reg->number;
}
1836
1837/* Like arm_reg_parse, but also allow the following extra features:
1838    - If RTYPE is non-zero, return the (possibly restricted) type of the
1839      register (e.g. Neon double or quad reg when either has been requested).
1840    - If this is a Neon vector type with additional type information, fill
1841      in the struct pointed to by VECTYPE (if non-NULL).
1842   This function will fault on encountering a scalar.  */
1843
1844static int
1845arm_typed_reg_parse (char **ccp, enum arm_reg_type type,
1846		     enum arm_reg_type *rtype, struct neon_type_el *vectype)
1847{
1848  struct neon_typed_alias atype;
1849  char *str = *ccp;
1850  int reg = parse_typed_reg_or_scalar (&str, type, rtype, &atype);
1851
1852  if (reg == FAIL)
1853    return FAIL;
1854
1855  /* Do not allow regname(... to parse as a register.  */
1856  if (*str == '(')
1857    return FAIL;
1858
1859  /* Do not allow a scalar (reg+index) to parse as a register.  */
1860  if ((atype.defined & NTA_HASINDEX) != 0)
1861    {
1862      first_error (_("register operand expected, but got scalar"));
1863      return FAIL;
1864    }
1865
1866  if (vectype)
1867    *vectype = atype.eltype;
1868
1869  *ccp = str;
1870
1871  return reg;
1872}
1873
/* A parsed scalar is encoded as (register << 4) | index (see
   parse_scalar, which returns reg * 16 + index); decode with these.  */
#define NEON_SCALAR_REG(X)	((X) >> 4)
#define NEON_SCALAR_INDEX(X)	((X) & 15)
1876
1877/* Parse a Neon scalar. Most of the time when we're parsing a scalar, we don't
1878   have enough information to be able to do a good job bounds-checking. So, we
1879   just do easy checks here, and do further checks later.  */
1880
1881static int
1882parse_scalar (char **ccp, int elsize, struct neon_type_el *type, enum
1883	      arm_reg_type reg_type)
1884{
1885  int reg;
1886  char *str = *ccp;
1887  struct neon_typed_alias atype;
1888  unsigned reg_size;
1889
1890  reg = parse_typed_reg_or_scalar (&str, reg_type, NULL, &atype);
1891
1892  switch (reg_type)
1893    {
1894    case REG_TYPE_VFS:
1895      reg_size = 32;
1896      break;
1897    case REG_TYPE_VFD:
1898      reg_size = 64;
1899      break;
1900    case REG_TYPE_MQ:
1901      reg_size = 128;
1902      break;
1903    default:
1904      gas_assert (0);
1905      return FAIL;
1906    }
1907
1908  if (reg == FAIL || (atype.defined & NTA_HASINDEX) == 0)
1909    return FAIL;
1910
1911  if (reg_type != REG_TYPE_MQ && atype.index == NEON_ALL_LANES)
1912    {
1913      first_error (_("scalar must have an index"));
1914      return FAIL;
1915    }
1916  else if (atype.index >= reg_size / elsize)
1917    {
1918      first_error (_("scalar index out of range"));
1919      return FAIL;
1920    }
1921
1922  if (type)
1923    *type = atype.eltype;
1924
1925  *ccp = str;
1926
1927  return reg * 16 + atype.index;
1928}
1929
/* Types of registers in a list.  */

enum reg_list_els
{
  REGLIST_RN,		/* Core register list (parsed with REG_TYPE_RN).  */
  REGLIST_CLRM,		/* CLRM list: r0-r12, lr, or APSR; SP/PC rejected.  */
  REGLIST_VFP_S,	/* VFP single-precision list (s0-s31).  */
  REGLIST_VFP_S_VPR,	/* As REGLIST_VFP_S, with a mandatory trailing VPR.  */
  REGLIST_VFP_D,	/* VFP double-precision list (d0-d15, or d0-d31
			   with the D32 extension).  */
  REGLIST_VFP_D_VPR,	/* As REGLIST_VFP_D, with a mandatory trailing VPR.  */
  REGLIST_NEON_D	/* Neon D-register list; Q registers may name
			   pairs of D registers.  */
};
1942
1943/* Parse an ARM register list.  Returns the bitmask, or FAIL.  */
1944
1945static long
1946parse_reg_list (char ** strp, enum reg_list_els etype)
1947{
1948  char *str = *strp;
1949  long range = 0;
1950  int another_range;
1951
1952  gas_assert (etype == REGLIST_RN || etype == REGLIST_CLRM);
1953
1954  /* We come back here if we get ranges concatenated by '+' or '|'.  */
1955  do
1956    {
1957      skip_whitespace (str);
1958
1959      another_range = 0;
1960
1961      if (*str == '{')
1962	{
1963	  int in_range = 0;
1964	  int cur_reg = -1;
1965
1966	  str++;
1967	  do
1968	    {
1969	      int reg;
1970	      const char apsr_str[] = "apsr";
1971	      int apsr_str_len = strlen (apsr_str);
1972
1973	      reg = arm_reg_parse (&str, REG_TYPE_RN);
1974	      if (etype == REGLIST_CLRM)
1975		{
1976		  if (reg == REG_SP || reg == REG_PC)
1977		    reg = FAIL;
1978		  else if (reg == FAIL
1979			   && !strncasecmp (str, apsr_str, apsr_str_len)
1980			   && !ISALPHA (*(str + apsr_str_len)))
1981		    {
1982		      reg = 15;
1983		      str += apsr_str_len;
1984		    }
1985
1986		  if (reg == FAIL)
1987		    {
1988		      first_error (_("r0-r12, lr or APSR expected"));
1989		      return FAIL;
1990		    }
1991		}
1992	      else /* etype == REGLIST_RN.  */
1993		{
1994		  if (reg == FAIL)
1995		    {
1996		      first_error (_(reg_expected_msgs[REGLIST_RN]));
1997		      return FAIL;
1998		    }
1999		}
2000
2001	      if (in_range)
2002		{
2003		  int i;
2004
2005		  if (reg <= cur_reg)
2006		    {
2007		      first_error (_("bad range in register list"));
2008		      return FAIL;
2009		    }
2010
2011		  for (i = cur_reg + 1; i < reg; i++)
2012		    {
2013		      if (range & (1 << i))
2014			as_tsktsk
2015			  (_("Warning: duplicated register (r%d) in register list"),
2016			   i);
2017		      else
2018			range |= 1 << i;
2019		    }
2020		  in_range = 0;
2021		}
2022
2023	      if (range & (1 << reg))
2024		as_tsktsk (_("Warning: duplicated register (r%d) in register list"),
2025			   reg);
2026	      else if (reg <= cur_reg)
2027		as_tsktsk (_("Warning: register range not in ascending order"));
2028
2029	      range |= 1 << reg;
2030	      cur_reg = reg;
2031	    }
2032	  while (skip_past_comma (&str) != FAIL
2033		 || (in_range = 1, *str++ == '-'));
2034	  str--;
2035
2036	  if (skip_past_char (&str, '}') == FAIL)
2037	    {
2038	      first_error (_("missing `}'"));
2039	      return FAIL;
2040	    }
2041	}
2042      else if (etype == REGLIST_RN)
2043	{
2044	  expressionS exp;
2045
2046	  if (my_get_expression (&exp, &str, GE_NO_PREFIX))
2047	    return FAIL;
2048
2049	  if (exp.X_op == O_constant)
2050	    {
2051	      if (exp.X_add_number
2052		  != (exp.X_add_number & 0x0000ffff))
2053		{
2054		  inst.error = _("invalid register mask");
2055		  return FAIL;
2056		}
2057
2058	      if ((range & exp.X_add_number) != 0)
2059		{
2060		  int regno = range & exp.X_add_number;
2061
2062		  regno &= -regno;
2063		  regno = (1 << regno) - 1;
2064		  as_tsktsk
2065		    (_("Warning: duplicated register (r%d) in register list"),
2066		     regno);
2067		}
2068
2069	      range |= exp.X_add_number;
2070	    }
2071	  else
2072	    {
2073	      if (inst.relocs[0].type != 0)
2074		{
2075		  inst.error = _("expression too complex");
2076		  return FAIL;
2077		}
2078
2079	      memcpy (&inst.relocs[0].exp, &exp, sizeof (expressionS));
2080	      inst.relocs[0].type = BFD_RELOC_ARM_MULTI;
2081	      inst.relocs[0].pc_rel = 0;
2082	    }
2083	}
2084
2085      if (*str == '|' || *str == '+')
2086	{
2087	  str++;
2088	  another_range = 1;
2089	}
2090    }
2091  while (another_range);
2092
2093  *strp = str;
2094  return range;
2095}
2096
2097/* Parse a VFP register list.  If the string is invalid return FAIL.
2098   Otherwise return the number of registers, and set PBASE to the first
2099   register.  Parses registers of type ETYPE.
2100   If REGLIST_NEON_D is used, several syntax enhancements are enabled:
2101     - Q registers can be used to specify pairs of D registers
2102     - { } can be omitted from around a singleton register list
2103	 FIXME: This is not implemented, as it would require backtracking in
2104	 some cases, e.g.:
2105	   vtbl.8 d3,d4,d5
2106	 This could be done (the meaning isn't really ambiguous), but doesn't
2107	 fit in well with the current parsing framework.
2108     - 32 D registers may be used (also true for VFPv3).
2109   FIXME: Types are ignored in these register lists, which is probably a
2110   bug.  */
2111
static int
parse_vfp_reg_list (char **ccp, unsigned int *pbase, enum reg_list_els etype,
		    bfd_boolean *partial_match)
{
  char *str = *ccp;
  int base_reg;
  int new_base;
  enum arm_reg_type regtype = (enum arm_reg_type) 0;
  int max_regs = 0;
  int count = 0;
  int warned = 0;
  unsigned long mask = 0;
  int i;
  /* Whether the mandatory trailing VPR has been seen yet.  */
  bfd_boolean vpr_seen = FALSE;
  /* The *_VPR list flavours require VPR as the final list element.  */
  bfd_boolean expect_vpr =
    (etype == REGLIST_VFP_S_VPR) || (etype == REGLIST_VFP_D_VPR);

  if (skip_past_char (&str, '{') == FAIL)
    {
      inst.error = _("expecting {");
      return FAIL;
    }

  /* Map the list flavour to a register type.  The D-register limit
     depends on the CPU and is chosen below.  */
  switch (etype)
    {
    case REGLIST_VFP_S:
    case REGLIST_VFP_S_VPR:
      regtype = REG_TYPE_VFS;
      max_regs = 32;
      break;

    case REGLIST_VFP_D:
    case REGLIST_VFP_D_VPR:
      regtype = REG_TYPE_VFD;
      break;

    case REGLIST_NEON_D:
      regtype = REG_TYPE_NDQ;
      break;

    default:
      gas_assert (0);
    }

  if (etype != REGLIST_VFP_S && etype != REGLIST_VFP_S_VPR)
    {
      /* VFPv3 allows 32 D registers, except for the VFPv3-D16 variant.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
	{
	  max_regs = 32;
	  /* Record that the D32 extension was actually used.  */
	  if (thumb_mode)
	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				    fpu_vfp_ext_d32);
	  else
	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				    fpu_vfp_ext_d32);
	}
      else
	max_regs = 16;
    }

  /* Start above any valid register number so the first register parsed
     becomes the base.  */
  base_reg = max_regs;
  *partial_match = FALSE;

  do
    {
      unsigned int setmask = 1, addregs = 1;
      const char vpr_str[] = "vpr";
      size_t vpr_str_len = strlen (vpr_str);

      new_base = arm_typed_reg_parse (&str, regtype, &regtype, NULL);

      if (expect_vpr)
	{
	  /* "vpr" is not a name arm_typed_reg_parse recognises; accept
	     it by hand, at most once, and only as the final element.  */
	  if (new_base == FAIL
	      && !strncasecmp (str, vpr_str, vpr_str_len)
	      && !ISALPHA (*(str + vpr_str_len))
	      && !vpr_seen)
	    {
	      vpr_seen = TRUE;
	      str += vpr_str_len;
	      if (count == 0)
		base_reg = 0; /* Canonicalize VPR only on d0 with 0 regs.  */
	    }
	  else if (vpr_seen)
	    {
	      first_error (_("VPR expected last"));
	      return FAIL;
	    }
	  else if (new_base == FAIL)
	    {
	      if (regtype == REG_TYPE_VFS)
		first_error (_("VFP single precision register or VPR "
			       "expected"));
	      else /* regtype == REG_TYPE_VFD.  */
		first_error (_("VFP/Neon double precision register or VPR "
			       "expected"));
	      return FAIL;
	    }
	}
      else if (new_base == FAIL)
	{
	  first_error (_(reg_expected_msgs[regtype]));
	  return FAIL;
	}

      /* At least one element parsed; lets callers produce better
	 diagnostics on a later failure.  */
      *partial_match = TRUE;
      if (vpr_seen)
	continue;

      if (new_base >= max_regs)
	{
	  first_error (_("register out of range in list"));
	  return FAIL;
	}

      /* Note: a value of 2 * n is returned for the register Q<n>.  */
      if (regtype == REG_TYPE_NQ)
	{
	  setmask = 3;
	  addregs = 2;
	}

      if (new_base < base_reg)
	base_reg = new_base;

      if (mask & (setmask << new_base))
	{
	  first_error (_("invalid register list"));
	  return FAIL;
	}

      if ((mask >> new_base) != 0 && ! warned && !vpr_seen)
	{
	  as_tsktsk (_("register list not in ascending order"));
	  warned = 1;
	}

      mask |= setmask << new_base;
      count += addregs;

      if (*str == '-') /* We have the start of a range expression */
	{
	  int high_range;

	  str++;

	  if ((high_range = arm_typed_reg_parse (&str, regtype, NULL, NULL))
	      == FAIL)
	    {
	      inst.error = gettext (reg_expected_msgs[regtype]);
	      return FAIL;
	    }

	  if (high_range >= max_regs)
	    {
	      first_error (_("register out of range in list"));
	      return FAIL;
	    }

	  if (regtype == REG_TYPE_NQ)
	    high_range = high_range + 1;

	  if (high_range <= new_base)
	    {
	      inst.error = _("register range not in ascending order");
	      return FAIL;
	    }

	  /* Mark every register covered by the range.  */
	  for (new_base += addregs; new_base <= high_range; new_base += addregs)
	    {
	      if (mask & (setmask << new_base))
		{
		  inst.error = _("invalid register list");
		  return FAIL;
		}

	      mask |= setmask << new_base;
	      count += addregs;
	    }
	}
    }
  while (skip_past_comma (&str) != FAIL);

  /* NOTE(review): the closing '}' is assumed here rather than verified;
     confirm callers reject any trailing garbage after the list.  */
  str++;

  /* Sanity check -- should have raised a parse error above.  */
  if ((!vpr_seen && count == 0) || count > max_regs)
    abort ();

  *pbase = base_reg;

  if (expect_vpr && !vpr_seen)
    {
      first_error (_("VPR expected last"));
      return FAIL;
    }

  /* Final test -- the registers must be consecutive.  */
  mask >>= base_reg;
  for (i = 0; i < count; i++)
    {
      if ((mask & (1u << i)) == 0)
	{
	  inst.error = _("non-contiguous register range");
	  return FAIL;
	}
    }

  *ccp = str;

  return count;
}
2325
2326/* True if two alias types are the same.  */
2327
2328static bfd_boolean
2329neon_alias_types_same (struct neon_typed_alias *a, struct neon_typed_alias *b)
2330{
2331  if (!a && !b)
2332    return TRUE;
2333
2334  if (!a || !b)
2335    return FALSE;
2336
2337  if (a->defined != b->defined)
2338    return FALSE;
2339
2340  if ((a->defined & NTA_HASTYPE) != 0
2341      && (a->eltype.type != b->eltype.type
2342	  || a->eltype.size != b->eltype.size))
2343    return FALSE;
2344
2345  if ((a->defined & NTA_HASINDEX) != 0
2346      && (a->index != b->index))
2347    return FALSE;
2348
2349  return TRUE;
2350}
2351
2352/* Parse element/structure lists for Neon VLD<n> and VST<n> instructions.
2353   The base register is put in *PBASE.
2354   The lane (or one of the NEON_*_LANES constants) is placed in bits [3:0] of
2355   the return value.
2356   The register stride (minus one) is put in bit 4 of the return value.
2357   Bits [6:5] encode the list length (minus one).
2358   The type of the list elements is put in *ELTYPE, if non-NULL.  */
2359
2360#define NEON_LANE(X)		((X) & 0xf)
2361#define NEON_REG_STRIDE(X)	((((X) >> 4) & 1) + 1)
2362#define NEON_REGLIST_LENGTH(X)	((((X) >> 5) & 3) + 1)
2363
static int
parse_neon_el_struct_list (char **str, unsigned *pbase,
			   int mve,
			   struct neon_type_el *eltype)
{
  char *ptr = *str;
  int base_reg = -1;
  int reg_incr = -1;
  int count = 0;
  int lane = -1;
  int leading_brace = 0;
  enum arm_reg_type rtype = REG_TYPE_NDQ;
  const char *const incr_error = mve ? _("register stride must be 1") :
    _("register stride must be 1 or 2");
  const char *const type_error = _("mismatched element/structure types in list");
  struct neon_typed_alias firsttype;
  firsttype.defined = 0;
  firsttype.eltype.type = NT_invtype;
  firsttype.eltype.size = -1;
  firsttype.index = -1;

  /* The braces may be omitted around a single-register list (see the
     loop condition below, which stops after one element when there is
     no leading brace).  */
  if (skip_past_char (&ptr, '{') == SUCCESS)
    leading_brace = 1;

  do
    {
      struct neon_typed_alias atype;
      if (mve)
	rtype = REG_TYPE_MQ;
      int getreg = parse_typed_reg_or_scalar (&ptr, rtype, &rtype, &atype);

      if (getreg == FAIL)
	{
	  first_error (_(reg_expected_msgs[rtype]));
	  return FAIL;
	}

      if (base_reg == -1)
	{
	  /* The first register fixes the base, the type, and (for Q
	     registers) the stride.  */
	  base_reg = getreg;
	  if (rtype == REG_TYPE_NQ)
	    {
	      reg_incr = 1;
	    }
	  firsttype = atype;
	}
      else if (reg_incr == -1)
	{
	  /* The second register fixes the stride.  */
	  reg_incr = getreg - base_reg;
	  if (reg_incr < 1 || reg_incr > 2)
	    {
	      first_error (_(incr_error));
	      return FAIL;
	    }
	}
      else if (getreg != base_reg + reg_incr * count)
	{
	  /* Subsequent registers must continue the established stride.  */
	  first_error (_(incr_error));
	  return FAIL;
	}

      if (! neon_alias_types_same (&atype, &firsttype))
	{
	  first_error (_(type_error));
	  return FAIL;
	}

      /* Handle Dn-Dm or Qn-Qm syntax. Can only be used with non-indexed list
	 modes.  */
      if (ptr[0] == '-')
	{
	  struct neon_typed_alias htype;
	  int hireg, dregs = (rtype == REG_TYPE_NQ) ? 2 : 1;
	  if (lane == -1)
	    lane = NEON_INTERLEAVE_LANES;
	  else if (lane != NEON_INTERLEAVE_LANES)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  if (reg_incr == -1)
	    reg_incr = 1;
	  else if (reg_incr != 1)
	    {
	      first_error (_("don't use Rn-Rm syntax with non-unit stride"));
	      return FAIL;
	    }
	  ptr++;
	  hireg = parse_typed_reg_or_scalar (&ptr, rtype, NULL, &htype);
	  if (hireg == FAIL)
	    {
	      first_error (_(reg_expected_msgs[rtype]));
	      return FAIL;
	    }
	  if (! neon_alias_types_same (&htype, &firsttype))
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	  /* Count every register in the range, in D-register units.  */
	  count += hireg + dregs - getreg;
	  continue;
	}

      /* If we're using Q registers, we can't use [] or [n] syntax.  */
      if (rtype == REG_TYPE_NQ)
	{
	  count += 2;
	  continue;
	}

      if ((atype.defined & NTA_HASINDEX) != 0)
	{
	  /* Every indexed element in the list must use the same lane.  */
	  if (lane == -1)
	    lane = atype.index;
	  else if (lane != atype.index)
	    {
	      first_error (_(type_error));
	      return FAIL;
	    }
	}
      else if (lane == -1)
	lane = NEON_INTERLEAVE_LANES;
      else if (lane != NEON_INTERLEAVE_LANES)
	{
	  /* Indexed and non-indexed elements cannot be mixed.  */
	  first_error (_(type_error));
	  return FAIL;
	}
      count++;
    }
  while ((count != 1 || leading_brace) && skip_past_comma (&ptr) != FAIL);

  /* No lane set by [x]. We must be interleaving structures.  */
  if (lane == -1)
    lane = NEON_INTERLEAVE_LANES;

  /* Sanity check.  */
  if (lane == -1 || base_reg == -1 || count < 1 || (!mve && count > 4)
      || (count > 1 && reg_incr == -1))
    {
      first_error (_("error parsing element/structure list"));
      return FAIL;
    }

  if ((count > 1 || leading_brace) && skip_past_char (&ptr, '}') == FAIL)
    {
      first_error (_("expected }"));
      return FAIL;
    }

  if (reg_incr == -1)
    reg_incr = 1;

  if (eltype)
    *eltype = firsttype.eltype;

  *pbase = base_reg;
  *str = ptr;

  /* Pack lane, stride and length as described in the comment above
     NEON_LANE.  */
  return lane | ((reg_incr - 1) << 4) | ((count - 1) << 5);
}
2524
2525/* Parse an explicit relocation suffix on an expression.  This is
2526   either nothing, or a word in parentheses.  Note that if !OBJ_ELF,
2527   arm_reloc_hsh contains no entries, so this function can only
2528   succeed if there is no () after the word.  Returns -1 on error,
2529   BFD_RELOC_UNUSED if there wasn't any suffix.	 */
2530
2531static int
2532parse_reloc (char **str)
2533{
2534  struct reloc_entry *r;
2535  char *p, *q;
2536
2537  if (**str != '(')
2538    return BFD_RELOC_UNUSED;
2539
2540  p = *str + 1;
2541  q = p;
2542
2543  while (*q && *q != ')' && *q != ',')
2544    q++;
2545  if (*q != ')')
2546    return -1;
2547
2548  if ((r = (struct reloc_entry *)
2549       str_hash_find_n (arm_reloc_hsh, p, q - p)) == NULL)
2550    return -1;
2551
2552  *str = q + 1;
2553  return r->reloc;
2554}
2555
2556/* Directives: register aliases.  */
2557
2558static struct reg_entry *
2559insert_reg_alias (char *str, unsigned number, int type)
2560{
2561  struct reg_entry *new_reg;
2562  const char *name;
2563
2564  if ((new_reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, str)) != 0)
2565    {
2566      if (new_reg->builtin)
2567	as_warn (_("ignoring attempt to redefine built-in register '%s'"), str);
2568
2569      /* Only warn about a redefinition if it's not defined as the
2570	 same register.	 */
2571      else if (new_reg->number != number || new_reg->type != type)
2572	as_warn (_("ignoring redefinition of register alias '%s'"), str);
2573
2574      return NULL;
2575    }
2576
2577  name = xstrdup (str);
2578  new_reg = XNEW (struct reg_entry);
2579
2580  new_reg->name = name;
2581  new_reg->number = number;
2582  new_reg->type = type;
2583  new_reg->builtin = FALSE;
2584  new_reg->neon = NULL;
2585
2586  str_hash_insert (arm_reg_hsh, name, new_reg, 0);
2587
2588  return new_reg;
2589}
2590
2591static void
2592insert_neon_reg_alias (char *str, int number, int type,
2593		       struct neon_typed_alias *atype)
2594{
2595  struct reg_entry *reg = insert_reg_alias (str, number, type);
2596
2597  if (!reg)
2598    {
2599      first_error (_("attempt to redefine typed alias"));
2600      return;
2601    }
2602
2603  if (atype)
2604    {
2605      reg->neon = XNEW (struct neon_typed_alias);
2606      *reg->neon = *atype;
2607    }
2608}
2609
/* Look for the .req directive.	 This is of the form:

	new_register_name .req existing_register_name

   If we find one, or if it looks sufficiently like one that we want to
   handle any error here, return TRUE.  Otherwise return FALSE.

   NEWNAME is the candidate alias name; P points just past it in the
   input line.  */

static bfd_boolean
create_register_alias (char * newname, char *p)
{
  struct reg_entry *old;
  char *oldname, *nbuf;
  size_t nlen;

  /* The input scrubber ensures that whitespace after the mnemonic is
     collapsed to single spaces.  */
  oldname = p;
  if (strncmp (oldname, " .req ", 6) != 0)
    return FALSE;

  oldname += 6;
  if (*oldname == '\0')
    return FALSE;

  old = (struct reg_entry *) str_hash_find (arm_reg_hsh, oldname);
  if (!old)
    {
      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
      return TRUE;
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  nlen = p - newname;
#else
  newname = original_case_string;
  nlen = strlen (newname);
#endif

  nbuf = xmemdup0 (newname, nlen);

  /* Create aliases under the new name as stated; an all-lowercase
     version of the new name; and an all-uppercase version of the new
     name.  */
  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
    {
      for (p = nbuf; *p; p++)
	*p = TOUPPER (*p);

      /* Only attempt the all-uppercase alias when it actually differs
	 from the name as written.  */
      if (strncmp (nbuf, newname, nlen))
	{
	  /* If this attempt to create an additional alias fails, do not bother
	     trying to create the all-lower case alias.  We will fail and issue
	     a second, duplicate error message.  This situation arises when the
	     programmer does something like:
	       foo .req r0
	       Foo .req r1
	     The second .req creates the "Foo" alias but then fails to create
	     the artificial FOO alias because it has already been created by the
	     first .req.  */
	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
	    {
	      free (nbuf);
	      return TRUE;
	    }
	}

      for (p = nbuf; *p; p++)
	*p = TOLOWER (*p);

      if (strncmp (nbuf, newname, nlen))
	insert_reg_alias (nbuf, old->number, old->type);
    }

  free (nbuf);
  return TRUE;
}
2689
/* Create a Neon typed/indexed register alias using directives, e.g.:
     X .dn d5.s32[1]
     Y .qn 6.s16
     Z .dn d7
     T .dn Z[0]
   These typed registers can be used instead of the types specified after the
   Neon mnemonic, so long as all operands given have types. Types can also be
   specified directly, e.g.:
     vadd d0.s32, d1.s32, d2.s32

   NEWNAME is the candidate alias name; P points just past it.  Returns
   TRUE if the line was a .dn/.qn directive (even on error), FALSE
   otherwise.  */

static bfd_boolean
create_neon_reg_alias (char *newname, char *p)
{
  enum arm_reg_type basetype;
  struct reg_entry *basereg;
  struct reg_entry mybasereg;
  struct neon_type ntype;
  struct neon_typed_alias typeinfo;
  char *namebuf, *nameend ATTRIBUTE_UNUSED;
  int namelen;

  /* Start with no type and no index recorded.  */
  typeinfo.defined = 0;
  typeinfo.eltype.type = NT_invtype;
  typeinfo.eltype.size = -1;
  typeinfo.index = -1;

  nameend = p;

  /* .dn aliases D registers, .qn aliases Q registers.  */
  if (strncmp (p, " .dn ", 5) == 0)
    basetype = REG_TYPE_VFD;
  else if (strncmp (p, " .qn ", 5) == 0)
    basetype = REG_TYPE_NQ;
  else
    return FALSE;

  p += 5;

  if (*p == '\0')
    return FALSE;

  basereg = arm_reg_parse_multi (&p);

  if (basereg && basereg->type != basetype)
    {
      as_bad (_("bad type for register"));
      return FALSE;
    }

  if (basereg == NULL)
    {
      expressionS exp;
      /* Try parsing as an integer.  */
      my_get_expression (&exp, &p, GE_NO_PREFIX);
      if (exp.X_op != O_constant)
	{
	  as_bad (_("expression must be constant"));
	  return FALSE;
	}
      /* Fake up a register entry on the stack for the bare number.
	 Q<n> maps onto D-register number 2*n.  */
      basereg = &mybasereg;
      basereg->number = (basetype == REG_TYPE_NQ) ? exp.X_add_number * 2
						  : exp.X_add_number;
      basereg->neon = 0;
    }

  /* Aliasing an already-typed alias inherits its type/index.  */
  if (basereg->neon)
    typeinfo = *basereg->neon;

  if (parse_neon_type (&ntype, &p) == SUCCESS)
    {
      /* We got a type.  */
      if (typeinfo.defined & NTA_HASTYPE)
	{
	  as_bad (_("can't redefine the type of a register alias"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASTYPE;
      if (ntype.elems != 1)
	{
	  as_bad (_("you must specify a single type only"));
	  return FALSE;
	}
      typeinfo.eltype = ntype.el[0];
    }

  if (skip_past_char (&p, '[') == SUCCESS)
    {
      expressionS exp;
      /* We got a scalar index.  */

      if (typeinfo.defined & NTA_HASINDEX)
	{
	  as_bad (_("can't redefine the index of a scalar alias"));
	  return FALSE;
	}

      my_get_expression (&exp, &p, GE_NO_PREFIX);

      if (exp.X_op != O_constant)
	{
	  as_bad (_("scalar index must be constant"));
	  return FALSE;
	}

      typeinfo.defined |= NTA_HASINDEX;
      typeinfo.index = exp.X_add_number;

      if (skip_past_char (&p, ']') == FAIL)
	{
	  as_bad (_("expecting ]"));
	  return FALSE;
	}
    }

  /* If TC_CASE_SENSITIVE is defined, then newname already points to
     the desired alias name, and p points to its end.  If not, then
     the desired alias name is in the global original_case_string.  */
#ifdef TC_CASE_SENSITIVE
  namelen = nameend - newname;
#else
  newname = original_case_string;
  namelen = strlen (newname);
#endif

  namebuf = xmemdup0 (newname, namelen);

  /* As for .req (see create_register_alias), insert the alias under
     the name as written and under upper/lower-case variants.  */
  insert_neon_reg_alias (namebuf, basereg->number, basetype,
			 typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all uppercase.  */
  for (p = namebuf; *p; p++)
    *p = TOUPPER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  /* Insert name in all lowercase.  */
  for (p = namebuf; *p; p++)
    *p = TOLOWER (*p);

  if (strncmp (namebuf, newname, namelen))
    insert_neon_reg_alias (namebuf, basereg->number, basetype,
			   typeinfo.defined != 0 ? &typeinfo : NULL);

  free (namebuf);
  return TRUE;
}
2838
/* Handler for ".req" appearing at the start of a line.  Should never
   be reached in valid input, as .req goes between the alias and the
   register name (handled by create_register_alias), not at the
   beginning of the line; diagnose the misuse.  */

static void
s_req (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .req directive"));
}
2847
/* Likewise for ".dn" at the start of a line: the valid form is handled
   by create_neon_reg_alias.  */

static void
s_dn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .dn directive"));
}
2853
/* Likewise for ".qn" at the start of a line: the valid form is handled
   by create_neon_reg_alias.  */

static void
s_qn (int a ATTRIBUTE_UNUSED)
{
  as_bad (_("invalid syntax for .qn directive"));
}
2859
2860/* The .unreq directive deletes an alias which was previously defined
2861   by .req.  For example:
2862
2863       my_alias .req r11
2864       .unreq my_alias	  */
2865
2866static void
2867s_unreq (int a ATTRIBUTE_UNUSED)
2868{
2869  char * name;
2870  char saved_char;
2871
2872  name = input_line_pointer;
2873
2874  while (*input_line_pointer != 0
2875	 && *input_line_pointer != ' '
2876	 && *input_line_pointer != '\n')
2877    ++input_line_pointer;
2878
2879  saved_char = *input_line_pointer;
2880  *input_line_pointer = 0;
2881
2882  if (!*name)
2883    as_bad (_("invalid syntax for .unreq directive"));
2884  else
2885    {
2886      struct reg_entry *reg
2887	= (struct reg_entry *) str_hash_find (arm_reg_hsh, name);
2888
2889      if (!reg)
2890	as_bad (_("unknown register alias '%s'"), name);
2891      else if (reg->builtin)
2892	as_warn (_("ignoring attempt to use .unreq on fixed register name: '%s'"),
2893		 name);
2894      else
2895	{
2896	  char * p;
2897	  char * nbuf;
2898
2899	  str_hash_delete (arm_reg_hsh, name);
2900	  free ((char *) reg->name);
2901	  free (reg->neon);
2902	  free (reg);
2903
2904	  /* Also locate the all upper case and all lower case versions.
2905	     Do not complain if we cannot find one or the other as it
2906	     was probably deleted above.  */
2907
2908	  nbuf = strdup (name);
2909	  for (p = nbuf; *p; p++)
2910	    *p = TOUPPER (*p);
2911	  reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2912	  if (reg)
2913	    {
2914	      str_hash_delete (arm_reg_hsh, nbuf);
2915	      free ((char *) reg->name);
2916	      free (reg->neon);
2917	      free (reg);
2918	    }
2919
2920	  for (p = nbuf; *p; p++)
2921	    *p = TOLOWER (*p);
2922	  reg = (struct reg_entry *) str_hash_find (arm_reg_hsh, nbuf);
2923	  if (reg)
2924	    {
2925	      str_hash_delete (arm_reg_hsh, nbuf);
2926	      free ((char *) reg->name);
2927	      free (reg->neon);
2928	      free (reg);
2929	    }
2930
2931	  free (nbuf);
2932	}
2933    }
2934
2935  *input_line_pointer = saved_char;
2936  demand_empty_rest_of_line ();
2937}
2938
2939/* Directives: Instruction set selection.  */
2940
2941#ifdef OBJ_ELF
2942/* This code is to handle mapping symbols as defined in the ARM ELF spec.
2943   (See "Mapping symbols", section 4.5.5, ARM AAELF version 1.0).
2944   Note that previously, $a and $t has type STT_FUNC (BSF_OBJECT flag),
2945   and $d has type STT_OBJECT (BSF_OBJECT flag). Now all three are untyped.  */
2946
2947/* Create a new mapping symbol for the transition to STATE.  */
2948
static void
make_mapping_symbol (enum mstate state, valueT value, fragS *frag)
{
  symbolS * symbolP;
  const char * symname;
  int type;

  /* Pick the AAELF mapping-symbol name for STATE; all three symbols
     are untyped (BSF_NO_FLAGS), per the comment above.  */
  switch (state)
    {
    case MAP_DATA:
      symname = "$d";
      type = BSF_NO_FLAGS;
      break;
    case MAP_ARM:
      symname = "$a";
      type = BSF_NO_FLAGS;
      break;
    case MAP_THUMB:
      symname = "$t";
      type = BSF_NO_FLAGS;
      break;
    default:
      abort ();
    }

  /* Create the local symbol at offset VALUE within FRAG.  */
  symbolP = symbol_new (symname, now_seg, frag, value);
  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;

  /* Mirror the ARM/Thumb state onto the symbol's target flags.  */
  switch (state)
    {
    case MAP_ARM:
      THUMB_SET_FUNC (symbolP, 0);
      ARM_SET_THUMB (symbolP, 0);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_THUMB:
      THUMB_SET_FUNC (symbolP, 1);
      ARM_SET_THUMB (symbolP, 1);
      ARM_SET_INTERWORK (symbolP, support_interwork);
      break;

    case MAP_DATA:
    default:
      break;
    }

  /* Save the mapping symbols for future reference.  Also check that
     we do not place two mapping symbols at the same offset within a
     frag.  We'll handle overlap between frags in
     check_mapping_symbols.

     If .fill or other data filling directive generates zero sized data,
     the mapping symbol for the following code will have the same value
     as the one generated for the data filling directive.  In this case,
     we replace the old symbol with the new one at the same address.  */
  if (value == 0)
    {
      if (frag->tc_frag_data.first_map != NULL)
	{
	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP, &symbol_lastP);
	}
      frag->tc_frag_data.first_map = symbolP;
    }
  if (frag->tc_frag_data.last_map != NULL)
    {
      know (S_GET_VALUE (frag->tc_frag_data.last_map) <= S_GET_VALUE (symbolP));
      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP, &symbol_lastP);
    }
  frag->tc_frag_data.last_map = symbolP;
}
3022
3023/* We must sometimes convert a region marked as code to data during
3024   code alignment, if an odd number of bytes have to be padded.  The
3025   code mapping symbol is pushed to an aligned address.  */
3026
3027static void
3028insert_data_mapping_symbol (enum mstate state,
3029			    valueT value, fragS *frag, offsetT bytes)
3030{
3031  /* If there was already a mapping symbol, remove it.  */
3032  if (frag->tc_frag_data.last_map != NULL
3033      && S_GET_VALUE (frag->tc_frag_data.last_map) == frag->fr_address + value)
3034    {
3035      symbolS *symp = frag->tc_frag_data.last_map;
3036
3037      if (value == 0)
3038	{
3039	  know (frag->tc_frag_data.first_map == symp);
3040	  frag->tc_frag_data.first_map = NULL;
3041	}
3042      frag->tc_frag_data.last_map = NULL;
3043      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
3044    }
3045
3046  make_mapping_symbol (MAP_DATA, value, frag);
3047  make_mapping_symbol (state, value + bytes, frag);
3048}
3049
3050static void mapping_state_2 (enum mstate state, int max_chars);
3051
/* Set the mapping state to STATE.  Only call this when about to
   emit some STATE bytes to the file.  */

#define TRANSITION(from, to) (mapstate == (from) && state == (to))
void
mapping_state (enum mstate state)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (state == MAP_ARM || state == MAP_THUMB)
    /*  PR gas/12931
	All ARM instructions require 4-byte alignment.
	(Almost) all Thumb instructions require 2-byte alignment.

	When emitting instructions into any section, mark the section
	appropriately.

	Some Thumb instructions are alignment-sensitive modulo 4 bytes,
	but themselves require 2-byte alignment; this applies to some
	PC- relative forms.  However, these cases will involve implicit
	literal pool generation or an explicit .align >=2, both of
	which will cause the section to be marked with sufficient
	alignment.  Thus, we don't handle those cases here.  */
    record_alignment (now_seg, state == MAP_ARM ? 2 : 1);

  if (TRANSITION (MAP_UNDEFINED, MAP_DATA))
    /* This case will be evaluated later.  */
    return;

  /* Delegate to the variant that also knows how far back to place
     the mapping symbol (zero bytes here, since nothing has been
     allocated yet).  */
  mapping_state_2 (state, 0);
}
3088
/* Same as mapping_state, but MAX_CHARS bytes have already been
   allocated.  Put the mapping symbol that far back.  */

static void
mapping_state_2 (enum mstate state, int max_chars)
{
  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;

  /* Only ordinary loadable sections get mapping symbols.  */
  if (!SEG_NORMAL (now_seg))
    return;

  if (mapstate == state)
    /* The mapping symbol has already been emitted.
       There is nothing else to do.  */
    return;

  if (TRANSITION (MAP_UNDEFINED, MAP_ARM)
	  || TRANSITION (MAP_UNDEFINED, MAP_THUMB))
    {
      /* The first instruction of the section: if any data bytes were
	 emitted before it (i.e. we are not at the very start of the
	 first frag), retroactively mark the start of the section as
	 data with a $d symbol at offset 0.  */
      struct frag * const frag_first = seg_info (now_seg)->frchainP->frch_root;
      const int add_symbol = (frag_now != frag_first) || (frag_now_fix () > 0);

      if (add_symbol)
	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
    }

  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
}
3118#undef TRANSITION
3119#else
3120#define mapping_state(x) ((void)0)
3121#define mapping_state_2(x, y) ((void)0)
3122#endif
3123
3124/* Find the real, Thumb encoded start of a Thumb function.  */
3125
3126#ifdef OBJ_COFF
3127static symbolS *
3128find_real_start (symbolS * symbolP)
3129{
3130  char *       real_start;
3131  const char * name = S_GET_NAME (symbolP);
3132  symbolS *    new_target;
3133
3134  /* This definition must agree with the one in gcc/config/arm/thumb.c.	 */
3135#define STUB_NAME ".real_start_of"
3136
3137  if (name == NULL)
3138    abort ();
3139
3140  /* The compiler may generate BL instructions to local labels because
3141     it needs to perform a branch to a far away location. These labels
3142     do not have a corresponding ".real_start_of" label.  We check
3143     both for S_IS_LOCAL and for a leading dot, to give a way to bypass
3144     the ".real_start_of" convention for nonlocal branches.  */
3145  if (S_IS_LOCAL (symbolP) || name[0] == '.')
3146    return symbolP;
3147
3148  real_start = concat (STUB_NAME, name, NULL);
3149  new_target = symbol_find (real_start);
3150  free (real_start);
3151
3152  if (new_target == NULL)
3153    {
3154      as_warn (_("Failed to find real start of function: %s\n"), name);
3155      new_target = symbolP;
3156    }
3157
3158  return new_target;
3159}
3160#endif
3161
3162static void
3163opcode_select (int width)
3164{
3165  switch (width)
3166    {
3167    case 16:
3168      if (! thumb_mode)
3169	{
3170	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
3171	    as_bad (_("selected processor does not support THUMB opcodes"));
3172
3173	  thumb_mode = 1;
3174	  /* No need to force the alignment, since we will have been
3175	     coming from ARM mode, which is word-aligned.  */
3176	  record_alignment (now_seg, 1);
3177	}
3178      break;
3179
3180    case 32:
3181      if (thumb_mode)
3182	{
3183	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
3184	    as_bad (_("selected processor does not support ARM opcodes"));
3185
3186	  thumb_mode = 0;
3187
3188	  if (!need_pass_2)
3189	    frag_align (2, 0, 0);
3190
3191	  record_alignment (now_seg, 1);
3192	}
3193      break;
3194
3195    default:
3196      as_bad (_("invalid instruction size selected (%d)"), width);
3197    }
3198}
3199
/* Handle the .arm directive: switch to 32-bit ARM encoding.  */

static void
s_arm (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (32);
  demand_empty_rest_of_line ();
}
3206
/* Handle the .thumb directive: switch to 16-bit Thumb encoding.  */

static void
s_thumb (int ignore ATTRIBUTE_UNUSED)
{
  opcode_select (16);
  demand_empty_rest_of_line ();
}
3213
3214static void
3215s_code (int unused ATTRIBUTE_UNUSED)
3216{
3217  int temp;
3218
3219  temp = get_absolute_expression ();
3220  switch (temp)
3221    {
3222    case 16:
3223    case 32:
3224      opcode_select (temp);
3225      break;
3226
3227    default:
3228      as_bad (_("invalid operand to .code directive (%d) (expecting 16 or 32)"), temp);
3229    }
3230}
3231
3232static void
3233s_force_thumb (int ignore ATTRIBUTE_UNUSED)
3234{
3235  /* If we are not already in thumb mode go into it, EVEN if
3236     the target processor does not support thumb instructions.
3237     This is used by gcc/config/arm/lib1funcs.asm for example
3238     to compile interworking support functions even if the
3239     target processor should not support interworking.	*/
3240  if (! thumb_mode)
3241    {
3242      thumb_mode = 2;
3243      record_alignment (now_seg, 1);
3244    }
3245
3246  demand_empty_rest_of_line ();
3247}
3248
/* Handle the .thumb_func directive: switch to Thumb encoding and
   flag the next label as naming a Thumb function.  */

static void
s_thumb_func (int ignore ATTRIBUTE_UNUSED)
{
  s_thumb (0);

  /* The following label is the name/address of the start of a Thumb function.
     We need to know this for the interworking support.	 */
  label_is_thumb_function_name = TRUE;
}
3258
/* Perform a .set directive, but also mark the alias as
   being a thumb function.  When EQUIV is non-zero, assigning to an
   already-defined symbol is diagnosed as an error (".thumb_equ"
   semantics); with zero it behaves like plain ".thumb_set".  */

static void
s_thumb_set (int equiv)
{
  /* XXX the following is a duplicate of the code for s_set() in read.c
     We cannot just call that code as we need to get at the symbol that
     is created.  */
  char *    name;
  char	    delim;
  char *    end_name;
  symbolS * symbolP;

  /* Especial apologies for the random logic:
     This just grew, and could be parsed much more simply!
     Dean - in haste.  */
  delim	    = get_symbol_name (& name);
  end_name  = input_line_pointer;
  (void) restore_line_pointer (delim);

  if (*input_line_pointer != ',')
    {
      /* Temporarily NUL-terminate the name so the diagnostic prints
	 only the symbol, then put the delimiter back.  */
      *end_name = 0;
      as_bad (_("expected comma after name \"%s\""), name);
      *end_name = delim;
      ignore_rest_of_line ();
      return;
    }

  input_line_pointer++;
  /* NUL-terminate NAME in the input buffer while we look it up;
     restored below once the symbol exists.  */
  *end_name = 0;

  if (name[0] == '.' && name[1] == '\0')
    {
      /* XXX - this should not happen to .thumb_set.  */
      abort ();
    }

  if ((symbolP = symbol_find (name)) == NULL
      && (symbolP = md_undefined_symbol (name)) == NULL)
    {
#ifndef NO_LISTING
      /* When doing symbol listings, play games with dummy fragments living
	 outside the normal fragment chain to record the file and line info
	 for this symbol.  */
      if (listing & LISTING_SYMBOLS)
	{
	  extern struct list_info_struct * listing_tail;
	  fragS * dummy_frag = (fragS * ) xmalloc (sizeof (fragS));

	  memset (dummy_frag, 0, sizeof (fragS));
	  dummy_frag->fr_type = rs_fill;
	  dummy_frag->line = listing_tail;
	  symbolP = symbol_new (name, undefined_section, dummy_frag, 0);
	  dummy_frag->fr_symbol = symbolP;
	}
      else
#endif
	symbolP = symbol_new (name, undefined_section, &zero_address_frag, 0);

#ifdef OBJ_COFF
      /* "set" symbols are local unless otherwise specified.  */
      SF_SET_LOCAL (symbolP);
#endif /* OBJ_COFF  */
    }				/* Make a new symbol.  */

  symbol_table_insert (symbolP);

  /* Undo the NUL-termination; the input buffer is whole again.  */
  * end_name = delim;

  if (equiv
      && S_IS_DEFINED (symbolP)
      && S_GET_SEGMENT (symbolP) != reg_section)
    as_bad (_("symbol `%s' already defined"), S_GET_NAME (symbolP));

  /* Parse and assign the value expression (the part after the comma).  */
  pseudo_set (symbolP);

  demand_empty_rest_of_line ();

  /* XXX Now we come to the Thumb specific bit of code.	 */

  THUMB_SET_FUNC (symbolP, 1);
  ARM_SET_THUMB (symbolP, 1);
#if defined OBJ_ELF || defined OBJ_COFF
  ARM_SET_INTERWORK (symbolP, support_interwork);
#endif
}
3347
3348/* Directives: Mode selection.  */
3349
3350/* .syntax [unified|divided] - choose the new unified syntax
3351   (same for Arm and Thumb encoding, modulo slight differences in what
3352   can be represented) or the old divergent syntax for each mode.  */
3353static void
3354s_syntax (int unused ATTRIBUTE_UNUSED)
3355{
3356  char *name, delim;
3357
3358  delim = get_symbol_name (& name);
3359
3360  if (!strcasecmp (name, "unified"))
3361    unified_syntax = TRUE;
3362  else if (!strcasecmp (name, "divided"))
3363    unified_syntax = FALSE;
3364  else
3365    {
3366      as_bad (_("unrecognized syntax mode \"%s\""), name);
3367      return;
3368    }
3369  (void) restore_line_pointer (delim);
3370  demand_empty_rest_of_line ();
3371}
3372
3373/* Directives: sectioning and alignment.  */
3374
/* Handle the .bss directive: switch output to the BSS section.  */

static void
s_bss (int ignore ATTRIBUTE_UNUSED)
{
  /* We don't support putting frags in the BSS segment, we fake it by
     marking in_bss, then looking at s_skip for clues.	*/
  subseg_set (bss_section, 0);
  demand_empty_rest_of_line ();

#ifdef md_elf_section_change_hook
  md_elf_section_change_hook ();
#endif
}
3387
/* Handle the .even directive: align to a 2-byte boundary.  */

static void
s_even (int ignore ATTRIBUTE_UNUSED)
{
  /* Never make frag if expect extra pass.  */
  if (!need_pass_2)
    frag_align (1, 0, 0);

  record_alignment (now_seg, 1);

  demand_empty_rest_of_line ();
}
3399
3400/* Directives: CodeComposer Studio.  */
3401
3402/*  .ref  (for CodeComposer Studio syntax only).  */
3403static void
3404s_ccs_ref (int unused ATTRIBUTE_UNUSED)
3405{
3406  if (codecomposer_syntax)
3407    ignore_rest_of_line ();
3408  else
3409    as_bad (_(".ref pseudo-op only available with -mccs flag."));
3410}
3411
3412/*  If name is not NULL, then it is used for marking the beginning of a
3413    function, whereas if it is NULL then it means the function end.  */
3414static void
3415asmfunc_debug (const char * name)
3416{
3417  static const char * last_name = NULL;
3418
3419  if (name != NULL)
3420    {
3421      gas_assert (last_name == NULL);
3422      last_name = name;
3423
3424      if (debug_type == DEBUG_STABS)
3425         stabs_generate_asm_func (name, name);
3426    }
3427  else
3428    {
3429      gas_assert (last_name != NULL);
3430
3431      if (debug_type == DEBUG_STABS)
3432        stabs_generate_asm_endfunc (last_name, last_name);
3433
3434      last_name = NULL;
3435    }
3436}
3437
3438static void
3439s_ccs_asmfunc (int unused ATTRIBUTE_UNUSED)
3440{
3441  if (codecomposer_syntax)
3442    {
3443      switch (asmfunc_state)
3444	{
3445	case OUTSIDE_ASMFUNC:
3446	  asmfunc_state = WAITING_ASMFUNC_NAME;
3447	  break;
3448
3449	case WAITING_ASMFUNC_NAME:
3450	  as_bad (_(".asmfunc repeated."));
3451	  break;
3452
3453	case WAITING_ENDASMFUNC:
3454	  as_bad (_(".asmfunc without function."));
3455	  break;
3456	}
3457      demand_empty_rest_of_line ();
3458    }
3459  else
3460    as_bad (_(".asmfunc pseudo-op only available with -mccs flag."));
3461}
3462
3463static void
3464s_ccs_endasmfunc (int unused ATTRIBUTE_UNUSED)
3465{
3466  if (codecomposer_syntax)
3467    {
3468      switch (asmfunc_state)
3469	{
3470	case OUTSIDE_ASMFUNC:
3471	  as_bad (_(".endasmfunc without a .asmfunc."));
3472	  break;
3473
3474	case WAITING_ASMFUNC_NAME:
3475	  as_bad (_(".endasmfunc without function."));
3476	  break;
3477
3478	case WAITING_ENDASMFUNC:
3479	  asmfunc_state = OUTSIDE_ASMFUNC;
3480	  asmfunc_debug (NULL);
3481	  break;
3482	}
3483      demand_empty_rest_of_line ();
3484    }
3485  else
3486    as_bad (_(".endasmfunc pseudo-op only available with -mccs flag."));
3487}
3488
3489static void
3490s_ccs_def (int name)
3491{
3492  if (codecomposer_syntax)
3493    s_globl (name);
3494  else
3495    as_bad (_(".def pseudo-op only available with -mccs flag."));
3496}
3497
3498/* Directives: Literal pools.  */
3499
3500static literal_pool *
3501find_literal_pool (void)
3502{
3503  literal_pool * pool;
3504
3505  for (pool = list_of_pools; pool != NULL; pool = pool->next)
3506    {
3507      if (pool->section == now_seg
3508	  && pool->sub_section == now_subseg)
3509	break;
3510    }
3511
3512  return pool;
3513}
3514
3515static literal_pool *
3516find_or_make_literal_pool (void)
3517{
3518  /* Next literal pool ID number.  */
3519  static unsigned int latest_pool_num = 1;
3520  literal_pool *      pool;
3521
3522  pool = find_literal_pool ();
3523
3524  if (pool == NULL)
3525    {
3526      /* Create a new pool.  */
3527      pool = XNEW (literal_pool);
3528      if (! pool)
3529	return NULL;
3530
3531      pool->next_free_entry = 0;
3532      pool->section	    = now_seg;
3533      pool->sub_section	    = now_subseg;
3534      pool->next	    = list_of_pools;
3535      pool->symbol	    = NULL;
3536      pool->alignment	    = 2;
3537
3538      /* Add it to the list.  */
3539      list_of_pools = pool;
3540    }
3541
3542  /* New pools, and emptied pools, will have a NULL symbol.  */
3543  if (pool->symbol == NULL)
3544    {
3545      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
3546				    &zero_address_frag, 0);
3547      pool->id = latest_pool_num ++;
3548    }
3549
3550  /* Done.  */
3551  return pool;
3552}
3553
/* Add the literal in the global 'inst'
   structure to the relevant literal pool.  NBYTES is 4 for a single
   word or 8 for a doubleword.  Identical values are shared with an
   existing entry.  On success the instruction's first reloc is
   rewritten to reference the pool symbol plus the entry's byte
   offset, and SUCCESS is returned; on overflow or bad operand types
   FAIL is returned with inst.error set.  */

static int
add_to_lit_pool (unsigned int nbytes)
{
#define PADDING_SLOT 0x1
#define LIT_ENTRY_SIZE_MASK 0xFF
  literal_pool * pool;
  unsigned int entry, pool_size = 0;
  bfd_boolean padding_slot_p = FALSE;
  unsigned imm1 = 0;
  unsigned imm2 = 0;

  if (nbytes == 8)
    {
      /* Split a 64-bit value into the two 32-bit halves that will be
	 stored as separate pool entries, honouring endianness.  */
      imm1 = inst.operands[1].imm;
      imm2 = (inst.operands[1].regisimm ? inst.operands[1].reg
	       : inst.relocs[0].exp.X_unsigned ? 0
	       : ((bfd_int64_t) inst.operands[1].imm) >> 32);
      if (target_big_endian)
	{
	  imm1 = imm2;
	  imm2 = inst.operands[1].imm;
	}
    }

  pool = find_or_make_literal_pool ();

  /* Check if this literal value is already in the pool.  */
  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
      if (nbytes == 4)
	{
	  /* Reusable 4-byte entry: either the same constant...  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_constant)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_md == nbytes)
	      && (pool->literals[entry].X_unsigned
		  == inst.relocs[0].exp.X_unsigned))
	    break;

	  /* ... or the same symbolic expression.  */
	  if ((pool->literals[entry].X_op == inst.relocs[0].exp.X_op)
	      && (inst.relocs[0].exp.X_op == O_symbol)
	      && (pool->literals[entry].X_add_number
		  == inst.relocs[0].exp.X_add_number)
	      && (pool->literals[entry].X_add_symbol
		  == inst.relocs[0].exp.X_add_symbol)
	      && (pool->literals[entry].X_op_symbol
		  == inst.relocs[0].exp.X_op_symbol)
	      && (pool->literals[entry].X_md == nbytes))
	    break;
	}
      else if ((nbytes == 8)
	       && !(pool_size & 0x7)
	       && ((entry + 1) != pool->next_free_entry)
	       && (pool->literals[entry].X_op == O_constant)
	       && (pool->literals[entry].X_add_number == (offsetT) imm1)
	       && (pool->literals[entry].X_unsigned
		   == inst.relocs[0].exp.X_unsigned)
	       && (pool->literals[entry + 1].X_op == O_constant)
	       && (pool->literals[entry + 1].X_add_number == (offsetT) imm2)
	       && (pool->literals[entry + 1].X_unsigned
		   == inst.relocs[0].exp.X_unsigned))
	/* Matching 8-byte-aligned pair of constant entries.  */
	break;

      /* A padding slot inserted for 8-byte alignment can be reused
	 for a new 4-byte literal.  */
      padding_slot_p = ((pool->literals[entry].X_md >> 8) == PADDING_SLOT);
      if (padding_slot_p && (nbytes == 4))
	break;

      pool_size += 4;
    }

  /* Do we need to create a new entry?	*/
  if (entry == pool->next_free_entry)
    {
      if (entry >= MAX_LITERAL_POOL_SIZE)
	{
	  inst.error = _("literal pool overflow");
	  return FAIL;
	}

      if (nbytes == 8)
	{
	  /* For 8-byte entries, we align to an 8-byte boundary,
	     and split it into two 4-byte entries, because on 32-bit
	     host, 8-byte constants are treated as big num, thus
	     saved in "generic_bignum" which will be overwritten
	     by later assignments.

	     We also need to make sure there is enough space for
	     the split.

	     We also check to make sure the literal operand is a
	     constant number.  */
	  if (!(inst.relocs[0].exp.X_op == O_constant
		|| inst.relocs[0].exp.X_op == O_big))
	    {
	      inst.error = _("invalid type for literal pool");
	      return FAIL;
	    }
	  else if (pool_size & 0x7)
	    {
	      if ((entry + 2) >= MAX_LITERAL_POOL_SIZE)
		{
		  inst.error = _("literal pool overflow");
		  return FAIL;
		}

	      /* Insert a zero-valued padding slot so the pair below
		 starts on an 8-byte boundary; marked with
		 PADDING_SLOT in the upper bits of X_md so it can be
		 reused for a later 4-byte literal.  */
	      pool->literals[entry] = inst.relocs[0].exp;
	      pool->literals[entry].X_op = O_constant;
	      pool->literals[entry].X_add_number = 0;
	      pool->literals[entry++].X_md = (PADDING_SLOT << 8) | 4;
	      pool->next_free_entry += 1;
	      pool_size += 4;
	    }
	  else if ((entry + 1) >= MAX_LITERAL_POOL_SIZE)
	    {
	      inst.error = _("literal pool overflow");
	      return FAIL;
	    }

	  /* Store the doubleword as two consecutive 4-byte entries.  */
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm1;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry++].X_md = 4;
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_op = O_constant;
	  pool->literals[entry].X_add_number = imm2;
	  pool->literals[entry].X_unsigned = inst.relocs[0].exp.X_unsigned;
	  pool->literals[entry].X_md = 4;
	  pool->alignment = 3;
	  pool->next_free_entry += 1;
	}
      else
	{
	  pool->literals[entry] = inst.relocs[0].exp;
	  pool->literals[entry].X_md = 4;
	}

#ifdef OBJ_ELF
      /* PR ld/12974: Record the location of the first source line to reference
	 this entry in the literal pool.  If it turns out during linking that the
	 symbol does not exist we will be able to give an accurate line number for
	 the (first use of the) missing reference.  */
      if (debug_type == DEBUG_DWARF2)
	dwarf2_where (pool->locs + entry);
#endif
      pool->next_free_entry += 1;
    }
  else if (padding_slot_p)
    {
      /* Overwrite a reusable padding slot with the new literal.  */
      pool->literals[entry] = inst.relocs[0].exp;
      pool->literals[entry].X_md = nbytes;
    }

  /* Rewrite the instruction's reloc to address the pool entry.  */
  inst.relocs[0].exp.X_op	      = O_symbol;
  inst.relocs[0].exp.X_add_number = pool_size;
  inst.relocs[0].exp.X_add_symbol = pool->symbol;

  return SUCCESS;
}
3718
/* Called when a label without a trailing colon begins a statement
   (CodeComposer Studio syntax).  If we are waiting for an .asmfunc
   name, treat this label as the function name.  Returns FALSE if the
   label is invalid.  */

bfd_boolean
tc_start_label_without_colon (void)
{
  bfd_boolean ret = TRUE;

  if (codecomposer_syntax && asmfunc_state == WAITING_ASMFUNC_NAME)
    {
      const char *label = input_line_pointer;

      /* Scan backwards from the current position to the start of the
	 line to find the beginning of the label text.  */
      while (!is_end_of_line[(int) label[-1]])
	--label;

      if (*label == '.')
	{
	  as_bad (_("Invalid label '%s'"), label);
	  ret = FALSE;
	}

      asmfunc_debug (label);

      asmfunc_state = WAITING_ENDASMFUNC;
    }

  return ret;
}
3744
/* Can't use symbol_new here, so have to create a symbol and then at
   a later date assign it a value. That's what these functions do.  */

/* Fill in SYMBOLP (a symbol created earlier with an unknown value)
   with NAME, SEGMENT, value VALU and owning FRAG, then append it to
   the global symbol chain and run the object-format and target
   new-symbol hooks.  */

static void
symbol_locate (symbolS *    symbolP,
	       const char * name,	/* It is copied, the caller can modify.	 */
	       segT	    segment,	/* Segment identifier (SEG_<something>).  */
	       valueT	    valu,	/* Symbol value.  */
	       fragS *	    frag)	/* Associated fragment.	 */
{
  size_t name_length;
  char * preserved_copy_of_name;

  /* Copy the name into the notes obstack so the caller's buffer may
     be reused.  */
  name_length = strlen (name) + 1;   /* +1 for \0.  */
  obstack_grow (&notes, name, name_length);
  preserved_copy_of_name = (char *) obstack_finish (&notes);

#ifdef tc_canonicalize_symbol_name
  preserved_copy_of_name =
    tc_canonicalize_symbol_name (preserved_copy_of_name);
#endif

  S_SET_NAME (symbolP, preserved_copy_of_name);

  S_SET_SEGMENT (symbolP, segment);
  S_SET_VALUE (symbolP, valu);
  symbol_clear_list_pointers (symbolP);

  symbol_set_frag (symbolP, frag);

  /* Link to end of symbol chain.  */
  {
    extern int symbol_table_frozen;

    if (symbol_table_frozen)
      abort ();
  }

  symbol_append (symbolP, symbol_lastP, & symbol_rootP, & symbol_lastP);

  obj_symbol_new_hook (symbolP);

#ifdef tc_symbol_new_hook
  tc_symbol_new_hook (symbolP);
#endif

#ifdef DEBUG_SYMS
  verify_symbol_chain (symbol_rootP, symbol_lastP);
#endif /* DEBUG_SYMS  */
}
3795
/* Handle the .ltorg directive: dump the current literal pool at this
   point in the output, define its label symbol here, and mark the
   pool as empty so it can be refilled and dumped again later.  */

static void
s_ltorg (int ignored ATTRIBUTE_UNUSED)
{
  unsigned int entry;
  literal_pool * pool;
  char sym_name[20];

  pool = find_literal_pool ();
  if (pool == NULL
      || pool->symbol == NULL
      || pool->next_free_entry == 0)
    /* Nothing pending for this section/subsection.  */
    return;

  /* Align pool as you have word accesses.
     Only make a frag if we have to.  */
  if (!need_pass_2)
    frag_align (pool->alignment, 0, 0);

  record_alignment (now_seg, 2);

#ifdef OBJ_ELF
  /* Pool contents are data; emit a $d mapping symbol.  */
  seg_info (now_seg)->tc_segment_info_data.mapstate = MAP_DATA;
  make_mapping_symbol (MAP_DATA, (valueT) frag_now_fix (), frag_now);
#endif
  /* The \002 byte makes this name unwritable in source code,
     presumably to keep pool labels out of the user namespace —
     NOTE(review): confirm intent.  */
  sprintf (sym_name, "$$lit_\002%x", pool->id);

  /* Define the pool's label symbol at the current location.  */
  symbol_locate (pool->symbol, sym_name, now_seg,
		 (valueT) frag_now_fix (), frag_now);
  symbol_table_insert (pool->symbol);

  ARM_SET_THUMB (pool->symbol, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (pool->symbol, support_interwork);
#endif

  for (entry = 0; entry < pool->next_free_entry; entry ++)
    {
#ifdef OBJ_ELF
      if (debug_type == DEBUG_DWARF2)
	dwarf2_gen_line_info (frag_now_fix (), pool->locs + entry);
#endif
      /* First output the expression in the instruction to the pool.  */
      emit_expr (&(pool->literals[entry]),
		 pool->literals[entry].X_md & LIT_ENTRY_SIZE_MASK);
    }

  /* Mark the pool as empty.  */
  pool->next_free_entry = 0;
  pool->symbol = NULL;
}
3847
3848#ifdef OBJ_ELF
3849/* Forward declarations for functions below, in the MD interface
3850   section.  */
3851static void fix_new_arm (fragS *, int, short, expressionS *, int, int);
3852static valueT create_unwind_entry (int);
3853static void start_unwind_section (const segT, int);
3854static void add_unwind_opcode (valueT, int);
3855static void flush_pending_unwind (void);
3856
3857/* Directives: Data.  */
3858
/* Handle data directives (.word etc.) of NBYTES each, allowing an
   optional relocation suffix such as "(got)" after a symbol; emits
   either plain data or data with an explicit fixup.  */

static void
s_arm_elf_cons (int nbytes)
{
  expressionS exp;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

#ifdef md_cons_align
  md_cons_align (nbytes);
#endif

  mapping_state (MAP_DATA);
  do
    {
      int reloc;
      char *base = input_line_pointer;

      expression (& exp);

      if (exp.X_op != O_symbol)
	emit_expr (&exp, (unsigned int) nbytes);
      else
	{
	  char *before_reloc = input_line_pointer;
	  reloc = parse_reloc (&input_line_pointer);
	  if (reloc == -1)
	    {
	      as_bad (_("unrecognized relocation suffix"));
	      ignore_rest_of_line ();
	      return;
	    }
	  else if (reloc == BFD_RELOC_UNUSED)
	    /* No suffix present; emit the symbol normally.  */
	    emit_expr (&exp, (unsigned int) nbytes);
	  else
	    {
	      reloc_howto_type *howto = (reloc_howto_type *)
		  bfd_reloc_type_lookup (stdoutput,
					 (bfd_reloc_code_real_type) reloc);
	      int size = bfd_get_reloc_size (howto);

	      if (reloc == BFD_RELOC_ARM_PLT32)
		{
		  as_bad (_("(plt) is only valid on branch targets"));
		  reloc = BFD_RELOC_UNUSED;
		  size = 0;
		}

	      if (size > nbytes)
		as_bad (ngettext ("%s relocations do not fit in %d byte",
				  "%s relocations do not fit in %d bytes",
				  nbytes),
			howto->name, nbytes);
	      else
		{
		  /* We've parsed an expression stopping at O_symbol.
		     But there may be more expression left now that we
		     have parsed the relocation marker.  Parse it again.
		     XXX Surely there is a cleaner way to do this.  */
		  char *p = input_line_pointer;
		  int offset;
		  char *save_buf = XNEWVEC (char, input_line_pointer - base);

		  /* Save the raw input, splice the reloc suffix out of
		     the buffer, re-run expression () over the joined
		     text, then restore the buffer.  */
		  memcpy (save_buf, base, input_line_pointer - base);
		  memmove (base + (input_line_pointer - before_reloc),
			   base, before_reloc - base);

		  input_line_pointer = base + (input_line_pointer-before_reloc);
		  expression (&exp);
		  memcpy (base, save_buf, p - base);

		  /* Place the fixup in the least significant bytes of
		     the emitted field.  */
		  offset = nbytes - size;
		  p = frag_more (nbytes);
		  memset (p, 0, nbytes);
		  fix_new_exp (frag_now, p - frag_now->fr_literal + offset,
			       size, &exp, 0, (enum bfd_reloc_code_real) reloc);
		  free (save_buf);
		}
	    }
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
3953
3954/* Emit an expression containing a 32-bit thumb instruction.
3955   Implementation based on put_thumb32_insn.  */
3956
3957static void
3958emit_thumb32_expr (expressionS * exp)
3959{
3960  expressionS exp_high = *exp;
3961
3962  exp_high.X_add_number = (unsigned long)exp_high.X_add_number >> 16;
3963  emit_expr (& exp_high, (unsigned int) THUMB_SIZE);
3964  exp->X_add_number &= 0xffff;
3965  emit_expr (exp, (unsigned int) THUMB_SIZE);
3966}
3967
3968/*  Guess the instruction size based on the opcode.  */
3969
static int
thumb_insn_size (int opcode)
{
  unsigned int u = (unsigned int) opcode;

  /* Values below 0xe800 can only be 16-bit encodings; values with a
     full 32-bit pattern at or above 0xe8000000 are 32-bit encodings;
     anything in between is ambiguous.  */
  if (u < 0xe800u)
    return 2;
  if (u >= 0xe8000000u)
    return 4;
  return 0;
}
3980
/* Emit one .inst operand EXP of NBYTES (0 = deduce from the opcode
   value for Thumb).  Returns TRUE if something was emitted, FALSE on
   error (a diagnostic has been issued).  */

static bfd_boolean
emit_insn (expressionS *exp, int nbytes)
{
  int size = 0;

  if (exp->X_op == O_constant)
    {
      size = nbytes;

      /* No explicit width suffix: guess from the opcode pattern.  */
      if (size == 0)
	size = thumb_insn_size (exp->X_add_number);

      if (size != 0)
	{
	  if (size == 2 && (unsigned int)exp->X_add_number > 0xffffu)
	    {
	      as_bad (_(".inst.n operand too big. "\
			"Use .inst.w instead"));
	      size = 0;
	    }
	  else
	    {
	      /* Keep the IT/predication state machine consistent with
		 the hand-coded instruction being emitted.  */
	      if (now_pred.state == AUTOMATIC_PRED_BLOCK)
		set_pred_insn_type_nonvoid (OUTSIDE_PRED_INSN, 0);
	      else
		set_pred_insn_type_nonvoid (NEUTRAL_IT_INSN, 0);

	      /* 32-bit Thumb instructions are two halfwords, which on
		 little-endian targets cannot be written as one word.  */
	      if (thumb_mode && (size > THUMB_SIZE) && !target_big_endian)
		emit_thumb32_expr (exp);
	      else
		emit_expr (exp, (unsigned int) size);

	      it_fsm_post_encode ();
	    }
	}
      else
	as_bad (_("cannot determine Thumb instruction size. "	\
		  "Use .inst.n/.inst.w instead"));
    }
  else
    as_bad (_("constant expression required"));

  return (size != 0);
}
4025
4026/* Like s_arm_elf_cons but do not use md_cons_align and
4027   set the mapping state to MAP_ARM/MAP_THUMB.  */
4028
/* Handle the .inst / .inst.n / .inst.w directives: emit raw
   instruction words.  NBYTES is 0 (deduce), 2 (.inst.n) or 4
   (.inst.w).  */

static void
s_arm_elf_inst (int nbytes)
{
  if (is_it_end_of_statement ())
    {
      demand_empty_rest_of_line ();
      return;
    }

  /* Calling mapping_state () here will not change ARM/THUMB,
     but will ensure not to be in DATA state.  */

  if (thumb_mode)
    mapping_state (MAP_THUMB);
  else
    {
      /* Width suffixes only make sense for Thumb.  */
      if (nbytes != 0)
	{
	  as_bad (_("width suffixes are invalid in ARM mode"));
	  ignore_rest_of_line ();
	  return;
	}

      nbytes = 4;

      mapping_state (MAP_ARM);
    }

  /* Emit each comma-separated operand as one instruction.  */
  do
    {
      expressionS exp;

      expression (& exp);

      if (! emit_insn (& exp, nbytes))
	{
	  ignore_rest_of_line ();
	  return;
	}
    }
  while (*input_line_pointer++ == ',');

  /* Put terminator back into stream.  */
  input_line_pointer --;
  demand_empty_rest_of_line ();
}
4075
4076/* Parse a .rel31 directive.  */
4077
/* Parse a .rel31 directive.  Syntax: ".rel31 <0|1>, <expr>".  Emits
   a 4-byte field whose top bit is the first operand and whose low 31
   bits get a PREL31 relocation against EXPR.  */

static void
s_arm_rel31 (int ignored ATTRIBUTE_UNUSED)
{
  expressionS exp;
  char *p;
  valueT highbit;

  highbit = 0;
  if (*input_line_pointer == '1')
    highbit = 0x80000000;
  else if (*input_line_pointer != '0')
    as_bad (_("expected 0 or 1"));

  input_line_pointer++;
  /* NOTE(review): the pointer is advanced even when the comma is
     missing — presumably relying on the error above to abandon the
     statement; confirm before changing.  */
  if (*input_line_pointer != ',')
    as_bad (_("missing comma"));
  input_line_pointer++;

#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  mapping_state (MAP_DATA);

  expression (&exp);

  /* Emit the high bit now; the PREL31 fixup fills in the rest at
     write-out time.  */
  p = frag_more (4);
  md_number_to_chars (p, highbit, 4);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 1,
	       BFD_RELOC_ARM_PREL31);

  demand_empty_rest_of_line ();
}
4115
4116/* Directives: AEABI stack-unwind tables.  */
4117
4118/* Parse an unwind_fnstart directive.  Simply records the current location.  */
4119
4120static void
4121s_arm_unwind_fnstart (int ignored ATTRIBUTE_UNUSED)
4122{
4123  demand_empty_rest_of_line ();
4124  if (unwind.proc_start)
4125    {
4126      as_bad (_("duplicate .fnstart directive"));
4127      return;
4128    }
4129
4130  /* Mark the start of the function.  */
4131  unwind.proc_start = expr_build_dot ();
4132
4133  /* Reset the rest of the unwind info.	 */
4134  unwind.opcode_count = 0;
4135  unwind.table_entry = NULL;
4136  unwind.personality_routine = NULL;
4137  unwind.personality_index = -1;
4138  unwind.frame_size = 0;
4139  unwind.fp_offset = 0;
4140  unwind.fp_reg = REG_SP;
4141  unwind.fp_used = 0;
4142  unwind.sp_restored = 0;
4143}
4144
4145
4146/* Parse a handlerdata directive.  Creates the exception handling table entry
4147   for the function.  */
4148
static void
s_arm_unwind_handlerdata (int ignored ATTRIBUTE_UNUSED)
{
  demand_empty_rest_of_line ();
  if (!unwind.proc_start)
    as_bad (MISSING_FNSTART);

  if (unwind.table_entry)
    as_bad (_("duplicate .handlerdata directive"));

  /* 1 = table entry includes handler data following it.  */
  create_unwind_entry (1);
}
4161
4162/* Parse an unwind_fnend directive.  Generates the index table entry.  */
4163
/* Parse an unwind_fnend directive.  Generates the index table entry:
   two words in the exception index section — a PREL31 reference to
   the function start, and either an inline (compact) unwind entry or
   a PREL31 reference to the extended table entry.  */

static void
s_arm_unwind_fnend (int ignored ATTRIBUTE_UNUSED)
{
  long where;
  char *ptr;
  valueT val;
  unsigned int marked_pr_dependency;

  demand_empty_rest_of_line ();

  if (!unwind.proc_start)
    {
      as_bad (_(".fnend directive without .fnstart"));
      return;
    }

  /* Add eh table entry.  VAL is non-zero only when the entry is
     compact enough to be placed inline in the index table.  */
  if (unwind.table_entry == NULL)
    val = create_unwind_entry (0);
  else
    val = 0;

  /* Add index table entry.  This is two words.	 */
  start_unwind_section (unwind.saved_seg, 1);
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);

  ptr = frag_more (8);
  memset (ptr, 0, 8);
  where = frag_now_fix () - 8;

  /* Self relative offset of the function start.  */
  fix_new (frag_now, where, 4, unwind.proc_start, 0, 1,
	   BFD_RELOC_ARM_PREL31);

  /* Indicate dependency on EHABI-defined personality routines to the
     linker, if it hasn't been done already.  */
  marked_pr_dependency
    = seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency;
  if (unwind.personality_index >= 0 && unwind.personality_index < 3
      && !(marked_pr_dependency & (1 << unwind.personality_index)))
    {
      static const char *const name[] =
	{
	  "__aeabi_unwind_cpp_pr0",
	  "__aeabi_unwind_cpp_pr1",
	  "__aeabi_unwind_cpp_pr2"
	};
      /* A zero-size R_ARM_NONE fixup just records the reference.  */
      symbolS *pr = symbol_find_or_make (name[unwind.personality_index]);
      fix_new (frag_now, where, 0, pr, 0, 1, BFD_RELOC_NONE);
      seg_info (now_seg)->tc_segment_info_data.marked_pr_dependency
	|= 1 << unwind.personality_index;
    }

  if (val)
    /* Inline exception table entry.  */
    md_number_to_chars (ptr + 4, val, 4);
  else
    /* Self relative offset of the table entry.	 */
    fix_new (frag_now, where + 4, 4, unwind.table_entry, 0, 1,
	     BFD_RELOC_ARM_PREL31);

  /* Restore the original section.  */
  subseg_set (unwind.saved_seg, unwind.saved_subseg);

  unwind.proc_start = NULL;
}
4231
4232
4233/* Parse an unwind_cantunwind directive.  */
4234
4235static void
4236s_arm_unwind_cantunwind (int ignored ATTRIBUTE_UNUSED)
4237{
4238  demand_empty_rest_of_line ();
4239  if (!unwind.proc_start)
4240    as_bad (MISSING_FNSTART);
4241
4242  if (unwind.personality_routine || unwind.personality_index != -1)
4243    as_bad (_("personality routine specified for cantunwind frame"));
4244
4245  unwind.personality_index = -2;
4246}
4247
4248
4249/* Parse a personalityindex directive.	*/
4250
4251static void
4252s_arm_unwind_personalityindex (int ignored ATTRIBUTE_UNUSED)
4253{
4254  expressionS exp;
4255
4256  if (!unwind.proc_start)
4257    as_bad (MISSING_FNSTART);
4258
4259  if (unwind.personality_routine || unwind.personality_index != -1)
4260    as_bad (_("duplicate .personalityindex directive"));
4261
4262  expression (&exp);
4263
4264  if (exp.X_op != O_constant
4265      || exp.X_add_number < 0 || exp.X_add_number > 15)
4266    {
4267      as_bad (_("bad personality routine number"));
4268      ignore_rest_of_line ();
4269      return;
4270    }
4271
4272  unwind.personality_index = exp.X_add_number;
4273
4274  demand_empty_rest_of_line ();
4275}
4276
4277
4278/* Parse a personality directive.  */
4279
4280static void
4281s_arm_unwind_personality (int ignored ATTRIBUTE_UNUSED)
4282{
4283  char *name, *p, c;
4284
4285  if (!unwind.proc_start)
4286    as_bad (MISSING_FNSTART);
4287
4288  if (unwind.personality_routine || unwind.personality_index != -1)
4289    as_bad (_("duplicate .personality directive"));
4290
4291  c = get_symbol_name (& name);
4292  p = input_line_pointer;
4293  if (c == '"')
4294    ++ input_line_pointer;
4295  unwind.personality_routine = symbol_find_or_make (name);
4296  *p = c;
4297  demand_empty_rest_of_line ();
4298}
4299
4300
/* Parse a directive saving core registers.  */

static void
s_arm_unwind_save_core (void)
{
  valueT op;
  long range;
  int n;

  /* RANGE is a bitmask with bit N set for each saved rN.  */
  range = parse_reg_list (&input_line_pointer, REGLIST_RN);
  if (range == FAIL)
    {
      as_bad (_("expected register list"));
      ignore_rest_of_line ();
      return;
    }

  demand_empty_rest_of_line ();

  /* Turn .unwind_movsp ip followed by .unwind_save {..., ip, ...}
     into .unwind_save {..., sp...}.  We aren't bothered about the value of
     ip because it is clobbered by calls.  */
  if (unwind.sp_restored && unwind.fp_reg == 12
      && (range & 0x3000) == 0x1000)
    {
      /* Drop the movsp opcode and substitute sp (bit 13) for ip (bit 12)
	 in the register list.  */
      unwind.opcode_count--;
      unwind.sp_restored = 0;
      range = (range | 0x2000) & ~0x1000;
      unwind.pending_offset = 0;
    }

  /* Pop r4-r15.  */
  if (range & 0xfff0)
    {
      /* See if we can use the short opcodes.  These pop a block of up to 8
	 registers starting with r4, plus maybe r14.  */
      for (n = 0; n < 8; n++)
	{
	  /* Break at the first non-saved register.	 */
	  if ((range & (1 << (n + 4))) == 0)
	    break;
	}
      /* See if there are any other bits set.  The 0xbff0 mask excludes
	 bit 14 (r14), which the short form can encode separately.  */
      if (n == 0 || (range & (0xfff0 << n) & 0xbff0) != 0)
	{
	  /* Use the long form.  */
	  op = 0x8000 | ((range >> 4) & 0xfff);
	  add_unwind_opcode (op, 2);
	}
      else
	{
	  /* Use the short form.  */
	  if (range & 0x4000)
	    op = 0xa8; /* Pop r14.	*/
	  else
	    op = 0xa0; /* Do not pop r14.  */
	  op |= (n - 1);
	  add_unwind_opcode (op, 1);
	}
    }

  /* Pop r0-r3.	 */
  if (range & 0xf)
    {
      op = 0xb100 | (range & 0xf);
      add_unwind_opcode (op, 2);
    }

  /* Record the number of bytes pushed.	 */
  for (n = 0; n < 16; n++)
    {
      if (range & (1 << n))
	unwind.frame_size += 4;
    }
}
4376
4377
4378/* Parse a directive saving FPA registers.  */
4379
4380static void
4381s_arm_unwind_save_fpa (int reg)
4382{
4383  expressionS exp;
4384  int num_regs;
4385  valueT op;
4386
4387  /* Get Number of registers to transfer.  */
4388  if (skip_past_comma (&input_line_pointer) != FAIL)
4389    expression (&exp);
4390  else
4391    exp.X_op = O_illegal;
4392
4393  if (exp.X_op != O_constant)
4394    {
4395      as_bad (_("expected , <constant>"));
4396      ignore_rest_of_line ();
4397      return;
4398    }
4399
4400  num_regs = exp.X_add_number;
4401
4402  if (num_regs < 1 || num_regs > 4)
4403    {
4404      as_bad (_("number of registers must be in the range [1:4]"));
4405      ignore_rest_of_line ();
4406      return;
4407    }
4408
4409  demand_empty_rest_of_line ();
4410
4411  if (reg == 4)
4412    {
4413      /* Short form.  */
4414      op = 0xb4 | (num_regs - 1);
4415      add_unwind_opcode (op, 1);
4416    }
4417  else
4418    {
4419      /* Long form.  */
4420      op = 0xc800 | (reg << 4) | (num_regs - 1);
4421      add_unwind_opcode (op, 2);
4422    }
4423  unwind.frame_size += num_regs * 12;
4424}
4425
4426
4427/* Parse a directive saving VFP registers for ARMv6 and above.  */
4428
4429static void
4430s_arm_unwind_save_vfp_armv6 (void)
4431{
4432  int count;
4433  unsigned int start;
4434  valueT op;
4435  int num_vfpv3_regs = 0;
4436  int num_regs_below_16;
4437  bfd_boolean partial_match;
4438
4439  count = parse_vfp_reg_list (&input_line_pointer, &start, REGLIST_VFP_D,
4440			      &partial_match);
4441  if (count == FAIL)
4442    {
4443      as_bad (_("expected register list"));
4444      ignore_rest_of_line ();
4445      return;
4446    }
4447
4448  demand_empty_rest_of_line ();
4449
4450  /* We always generate FSTMD/FLDMD-style unwinding opcodes (rather
4451     than FSTMX/FLDMX-style ones).  */
4452
4453  /* Generate opcode for (VFPv3) registers numbered in the range 16 .. 31.  */
4454  if (start >= 16)
4455    num_vfpv3_regs = count;
4456  else if (start + count > 16)
4457    num_vfpv3_regs = start + count - 16;
4458
4459  if (num_vfpv3_regs > 0)
4460    {
4461      int start_offset = start > 16 ? start - 16 : 0;
4462      op = 0xc800 | (start_offset << 4) | (num_vfpv3_regs - 1);
4463      add_unwind_opcode (op, 2);
4464    }
4465
4466  /* Generate opcode for registers numbered in the range 0 .. 15.  */
4467  num_regs_below_16 = num_vfpv3_regs > 0 ? 16 - (int) start : count;
4468  gas_assert (num_regs_below_16 + num_vfpv3_regs == count);
4469  if (num_regs_below_16 > 0)
4470    {
4471      op = 0xc900 | (start << 4) | (num_regs_below_16 - 1);
4472      add_unwind_opcode (op, 2);
4473    }
4474
4475  unwind.frame_size += count * 8;
4476}
4477
4478
4479/* Parse a directive saving VFP registers for pre-ARMv6.  */
4480
4481static void
4482s_arm_unwind_save_vfp (void)
4483{
4484  int count;
4485  unsigned int reg;
4486  valueT op;
4487  bfd_boolean partial_match;
4488
4489  count = parse_vfp_reg_list (&input_line_pointer, &reg, REGLIST_VFP_D,
4490			      &partial_match);
4491  if (count == FAIL)
4492    {
4493      as_bad (_("expected register list"));
4494      ignore_rest_of_line ();
4495      return;
4496    }
4497
4498  demand_empty_rest_of_line ();
4499
4500  if (reg == 8)
4501    {
4502      /* Short form.  */
4503      op = 0xb8 | (count - 1);
4504      add_unwind_opcode (op, 1);
4505    }
4506  else
4507    {
4508      /* Long form.  */
4509      op = 0xb300 | (reg << 4) | (count - 1);
4510      add_unwind_opcode (op, 2);
4511    }
4512  unwind.frame_size += count * 8 + 4;
4513}
4514
4515
4516/* Parse a directive saving iWMMXt data registers.  */
4517
4518static void
4519s_arm_unwind_save_mmxwr (void)
4520{
4521  int reg;
4522  int hi_reg;
4523  int i;
4524  unsigned mask = 0;
4525  valueT op;
4526
4527  if (*input_line_pointer == '{')
4528    input_line_pointer++;
4529
4530  do
4531    {
4532      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4533
4534      if (reg == FAIL)
4535	{
4536	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4537	  goto error;
4538	}
4539
4540      if (mask >> reg)
4541	as_tsktsk (_("register list not in ascending order"));
4542      mask |= 1 << reg;
4543
4544      if (*input_line_pointer == '-')
4545	{
4546	  input_line_pointer++;
4547	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWR);
4548	  if (hi_reg == FAIL)
4549	    {
4550	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWR]));
4551	      goto error;
4552	    }
4553	  else if (reg >= hi_reg)
4554	    {
4555	      as_bad (_("bad register range"));
4556	      goto error;
4557	    }
4558	  for (; reg < hi_reg; reg++)
4559	    mask |= 1 << reg;
4560	}
4561    }
4562  while (skip_past_comma (&input_line_pointer) != FAIL);
4563
4564  skip_past_char (&input_line_pointer, '}');
4565
4566  demand_empty_rest_of_line ();
4567
4568  /* Generate any deferred opcodes because we're going to be looking at
4569     the list.	*/
4570  flush_pending_unwind ();
4571
4572  for (i = 0; i < 16; i++)
4573    {
4574      if (mask & (1 << i))
4575	unwind.frame_size += 8;
4576    }
4577
4578  /* Attempt to combine with a previous opcode.	 We do this because gcc
4579     likes to output separate unwind directives for a single block of
4580     registers.	 */
4581  if (unwind.opcode_count > 0)
4582    {
4583      i = unwind.opcodes[unwind.opcode_count - 1];
4584      if ((i & 0xf8) == 0xc0)
4585	{
4586	  i &= 7;
4587	  /* Only merge if the blocks are contiguous.  */
4588	  if (i < 6)
4589	    {
4590	      if ((mask & 0xfe00) == (1 << 9))
4591		{
4592		  mask |= ((1 << (i + 11)) - 1) & 0xfc00;
4593		  unwind.opcode_count--;
4594		}
4595	    }
4596	  else if (i == 6 && unwind.opcode_count >= 2)
4597	    {
4598	      i = unwind.opcodes[unwind.opcode_count - 2];
4599	      reg = i >> 4;
4600	      i &= 0xf;
4601
4602	      op = 0xffff << (reg - 1);
4603	      if (reg > 0
4604		  && ((mask & op) == (1u << (reg - 1))))
4605		{
4606		  op = (1 << (reg + i + 1)) - 1;
4607		  op &= ~((1 << reg) - 1);
4608		  mask |= op;
4609		  unwind.opcode_count -= 2;
4610		}
4611	    }
4612	}
4613    }
4614
4615  hi_reg = 15;
4616  /* We want to generate opcodes in the order the registers have been
4617     saved, ie. descending order.  */
4618  for (reg = 15; reg >= -1; reg--)
4619    {
4620      /* Save registers in blocks.  */
4621      if (reg < 0
4622	  || !(mask & (1 << reg)))
4623	{
4624	  /* We found an unsaved reg.  Generate opcodes to save the
4625	     preceding block.	*/
4626	  if (reg != hi_reg)
4627	    {
4628	      if (reg == 9)
4629		{
4630		  /* Short form.  */
4631		  op = 0xc0 | (hi_reg - 10);
4632		  add_unwind_opcode (op, 1);
4633		}
4634	      else
4635		{
4636		  /* Long form.	 */
4637		  op = 0xc600 | ((reg + 1) << 4) | ((hi_reg - reg) - 1);
4638		  add_unwind_opcode (op, 2);
4639		}
4640	    }
4641	  hi_reg = reg - 1;
4642	}
4643    }
4644
4645  return;
4646 error:
4647  ignore_rest_of_line ();
4648}
4649
4650static void
4651s_arm_unwind_save_mmxwcg (void)
4652{
4653  int reg;
4654  int hi_reg;
4655  unsigned mask = 0;
4656  valueT op;
4657
4658  if (*input_line_pointer == '{')
4659    input_line_pointer++;
4660
4661  skip_whitespace (input_line_pointer);
4662
4663  do
4664    {
4665      reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4666
4667      if (reg == FAIL)
4668	{
4669	  as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4670	  goto error;
4671	}
4672
4673      reg -= 8;
4674      if (mask >> reg)
4675	as_tsktsk (_("register list not in ascending order"));
4676      mask |= 1 << reg;
4677
4678      if (*input_line_pointer == '-')
4679	{
4680	  input_line_pointer++;
4681	  hi_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_MMXWCG);
4682	  if (hi_reg == FAIL)
4683	    {
4684	      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_MMXWCG]));
4685	      goto error;
4686	    }
4687	  else if (reg >= hi_reg)
4688	    {
4689	      as_bad (_("bad register range"));
4690	      goto error;
4691	    }
4692	  for (; reg < hi_reg; reg++)
4693	    mask |= 1 << reg;
4694	}
4695    }
4696  while (skip_past_comma (&input_line_pointer) != FAIL);
4697
4698  skip_past_char (&input_line_pointer, '}');
4699
4700  demand_empty_rest_of_line ();
4701
4702  /* Generate any deferred opcodes because we're going to be looking at
4703     the list.	*/
4704  flush_pending_unwind ();
4705
4706  for (reg = 0; reg < 16; reg++)
4707    {
4708      if (mask & (1 << reg))
4709	unwind.frame_size += 4;
4710    }
4711  op = 0xc700 | mask;
4712  add_unwind_opcode (op, 2);
4713  return;
4714 error:
4715  ignore_rest_of_line ();
4716}
4717
4718
4719/* Parse an unwind_save directive.
4720   If the argument is non-zero, this is a .vsave directive.  */
4721
4722static void
4723s_arm_unwind_save (int arch_v6)
4724{
4725  char *peek;
4726  struct reg_entry *reg;
4727  bfd_boolean had_brace = FALSE;
4728
4729  if (!unwind.proc_start)
4730    as_bad (MISSING_FNSTART);
4731
4732  /* Figure out what sort of save we have.  */
4733  peek = input_line_pointer;
4734
4735  if (*peek == '{')
4736    {
4737      had_brace = TRUE;
4738      peek++;
4739    }
4740
4741  reg = arm_reg_parse_multi (&peek);
4742
4743  if (!reg)
4744    {
4745      as_bad (_("register expected"));
4746      ignore_rest_of_line ();
4747      return;
4748    }
4749
4750  switch (reg->type)
4751    {
4752    case REG_TYPE_FN:
4753      if (had_brace)
4754	{
4755	  as_bad (_("FPA .unwind_save does not take a register list"));
4756	  ignore_rest_of_line ();
4757	  return;
4758	}
4759      input_line_pointer = peek;
4760      s_arm_unwind_save_fpa (reg->number);
4761      return;
4762
4763    case REG_TYPE_RN:
4764      s_arm_unwind_save_core ();
4765      return;
4766
4767    case REG_TYPE_VFD:
4768      if (arch_v6)
4769	s_arm_unwind_save_vfp_armv6 ();
4770      else
4771	s_arm_unwind_save_vfp ();
4772      return;
4773
4774    case REG_TYPE_MMXWR:
4775      s_arm_unwind_save_mmxwr ();
4776      return;
4777
4778    case REG_TYPE_MMXWCG:
4779      s_arm_unwind_save_mmxwcg ();
4780      return;
4781
4782    default:
4783      as_bad (_(".unwind_save does not support this kind of register"));
4784      ignore_rest_of_line ();
4785    }
4786}
4787
4788
4789/* Parse an unwind_movsp directive.  */
4790
4791static void
4792s_arm_unwind_movsp (int ignored ATTRIBUTE_UNUSED)
4793{
4794  int reg;
4795  valueT op;
4796  int offset;
4797
4798  if (!unwind.proc_start)
4799    as_bad (MISSING_FNSTART);
4800
4801  reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4802  if (reg == FAIL)
4803    {
4804      as_bad ("%s", _(reg_expected_msgs[REG_TYPE_RN]));
4805      ignore_rest_of_line ();
4806      return;
4807    }
4808
4809  /* Optional constant.	 */
4810  if (skip_past_comma (&input_line_pointer) != FAIL)
4811    {
4812      if (immediate_for_directive (&offset) == FAIL)
4813	return;
4814    }
4815  else
4816    offset = 0;
4817
4818  demand_empty_rest_of_line ();
4819
4820  if (reg == REG_SP || reg == REG_PC)
4821    {
4822      as_bad (_("SP and PC not permitted in .unwind_movsp directive"));
4823      return;
4824    }
4825
4826  if (unwind.fp_reg != REG_SP)
4827    as_bad (_("unexpected .unwind_movsp directive"));
4828
4829  /* Generate opcode to restore the value.  */
4830  op = 0x90 | reg;
4831  add_unwind_opcode (op, 1);
4832
4833  /* Record the information for later.	*/
4834  unwind.fp_reg = reg;
4835  unwind.fp_offset = unwind.frame_size - offset;
4836  unwind.sp_restored = 1;
4837}
4838
4839/* Parse an unwind_pad directive.  */
4840
4841static void
4842s_arm_unwind_pad (int ignored ATTRIBUTE_UNUSED)
4843{
4844  int offset;
4845
4846  if (!unwind.proc_start)
4847    as_bad (MISSING_FNSTART);
4848
4849  if (immediate_for_directive (&offset) == FAIL)
4850    return;
4851
4852  if (offset & 3)
4853    {
4854      as_bad (_("stack increment must be multiple of 4"));
4855      ignore_rest_of_line ();
4856      return;
4857    }
4858
4859  /* Don't generate any opcodes, just record the details for later.  */
4860  unwind.frame_size += offset;
4861  unwind.pending_offset += offset;
4862
4863  demand_empty_rest_of_line ();
4864}
4865
4866/* Parse an unwind_setfp directive.  */
4867
4868static void
4869s_arm_unwind_setfp (int ignored ATTRIBUTE_UNUSED)
4870{
4871  int sp_reg;
4872  int fp_reg;
4873  int offset;
4874
4875  if (!unwind.proc_start)
4876    as_bad (MISSING_FNSTART);
4877
4878  fp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4879  if (skip_past_comma (&input_line_pointer) == FAIL)
4880    sp_reg = FAIL;
4881  else
4882    sp_reg = arm_reg_parse (&input_line_pointer, REG_TYPE_RN);
4883
4884  if (fp_reg == FAIL || sp_reg == FAIL)
4885    {
4886      as_bad (_("expected <reg>, <reg>"));
4887      ignore_rest_of_line ();
4888      return;
4889    }
4890
4891  /* Optional constant.	 */
4892  if (skip_past_comma (&input_line_pointer) != FAIL)
4893    {
4894      if (immediate_for_directive (&offset) == FAIL)
4895	return;
4896    }
4897  else
4898    offset = 0;
4899
4900  demand_empty_rest_of_line ();
4901
4902  if (sp_reg != REG_SP && sp_reg != unwind.fp_reg)
4903    {
4904      as_bad (_("register must be either sp or set by a previous"
4905		"unwind_movsp directive"));
4906      return;
4907    }
4908
4909  /* Don't generate any opcodes, just record the information for later.	 */
4910  unwind.fp_reg = fp_reg;
4911  unwind.fp_used = 1;
4912  if (sp_reg == REG_SP)
4913    unwind.fp_offset = unwind.frame_size - offset;
4914  else
4915    unwind.fp_offset -= offset;
4916}
4917
4918/* Parse an unwind_raw directive.  */
4919
4920static void
4921s_arm_unwind_raw (int ignored ATTRIBUTE_UNUSED)
4922{
4923  expressionS exp;
4924  /* This is an arbitrary limit.	 */
4925  unsigned char op[16];
4926  int count;
4927
4928  if (!unwind.proc_start)
4929    as_bad (MISSING_FNSTART);
4930
4931  expression (&exp);
4932  if (exp.X_op == O_constant
4933      && skip_past_comma (&input_line_pointer) != FAIL)
4934    {
4935      unwind.frame_size += exp.X_add_number;
4936      expression (&exp);
4937    }
4938  else
4939    exp.X_op = O_illegal;
4940
4941  if (exp.X_op != O_constant)
4942    {
4943      as_bad (_("expected <offset>, <opcode>"));
4944      ignore_rest_of_line ();
4945      return;
4946    }
4947
4948  count = 0;
4949
4950  /* Parse the opcode.	*/
4951  for (;;)
4952    {
4953      if (count >= 16)
4954	{
4955	  as_bad (_("unwind opcode too long"));
4956	  ignore_rest_of_line ();
4957	}
4958      if (exp.X_op != O_constant || exp.X_add_number & ~0xff)
4959	{
4960	  as_bad (_("invalid unwind opcode"));
4961	  ignore_rest_of_line ();
4962	  return;
4963	}
4964      op[count++] = exp.X_add_number;
4965
4966      /* Parse the next byte.  */
4967      if (skip_past_comma (&input_line_pointer) == FAIL)
4968	break;
4969
4970      expression (&exp);
4971    }
4972
4973  /* Add the opcode bytes in reverse order.  */
4974  while (count--)
4975    add_unwind_opcode (op[count], 1);
4976
4977  demand_empty_rest_of_line ();
4978}
4979
4980
4981/* Parse a .eabi_attribute directive.  */
4982
4983static void
4984s_arm_eabi_attribute (int ignored ATTRIBUTE_UNUSED)
4985{
4986  int tag = obj_elf_vendor_attribute (OBJ_ATTR_PROC);
4987
4988  if (tag >= 0 && tag < NUM_KNOWN_OBJ_ATTRIBUTES)
4989    attributes_set_explicitly[tag] = 1;
4990}
4991
/* Emit a tls fix for the symbol.  */

static void
s_arm_tls_descseq (int ignored ATTRIBUTE_UNUSED)
{
  char *p;
  expressionS exp;
#ifdef md_flush_pending_output
  md_flush_pending_output ();
#endif

#ifdef md_cons_align
  md_cons_align (4);
#endif

  /* Since we're just labelling the code, there's no need to define a
     mapping symbol.  */
  expression (&exp);
  /* P is the current fill position within the frag; the 4-byte fixup is
     placed there, using the ARM or Thumb TLS descriptor-sequence
     relocation depending on the current instruction mode.  */
  p = obstack_next_free (&frchain_now->frch_obstack);
  fix_new_arm (frag_now, p - frag_now->fr_literal, 4, &exp, 0,
	       thumb_mode ? BFD_RELOC_ARM_THM_TLS_DESCSEQ
	       : BFD_RELOC_ARM_TLS_DESCSEQ);
}
5015#endif /* OBJ_ELF */
5016
5017static void s_arm_arch (int);
5018static void s_arm_object_arch (int);
5019static void s_arm_cpu (int);
5020static void s_arm_fpu (int);
5021static void s_arm_arch_extension (int);
5022
5023#ifdef TE_PE
5024
5025static void
5026pe_directive_secrel (int dummy ATTRIBUTE_UNUSED)
5027{
5028  expressionS exp;
5029
5030  do
5031    {
5032      expression (&exp);
5033      if (exp.X_op == O_symbol)
5034	exp.X_op = O_secrel;
5035
5036      emit_expr (&exp, 4);
5037    }
5038  while (*input_line_pointer++ == ',');
5039
5040  input_line_pointer--;
5041  demand_empty_rest_of_line ();
5042}
5043#endif /* TE_PE */
5044
5045int
5046arm_is_largest_exponent_ok (int precision)
5047{
5048  /* precision == 1 ensures that this will only return
5049     true for 16 bit floats.  */
5050  return (precision == 1) && (fp16_format == ARM_FP16_FORMAT_ALTERNATIVE);
5051}
5052
5053static void
5054set_fp16_format (int dummy ATTRIBUTE_UNUSED)
5055{
5056  char saved_char;
5057  char* name;
5058  enum fp_16bit_format new_format;
5059
5060  new_format = ARM_FP16_FORMAT_DEFAULT;
5061
5062  name = input_line_pointer;
5063  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
5064    input_line_pointer++;
5065
5066  saved_char = *input_line_pointer;
5067  *input_line_pointer = 0;
5068
5069  if (strcasecmp (name, "ieee") == 0)
5070    new_format = ARM_FP16_FORMAT_IEEE;
5071  else if (strcasecmp (name, "alternative") == 0)
5072    new_format = ARM_FP16_FORMAT_ALTERNATIVE;
5073  else
5074    {
5075      as_bad (_("unrecognised float16 format \"%s\""), name);
5076      goto cleanup;
5077    }
5078
5079  /* Only set fp16_format if it is still the default (aka not already
5080     been set yet).  */
5081  if (fp16_format == ARM_FP16_FORMAT_DEFAULT)
5082    fp16_format = new_format;
5083  else
5084    {
5085      if (new_format != fp16_format)
5086	as_warn (_("float16 format cannot be set more than once, ignoring."));
5087    }
5088
5089 cleanup:
5090  *input_line_pointer = saved_char;
5091  ignore_rest_of_line ();
5092}
5093
/* This table describes all the machine specific pseudo-ops the assembler
   has to support.  The fields are:
     pseudo-op name without dot
     function to call to execute this pseudo-op
     Integer arg to pass to the function.  */

const pseudo_typeS md_pseudo_table[] =
{
  /* Never called because '.req' does not start a line.	 */
  { "req",	   s_req,	  0 },
  /* Following two are likewise never called.  */
  { "dn",	   s_dn,          0 },
  { "qn",          s_qn,          0 },
  { "unreq",	   s_unreq,	  0 },
  { "bss",	   s_bss,	  0 },
  { "align",	   s_align_ptwo,  2 },
  { "arm",	   s_arm,	  0 },
  { "thumb",	   s_thumb,	  0 },
  { "code",	   s_code,	  0 },
  { "force_thumb", s_force_thumb, 0 },
  { "thumb_func",  s_thumb_func,  0 },
  { "thumb_set",   s_thumb_set,	  0 },
  { "even",	   s_even,	  0 },
  { "ltorg",	   s_ltorg,	  0 },
  { "pool",	   s_ltorg,	  0 },
  { "syntax",	   s_syntax,	  0 },
  { "cpu",	   s_arm_cpu,	  0 },
  { "arch",	   s_arm_arch,	  0 },
  { "object_arch", s_arm_object_arch,	0 },
  { "fpu",	   s_arm_fpu,	  0 },
  { "arch_extension", s_arm_arch_extension, 0 },
#ifdef OBJ_ELF
  /* ELF-specific directives, including the AEABI stack-unwinding ones
     handled by the s_arm_unwind_* parsers above.  */
  { "word",	        s_arm_elf_cons, 4 },
  { "long",	        s_arm_elf_cons, 4 },
  { "inst.n",           s_arm_elf_inst, 2 },
  { "inst.w",           s_arm_elf_inst, 4 },
  { "inst",             s_arm_elf_inst, 0 },
  { "rel31",	        s_arm_rel31,	  0 },
  { "fnstart",		s_arm_unwind_fnstart,	0 },
  { "fnend",		s_arm_unwind_fnend,	0 },
  { "cantunwind",	s_arm_unwind_cantunwind, 0 },
  { "personality",	s_arm_unwind_personality, 0 },
  { "personalityindex",	s_arm_unwind_personalityindex, 0 },
  { "handlerdata",	s_arm_unwind_handlerdata, 0 },
  { "save",		s_arm_unwind_save,	0 },
  { "vsave",		s_arm_unwind_save,	1 },
  { "movsp",		s_arm_unwind_movsp,	0 },
  { "pad",		s_arm_unwind_pad,	0 },
  { "setfp",		s_arm_unwind_setfp,	0 },
  { "unwind_raw",	s_arm_unwind_raw,	0 },
  { "eabi_attribute",	s_arm_eabi_attribute,	0 },
  { "tlsdescseq",	s_arm_tls_descseq,      0 },
#else
  /* Non-ELF fallbacks.  */
  { "word",	   cons, 4},

  /* These are used for dwarf.  */
  {"2byte", cons, 2},
  {"4byte", cons, 4},
  {"8byte", cons, 8},
  /* These are used for dwarf2.  */
  { "file", dwarf2_directive_file, 0 },
  { "loc",  dwarf2_directive_loc,  0 },
  { "loc_mark_labels", dwarf2_directive_loc_mark_labels, 0 },
#endif
  { "extend",	   float_cons, 'x' },
  { "ldouble",	   float_cons, 'x' },
  { "packed",	   float_cons, 'p' },
  { "bfloat16",	   float_cons, 'b' },
#ifdef TE_PE
  {"secrel32", pe_directive_secrel, 0},
#endif

  /* These are for compatibility with CodeComposer Studio.  */
  {"ref",          s_ccs_ref,        0},
  {"def",          s_ccs_def,        0},
  {"asmfunc",      s_ccs_asmfunc,    0},
  {"endasmfunc",   s_ccs_endasmfunc, 0},

  {"float16", float_cons, 'h' },
  {"float16_format", set_fp16_format, 0 },

  { 0, 0, 0 }
};
5177
5178/* Parser functions used exclusively in instruction operands.  */
5179
5180/* Generic immediate-value read function for use in insn parsing.
5181   STR points to the beginning of the immediate (the leading #);
5182   VAL receives the value; if the value is outside [MIN, MAX]
5183   issue an error.  PREFIX_OPT is true if the immediate prefix is
5184   optional.  */
5185
5186static int
5187parse_immediate (char **str, int *val, int min, int max,
5188		 bfd_boolean prefix_opt)
5189{
5190  expressionS exp;
5191
5192  my_get_expression (&exp, str, prefix_opt ? GE_OPT_PREFIX : GE_IMM_PREFIX);
5193  if (exp.X_op != O_constant)
5194    {
5195      inst.error = _("constant expression required");
5196      return FAIL;
5197    }
5198
5199  if (exp.X_add_number < min || exp.X_add_number > max)
5200    {
5201      inst.error = _("immediate value out of range");
5202      return FAIL;
5203    }
5204
5205  *val = exp.X_add_number;
5206  return SUCCESS;
5207}
5208
/* Less-generic immediate-value read function with the possibility of loading a
   big (64-bit) immediate, as required by Neon VMOV, VMVN and logic immediate
   instructions. Puts the result directly in inst.operands[i].  The low 32
   bits go in .imm, the high 32 bits (if any) in .reg with .regisimm set.  */

static int
parse_big_immediate (char **str, int i, expressionS *in_exp,
		     bfd_boolean allow_symbol_p)
{
  expressionS exp;
  expressionS *exp_p = in_exp ? in_exp : &exp;
  char *ptr = *str;

  my_get_expression (exp_p, &ptr, GE_OPT_PREFIX_BIG);

  if (exp_p->X_op == O_constant)
    {
      inst.operands[i].imm = exp_p->X_add_number & 0xffffffff;
      /* If we're on a 64-bit host, then a 64-bit number can be returned using
	 O_constant.  We have to be careful not to break compilation for
	 32-bit X_add_number, though.  */
      if ((exp_p->X_add_number & ~(offsetT)(0xffffffffU)) != 0)
	{
	  /* X >> 32 is illegal if sizeof (exp_p->X_add_number) == 4.  */
	  inst.operands[i].reg = (((exp_p->X_add_number >> 16) >> 16)
				  & 0xffffffff);
	  inst.operands[i].regisimm = 1;
	}
    }
  else if (exp_p->X_op == O_big
	   && LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 32)
    {
      /* For O_big, X_add_number is the number of littlenums used.  */
      unsigned parts = 32 / LITTLENUM_NUMBER_OF_BITS, j, idx = 0;

      /* Bignums have their least significant bits in
	 generic_bignum[0]. Make sure we put 32 bits in imm and
	 32 bits in reg,  in a (hopefully) portable way.  */
      gas_assert (parts != 0);

      /* Make sure that the number is not too big.
	 PR 11972: Bignums can now be sign-extended to the
	 size of a .octa so check that the out of range bits
	 are all zero or all one.  */
      if (LITTLENUM_NUMBER_OF_BITS * exp_p->X_add_number > 64)
	{
	  LITTLENUM_TYPE m = -1;

	  if (generic_bignum[parts * 2] != 0
	      && generic_bignum[parts * 2] != m)
	    return FAIL;

	  for (j = parts * 2 + 1; j < (unsigned) exp_p->X_add_number; j++)
	    if (generic_bignum[j] != generic_bignum[j-1])
	      return FAIL;
	}

      /* Assemble the low 32 bits into .imm and the next 32 into .reg,
	 littlenum by littlenum.  */
      inst.operands[i].imm = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].imm |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      inst.operands[i].reg = 0;
      for (j = 0; j < parts; j++, idx++)
	inst.operands[i].reg |= ((unsigned) generic_bignum[idx]
				 << (LITTLENUM_NUMBER_OF_BITS * j));
      inst.operands[i].regisimm = 1;
    }
  else if (!(exp_p->X_op == O_symbol && allow_symbol_p))
    return FAIL;

  *str = ptr;

  return SUCCESS;
}
5281
/* Returns the pseudo-register number of an FPA immediate constant,
   or FAIL if there isn't a valid constant here.  Immediate constants
   are mapped to pseudo-register numbers starting at 8 (the index into
   fp_const/fp_values plus 8).  */

static int
parse_fpa_immediate (char ** str)
{
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  char *	 save_in;
  expressionS	 exp;
  int		 i;
  int		 j;

  /* First try and match exact strings, this is to guarantee
     that some formats will work even for cross assembly.  */

  for (i = 0; fp_const[i]; i++)
    {
      if (strncmp (*str, fp_const[i], strlen (fp_const[i])) == 0)
	{
	  char *start = *str;

	  /* Only accept the match if it consumes the whole operand.  */
	  *str += strlen (fp_const[i]);
	  if (is_end_of_line[(unsigned char) **str])
	    return i + 8;
	  *str = start;
	}
    }

  /* Just because we didn't get a match doesn't mean that the constant
     isn't valid, just that it is in a format that we don't
     automatically recognize.  Try parsing it with the standard
     expression routines.  */

  memset (words, 0, MAX_LITTLENUMS * sizeof (LITTLENUM_TYPE));

  /* Look for a raw floating point number.  */
  if ((save_in = atof_ieee (*str, 'x', words)) != NULL
      && is_end_of_line[(unsigned char) *save_in])
    {
      /* Compare the parsed littlenums against each known constant.  */
      for (i = 0; i < NUM_FLOAT_VALS; i++)
	{
	  for (j = 0; j < MAX_LITTLENUMS; j++)
	    {
	      if (words[j] != fp_values[i][j])
		break;
	    }

	  if (j == MAX_LITTLENUMS)
	    {
	      *str = save_in;
	      return i + 8;
	    }
	}
    }

  /* Try and parse a more complex expression, this will probably fail
     unless the code uses a floating point prefix (eg "0f").
     expression () works on input_line_pointer, so temporarily redirect
     it at the operand and restore it afterwards.  */
  save_in = input_line_pointer;
  input_line_pointer = *str;
  if (expression (&exp) == absolute_section
      && exp.X_op == O_big
      && exp.X_add_number < 0)
    {
      /* FIXME: 5 = X_PRECISION, should be #define'd where we can use it.
	 Ditto for 15.	*/
#define X_PRECISION 5
#define E_PRECISION 15L
      if (gen_to_words (words, X_PRECISION, E_PRECISION) == 0)
	{
	  for (i = 0; i < NUM_FLOAT_VALS; i++)
	    {
	      for (j = 0; j < MAX_LITTLENUMS; j++)
		{
		  if (words[j] != fp_values[i][j])
		    break;
		}

	      if (j == MAX_LITTLENUMS)
		{
		  *str = input_line_pointer;
		  input_line_pointer = save_in;
		  return i + 8;
		}
	    }
	}
    }

  *str = input_line_pointer;
  input_line_pointer = save_in;
  inst.error = _("invalid FPA immediate expression");
  return FAIL;
}
5374
/* Returns non-zero if IMM, viewed as an IEEE single-precision bit
   pattern, has the "quarter-precision" float format
   0baBbbbbbc defgh000 00000000 00000000 (i.e. it can be encoded as an
   8-bit VFP/Neon floating-point immediate).  */

static int
is_quarter_float (unsigned imm)
{
  unsigned expected_top;

  /* The nineteen low-order mantissa bits must all be zero.  */
  if ((imm & 0x7ffff) != 0)
    return 0;

  /* Bits 30..25 must be the complement-pattern implied by bit 29:
     either 0b011111 or 0b100000.  */
  expected_top = (imm & 0x20000000) ? 0x3e000000 : 0x40000000;
  return (imm & 0x7e000000) == expected_top;
}
5384
5385
/* Detect the presence of a floating point or integer zero constant,
   i.e. #0.0 or #0.  Returns TRUE (and advances *IN past the constant)
   on success, FALSE otherwise.  */

static bfd_boolean
parse_ifimm_zero (char **in)
{
  int error_code;

  if (!is_immediate_prefix (**in))
    {
      /* In unified syntax, all prefixes are optional.  */
      if (!unified_syntax)
	return FALSE;
    }
  else
    ++*in;

  /* Accept #0x0 as a synonym for #0.  */
  if (strncmp (*in, "0x", 2) == 0)
    {
      int val;
      /* Range 0..0 means only the value zero is accepted.  */
      if (parse_immediate (in, &val, 0, 0, TRUE) == FAIL)
        return FALSE;
      return TRUE;
    }

  error_code = atof_generic (in, ".", EXP_CHARS,
                             &generic_floating_point_number);

  /* LOW > LEADER means no significant littlenums were produced, which
     is how atof_generic represents a zero value; SIGN == '+' excludes
     -0.0.  */
  if (!error_code
      && generic_floating_point_number.sign == '+'
      && (generic_floating_point_number.low
          > generic_floating_point_number.leader))
    return TRUE;

  return FALSE;
}
5423
/* Parse an 8-bit "quarter-precision" floating point number of the form:
   0baBbbbbbc defgh000 00000000 00000000.
   The zero and minus-zero cases need special handling, since they can't be
   encoded in the "quarter-precision" float format, but can nonetheless be
   loaded as integer constants.
   On success, stores the single-precision bit pattern in *IMMED, advances
   *CCP past the constant and returns SUCCESS; otherwise returns FAIL.  */

static unsigned
parse_qfloat_immediate (char **ccp, int *immed)
{
  char *str = *ccp;
  char *fpnum;
  LITTLENUM_TYPE words[MAX_LITTLENUMS];
  int found_fpchar = 0;

  skip_past_char (&str, '#');

  /* We must not accidentally parse an integer as a floating-point number. Make
     sure that the value we parse is not an integer by checking for special
     characters '.' or 'e'.
     FIXME: This is a horrible hack, but doing better is tricky because type
     information isn't in a very usable state at parse time.  */
  fpnum = str;
  skip_whitespace (fpnum);

  if (strncmp (fpnum, "0x", 2) == 0)
    return FAIL;
  else
    {
      /* Scan the operand text for a character that marks it as
	 floating-point rather than integer.  */
      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
	  {
	    found_fpchar = 1;
	    break;
	  }

      if (!found_fpchar)
	return FAIL;
    }

  if ((str = atof_ieee (str, 's', words)) != NULL)
    {
      unsigned fpword = 0;
      int i;

      /* Our FP word must be 32 bits (single-precision FP).  */
      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
	{
	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
	  fpword |= words[i];
	}

      /* Accept quarter-precision-encodable values, plus +0.0/-0.0 which
	 get the special handling described above.  */
      if (is_quarter_float (fpword) || (fpword & 0x7fffffff) == 0)
	*immed = fpword;
      else
	return FAIL;

      *ccp = str;

      return SUCCESS;
    }

  return FAIL;
}
5487
/* Shift operands: the kinds of shift operation that can appear in an
   instruction operand.  */
enum shift_kind
{
  SHIFT_LSL, SHIFT_LSR, SHIFT_ASR, SHIFT_ROR, SHIFT_RRX, SHIFT_UXTW
};
5493
/* Maps the textual name of a shift operator onto its shift_kind;
   entries live in the arm_shift_hsh hash table.  */
struct asm_shift_name
{
  const char	  *name;	/* Name as written in assembler source.  */
  enum shift_kind  kind;	/* The shift operation it denotes.  */
};
5499
/* Third argument to parse_shift: restricts which shift forms are
   acceptable in the current context.  */
enum parse_shift_mode
{
  NO_SHIFT_RESTRICT,		/* Any kind of shift is accepted.  */
  SHIFT_IMMEDIATE,		/* Shift operand must be an immediate.	*/
  SHIFT_LSL_OR_ASR_IMMEDIATE,	/* Shift must be LSL or ASR immediate.	*/
  SHIFT_ASR_IMMEDIATE,		/* Shift must be ASR immediate.	 */
  SHIFT_LSL_IMMEDIATE,		/* Shift must be LSL immediate.	 */
  SHIFT_UXTW_IMMEDIATE		/* Shift must be UXTW immediate.  */
};
5510
/* Parse a <shift> specifier on an ARM data processing instruction.
   This has three forms:

     (LSL|LSR|ASL|ASR|ROR) Rs
     (LSL|LSR|ASL|ASR|ROR) #imm
     RRX

   Note that ASL is assimilated to LSL in the instruction encoding, and
   RRX to ROR #0 (which cannot be written as such).

   STR points at the text to parse, I indexes the operand being filled
   in and MODE restricts which shifts are acceptable.  On success the
   shift kind is stored in inst.operands[i] (and the shift amount in
   either inst.operands[i].imm or inst.relocs[0].exp), *STR is advanced
   and SUCCESS returned; otherwise inst.error is set and FAIL
   returned.  */

static int
parse_shift (char **str, int i, enum parse_shift_mode mode)
{
  const struct asm_shift_name *shift_name;
  enum shift_kind shift;
  char *s = *str;
  char *p = s;
  int reg;

  /* Scan over the alphabetic shift-operator name.  */
  for (p = *str; ISALPHA (*p); p++)
    ;

  if (p == *str)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift_name
    = (const struct asm_shift_name *) str_hash_find_n (arm_shift_hsh, *str,
						       p - *str);

  if (shift_name == NULL)
    {
      inst.error = _("shift expression expected");
      return FAIL;
    }

  shift = shift_name->kind;

  /* Enforce the restrictions requested by MODE.  */
  switch (mode)
    {
    case NO_SHIFT_RESTRICT:
    case SHIFT_IMMEDIATE:
      if (shift == SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' not allowed here");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_OR_ASR_IMMEDIATE:
      if (shift != SHIFT_LSL && shift != SHIFT_ASR)
	{
	  inst.error = _("'LSL' or 'ASR' required");
	  return FAIL;
	}
      break;

    case SHIFT_LSL_IMMEDIATE:
      if (shift != SHIFT_LSL)
	{
	  inst.error = _("'LSL' required");
	  return FAIL;
	}
      break;

    case SHIFT_ASR_IMMEDIATE:
      if (shift != SHIFT_ASR)
	{
	  inst.error = _("'ASR' required");
	  return FAIL;
	}
      break;
    case SHIFT_UXTW_IMMEDIATE:
      if (shift != SHIFT_UXTW)
	{
	  inst.error = _("'UXTW' required");
	  return FAIL;
	}
      break;

    default: abort ();
    }

  /* RRX takes no shift amount; everything else is followed by either a
     register (NO_SHIFT_RESTRICT only) or an immediate expression.  */
  if (shift != SHIFT_RRX)
    {
      /* Whitespace can appear here if the next thing is a bare digit.	*/
      skip_whitespace (p);

      if (mode == NO_SHIFT_RESTRICT
	  && (reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;
	}
      else if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
	return FAIL;
    }
  inst.operands[i].shift_kind = shift;
  inst.operands[i].shifted = 1;
  *str = p;
  return SUCCESS;
}
5615
/* Parse a <shifter_operand> for an ARM data processing instruction:

      #<immediate>
      #<immediate>, <rotate>
      <Rm>
      <Rm>, <shift>

   where <shift> is defined by parse_shift above, and <rotate> is a
   multiple of 2 between 0 and 30.  Validation of immediate operands
   is deferred to md_apply_fix.  Returns SUCCESS or FAIL.  */

static int
parse_shifter_operand (char **str, int i)
{
  int value;
  expressionS exp;

  if ((value = arm_reg_parse (str, REG_TYPE_RN)) != FAIL)
    {
      inst.operands[i].reg = value;
      inst.operands[i].isreg = 1;

      /* parse_shift will override this if appropriate */
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;

      if (skip_past_comma (str) == FAIL)
	return SUCCESS;

      /* Shift operation on register.  */
      return parse_shift (str, i, NO_SHIFT_RESTRICT);
    }

  if (my_get_expression (&inst.relocs[0].exp, str, GE_IMM_PREFIX))
    return FAIL;

  if (skip_past_comma (str) == SUCCESS)
    {
      /* #x, y -- ie explicit rotation by Y.  */
      if (my_get_expression (&exp, str, GE_NO_PREFIX))
	return FAIL;

      if (exp.X_op != O_constant || inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}

      /* The rotation must be an even value in [0, 30] and the base
	 immediate an 8-bit value.  */
      value = exp.X_add_number;
      if (value < 0 || value > 30 || value % 2 != 0)
	{
	  inst.error = _("invalid rotation");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 255)
	{
	  inst.error = _("invalid constant");
	  return FAIL;
	}

      /* Encode as specified.  Since VALUE is even, VALUE << 7 places
	 VALUE / 2 in bits 8-11, above the 8-bit immediate.  */
      inst.operands[i].imm = inst.relocs[0].exp.X_add_number | value << 7;
      return SUCCESS;
    }

  /* Plain #immediate: leave validation/encoding to md_apply_fix.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 0;
  return SUCCESS;
}
5686
/* Group relocation information.  Each entry in the table contains the
   textual name of the relocation as may appear in assembler source
   and must end with a colon.
   Along with this textual name are the relocation codes to be used if
   the corresponding instruction is an ALU instruction (ADD or SUB only),
   an LDR, an LDRS, or an LDC.  A code of 0 means the relocation is not
   permitted for that instruction class.  */

struct group_reloc_table_entry
{
  const char *name;	/* Relocation name, without the trailing colon.  */
  int alu_code;		/* Reloc for ALU (ADD/SUB) instructions.  */
  int ldr_code;		/* Reloc for LDR instructions.  */
  int ldrs_code;	/* Reloc for LDRS instructions.  */
  int ldc_code;		/* Reloc for LDC instructions.  */
};
5702
/* Selects which column of group_reloc_table applies to the current
   instruction (or requests MVE address parsing behaviour).  */
typedef enum
{
  /* Varieties of non-ALU group relocation.  */

  GROUP_LDR,
  GROUP_LDRS,
  GROUP_LDC,
  GROUP_MVE
} group_reloc_type;
5712
/* The table of recognized group relocations.  See the commentary on
   struct group_reloc_table_entry above; a 0 code marks a combination
   that is not allowed.  */
static struct group_reloc_table_entry group_reloc_table[] =
  { /* Program counter relative: */
    { "pc_g0_nc",
      BFD_RELOC_ARM_ALU_PC_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g0",
      BFD_RELOC_ARM_ALU_PC_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G0 },	/* LDC */
    { "pc_g1_nc",
      BFD_RELOC_ARM_ALU_PC_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "pc_g1",
      BFD_RELOC_ARM_ALU_PC_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G1 },	/* LDC */
    { "pc_g2",
      BFD_RELOC_ARM_ALU_PC_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_PC_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_PC_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_PC_G2 },	/* LDC */
    /* Section base relative */
    { "sb_g0_nc",
      BFD_RELOC_ARM_ALU_SB_G0_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g0",
      BFD_RELOC_ARM_ALU_SB_G0,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G0,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G0,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G0 },	/* LDC */
    { "sb_g1_nc",
      BFD_RELOC_ARM_ALU_SB_G1_NC,	/* ALU */
      0,				/* LDR */
      0,				/* LDRS */
      0 },				/* LDC */
    { "sb_g1",
      BFD_RELOC_ARM_ALU_SB_G1,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G1, 		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G1,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G1 },	/* LDC */
    { "sb_g2",
      BFD_RELOC_ARM_ALU_SB_G2,		/* ALU */
      BFD_RELOC_ARM_LDR_SB_G2,		/* LDR */
      BFD_RELOC_ARM_LDRS_SB_G2,		/* LDRS */
      BFD_RELOC_ARM_LDC_SB_G2 },	/* LDC */
    /* Absolute thumb alu relocations.  */
    { "lower0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "lower8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper0_7",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 },				/* LDC.  */
    { "upper8_15",
      BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC,/* ALU.  */
      0,				/* LDR.  */
      0,				/* LDRS.  */
      0 } };				/* LDC.  */
5787
5788/* Given the address of a pointer pointing to the textual name of a group
5789   relocation as may appear in assembler source, attempt to find its details
5790   in group_reloc_table.  The pointer will be updated to the character after
5791   the trailing colon.  On failure, FAIL will be returned; SUCCESS
5792   otherwise.  On success, *entry will be updated to point at the relevant
5793   group_reloc_table entry. */
5794
5795static int
5796find_group_reloc_table_entry (char **str, struct group_reloc_table_entry **out)
5797{
5798  unsigned int i;
5799  for (i = 0; i < ARRAY_SIZE (group_reloc_table); i++)
5800    {
5801      int length = strlen (group_reloc_table[i].name);
5802
5803      if (strncasecmp (group_reloc_table[i].name, *str, length) == 0
5804	  && (*str)[length] == ':')
5805	{
5806	  *out = &group_reloc_table[i];
5807	  *str += (length + 1);
5808	  return SUCCESS;
5809	}
5810    }
5811
5812  return FAIL;
5813}
5814
/* Parse a <shifter_operand> for an ARM data processing instruction
   (as for parse_shifter_operand) where group relocations are allowed:

      #<immediate>
      #<immediate>, <rotate>
      #:<group_reloc>:<expression>
      <Rm>
      <Rm>, <shift>

   where <group_reloc> is one of the strings defined in group_reloc_table.
   The hashes are optional.

   Everything else is as for parse_shifter_operand.  */

static parse_operand_result
parse_shifter_operand_group_reloc (char **str, int i)
{
  /* Determine if we have the sequence of characters #: or just :
     coming next.  If we do, then we check for a group relocation.
     If we don't, punt the whole lot to parse_shifter_operand.  */

  if (((*str)[0] == '#' && (*str)[1] == ':')
      || (*str)[0] == ':')
    {
      struct group_reloc_table_entry *entry;

      /* Skip over the #: or : sequence.  */
      if ((*str)[0] == '#')
	(*str) += 2;
      else
	(*str)++;

      /* Try to parse a group relocation.  Anything else is an error.  */
      if (find_group_reloc_table_entry (str, &entry) == FAIL)
	{
	  inst.error = _("unknown group relocation");
	  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
	}

      /* We now have the group relocation table entry corresponding to
	 the name in the assembler source.  Next, we parse the expression.  */
      if (my_get_expression (&inst.relocs[0].exp, str, GE_NO_PREFIX))
	return PARSE_OPERAND_FAIL_NO_BACKTRACK;

      /* Record the relocation type (always the ALU variant here).  */
      inst.relocs[0].type = (bfd_reloc_code_real_type) entry->alu_code;
      gas_assert (inst.relocs[0].type != 0);

      return PARSE_OPERAND_SUCCESS;
    }
  else
    return parse_shifter_operand (str, i) == SUCCESS
	   ? PARSE_OPERAND_SUCCESS : PARSE_OPERAND_FAIL;

  /* Never reached.  */
}
5870
/* Parse a Neon alignment expression.  Information is written to
   inst.operands[i].  We assume the initial ':' has been skipped.

   align	.imm = align << 8, .immisalign=1, .preind=0  */
static parse_operand_result
parse_neon_alignment (char **str, int i)
{
  char *p = *str;
  expressionS exp;

  my_get_expression (&exp, &p, GE_NO_PREFIX);

  /* Only a constant alignment is meaningful.  */
  if (exp.X_op != O_constant)
    {
      inst.error = _("alignment must be constant");
      return PARSE_OPERAND_FAIL;
    }

  /* Store the alignment shifted up, leaving the low bits free for a
     register number (see parse_address_main).  */
  inst.operands[i].imm = exp.X_add_number << 8;
  inst.operands[i].immisalign = 1;
  /* Alignments are not pre-indexes.  */
  inst.operands[i].preind = 0;

  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
5897
/* Parse all forms of an ARM address expression.  Information is written
   to inst.operands[i] and/or inst.relocs[0].

   Preindexed addressing (.preind=1):

   [Rn, #offset]       .reg=Rn .relocs[0].exp=offset
   [Rn, +/-Rm]	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn, +/-Rm, shift]  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   These three may have a trailing ! which causes .writeback to be set also.

   Postindexed addressing (.postind=1, .writeback=1):

   [Rn], #offset       .reg=Rn .relocs[0].exp=offset
   [Rn], +/-Rm	       .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
   [Rn], +/-Rm, shift  .reg=Rn .imm=Rm .immisreg=1 .negative=0/1
		       .shift_kind=shift .relocs[0].exp=shift_imm

   Unindexed addressing (.preind=0, .postind=0):

   [Rn], {option}      .reg=Rn .imm=option .immisreg=0

   Other:

   [Rn]{!}	       shorthand for [Rn,#0]{!}
   =immediate	       .isreg=0 .relocs[0].exp=immediate
   label	       .reg=PC .relocs[0].pc_rel=1 .relocs[0].exp=label

   GROUP_RELOCATIONS non-zero permits #:name: group relocations in the
   offset, with GROUP_TYPE selecting which relocation variant is used;
   GROUP_TYPE == GROUP_MVE additionally allows MVE vector registers in
   the address.

  It is the caller's responsibility to check for addressing modes not
  supported by the instruction, and to set inst.relocs[0].type.  */

static parse_operand_result
parse_address_main (char **str, int i, int group_relocations,
		    group_reloc_type group_type)
{
  char *p = *str;
  int reg;

  if (skip_past_char (&p, '[') == FAIL)
    {
      if (skip_past_char (&p, '=') == FAIL)
	{
	  /* Bare address - translate to PC-relative offset.  */
	  inst.relocs[0].pc_rel = 1;
	  inst.operands[i].reg = REG_PC;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].preind = 1;

	  if (my_get_expression (&inst.relocs[0].exp, &p, GE_OPT_PREFIX_BIG))
	    return PARSE_OPERAND_FAIL;
	}
      else if (parse_big_immediate (&p, i, &inst.relocs[0].exp,
				    /*allow_symbol_p=*/TRUE))
	return PARSE_OPERAND_FAIL;

      *str = p;
      return PARSE_OPERAND_SUCCESS;
    }

  /* PR gas/14887: Allow for whitespace after the opening bracket.  */
  skip_whitespace (p);

  /* For MVE the base may be an MVE vector (Q) register as well as a
     core register.  */
  if (group_type == GROUP_MVE)
    {
      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if ((reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  inst.operands[i].isquad = 1;
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
	{
	  inst.error = BAD_ADDR_MODE;
	  return PARSE_OPERAND_FAIL;
	}
    }
  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
    {
      /* NOTE(review): group_type cannot be GROUP_MVE on this path (the
	 branch above already handled it), so the generic message is
	 always used here.  */
      if (group_type == GROUP_MVE)
	inst.error = BAD_ADDR_MODE;
      else
	inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
      return PARSE_OPERAND_FAIL;
    }
  inst.operands[i].reg = reg;
  inst.operands[i].isreg = 1;

  if (skip_past_comma (&p) == SUCCESS)
    {
      inst.operands[i].preind = 1;

      if (*p == '+') p++;
      else if (*p == '-') p++, inst.operands[i].negative = 1;

      enum arm_reg_type rtype = REG_TYPE_MQ;
      struct neon_type_el et;
      if (group_type == GROUP_MVE
	  && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	{
	  /* .immisreg == 2 marks an MVE vector-register offset.  */
	  inst.operands[i].immisreg = 2;
	  inst.operands[i].imm = reg;

	  if (skip_past_comma (&p) == SUCCESS)
	    {
	      if (parse_shift (&p, i, SHIFT_UXTW_IMMEDIATE) == SUCCESS)
		{
		  inst.operands[i].imm |= inst.relocs[0].exp.X_add_number << 5;
		  inst.relocs[0].exp.X_add_number = 0;
		}
	      else
		return PARSE_OPERAND_FAIL;
	    }
	}
      else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	{
	  inst.operands[i].imm = reg;
	  inst.operands[i].immisreg = 1;

	  if (skip_past_comma (&p) == SUCCESS)
	    if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
	      return PARSE_OPERAND_FAIL;
	}
      else if (skip_past_char (&p, ':') == SUCCESS)
	{
	  /* FIXME: '@' should be used here, but it's filtered out by generic
	     code before we get to see it here. This may be subject to
	     change.  */
	  parse_operand_result result = parse_neon_alignment (&p, i);

	  if (result != PARSE_OPERAND_SUCCESS)
	    return result;
	}
      else
	{
	  if (inst.operands[i].negative)
	    {
	      /* Rewind so the expression parser sees the '-' again.  */
	      inst.operands[i].negative = 0;
	      p--;
	    }

	  if (group_relocations
	      && ((*p == '#' && *(p + 1) == ':') || *p == ':'))
	    {
	      struct group_reloc_table_entry *entry;

	      /* Skip over the #: or : sequence.  */
	      if (*p == '#')
		p += 2;
	      else
		p++;

	      /* Try to parse a group relocation.  Anything else is an
		 error.  */
	      if (find_group_reloc_table_entry (&p, &entry) == FAIL)
		{
		  inst.error = _("unknown group relocation");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}

	      /* We now have the group relocation table entry corresponding to
		 the name in the assembler source.  Next, we parse the
		 expression.  */
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
		return PARSE_OPERAND_FAIL_NO_BACKTRACK;

	      /* Record the relocation type.  */
	      switch (group_type)
		{
		  case GROUP_LDR:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldr_code;
		    break;

		  case GROUP_LDRS:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldrs_code;
		    break;

		  case GROUP_LDC:
		    inst.relocs[0].type
			= (bfd_reloc_code_real_type) entry->ldc_code;
		    break;

		  default:
		    gas_assert (0);
		}

	      /* A zero table entry means this relocation/instruction
		 combination is not permitted.  */
	      if (inst.relocs[0].type == 0)
		{
		  inst.error = _("this group relocation is not allowed on this instruction");
		  return PARSE_OPERAND_FAIL_NO_BACKTRACK;
		}
	    }
	  else
	    {
	      char *q = p;

	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }
  else if (skip_past_char (&p, ':') == SUCCESS)
    {
      /* FIXME: '@' should be used here, but it's filtered out by generic code
	 before we get to see it here. This may be subject to change.  */
      parse_operand_result result = parse_neon_alignment (&p, i);

      if (result != PARSE_OPERAND_SUCCESS)
	return result;
    }

  if (skip_past_char (&p, ']') == FAIL)
    {
      inst.error = _("']' expected");
      return PARSE_OPERAND_FAIL;
    }

  if (skip_past_char (&p, '!') == SUCCESS)
    inst.operands[i].writeback = 1;

  else if (skip_past_comma (&p) == SUCCESS)
    {
      if (skip_past_char (&p, '{') == SUCCESS)
	{
	  /* [Rn], {expr} - unindexed, with option */
	  if (parse_immediate (&p, &inst.operands[i].imm,
			       0, 255, TRUE) == FAIL)
	    return PARSE_OPERAND_FAIL;

	  if (skip_past_char (&p, '}') == FAIL)
	    {
	      inst.error = _("'}' expected at end of 'option' field");
	      return PARSE_OPERAND_FAIL;
	    }
	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine index with option");
	      return PARSE_OPERAND_FAIL;
	    }
	  *str = p;
	  return PARSE_OPERAND_SUCCESS;
	}
      else
	{
	  inst.operands[i].postind = 1;
	  inst.operands[i].writeback = 1;

	  if (inst.operands[i].preind)
	    {
	      inst.error = _("cannot combine pre- and post-indexing");
	      return PARSE_OPERAND_FAIL;
	    }

	  if (*p == '+') p++;
	  else if (*p == '-') p++, inst.operands[i].negative = 1;

	  enum arm_reg_type rtype = REG_TYPE_MQ;
	  struct neon_type_el et;
	  if (group_type == GROUP_MVE
	      && (reg = arm_typed_reg_parse (&p, rtype, &rtype, &et)) != FAIL)
	    {
	      inst.operands[i].immisreg = 2;
	      inst.operands[i].imm = reg;
	    }
	  else if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) != FAIL)
	    {
	      /* We might be using the immediate for alignment already. If we
		 are, OR the register number into the low-order bits.  */
	      if (inst.operands[i].immisalign)
		inst.operands[i].imm |= reg;
	      else
		inst.operands[i].imm = reg;
	      inst.operands[i].immisreg = 1;

	      if (skip_past_comma (&p) == SUCCESS)
		if (parse_shift (&p, i, SHIFT_IMMEDIATE) == FAIL)
		  return PARSE_OPERAND_FAIL;
	    }
	  else
	    {
	      char *q = p;

	      if (inst.operands[i].negative)
		{
		  /* Rewind so the expression parser sees the '-' again.  */
		  inst.operands[i].negative = 0;
		  p--;
		}
	      if (my_get_expression (&inst.relocs[0].exp, &p, GE_IMM_PREFIX))
		return PARSE_OPERAND_FAIL;
	      /* If the offset is 0, find out if it's a +0 or -0.  */
	      if (inst.relocs[0].exp.X_op == O_constant
		  && inst.relocs[0].exp.X_add_number == 0)
		{
		  skip_whitespace (q);
		  if (*q == '#')
		    {
		      q++;
		      skip_whitespace (q);
		    }
		  if (*q == '-')
		    inst.operands[i].negative = 1;
		}
	    }
	}
    }

  /* If at this point neither .preind nor .postind is set, we have a
     bare [Rn]{!}, which is shorthand for [Rn,#0]{!}.  */
  if (inst.operands[i].preind == 0 && inst.operands[i].postind == 0)
    {
      inst.operands[i].preind = 1;
      inst.relocs[0].exp.X_op = O_constant;
      inst.relocs[0].exp.X_add_number = 0;
    }
  *str = p;
  return PARSE_OPERAND_SUCCESS;
}
6230
6231static int
6232parse_address (char **str, int i)
6233{
6234  return parse_address_main (str, i, 0, GROUP_LDR) == PARSE_OPERAND_SUCCESS
6235	 ? SUCCESS : FAIL;
6236}
6237
6238static parse_operand_result
6239parse_address_group_reloc (char **str, int i, group_reloc_type type)
6240{
6241  return parse_address_main (str, i, 1, type);
6242}
6243
/* Parse an operand for a MOVW or MOVT instruction: an optional '#',
   an optional :lower16: or :upper16: relocation specifier, then an
   expression.  Sets inst.relocs[0] (assumed to be BFD_RELOC_UNUSED on
   entry) and returns SUCCESS or FAIL.  */
static int
parse_half (char **str)
{
  char * p;

  p = *str;
  skip_past_char (&p, '#');
  if (strncasecmp (p, ":lower16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVW;
  else if (strncasecmp (p, ":upper16:", 9) == 0)
    inst.relocs[0].type = BFD_RELOC_ARM_MOVT;

  /* A relocation specifier was matched above - step over it.  */
  if (inst.relocs[0].type != BFD_RELOC_UNUSED)
    {
      p += 9;
      skip_whitespace (p);
    }

  if (my_get_expression (&inst.relocs[0].exp, &p, GE_NO_PREFIX))
    return FAIL;

  /* With no relocation specifier, the operand must be a constant that
     fits in 16 bits.  */
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      if (inst.relocs[0].exp.X_op != O_constant)
	{
	  inst.error = _("constant expression expected");
	  return FAIL;
	}
      if (inst.relocs[0].exp.X_add_number < 0
	  || inst.relocs[0].exp.X_add_number > 0xffff)
	{
	  inst.error = _("immediate value out of range");
	  return FAIL;
	}
    }
  *str = p;
  return SUCCESS;
}
6283
6284/* Miscellaneous. */
6285
/* Parse a PSR flag operand.  The value returned is FAIL on syntax error,
   or a bitmask suitable to be or-ed into the ARM msr instruction.
   LHS is TRUE when the operand is the destination of an MSR (i.e. it is
   being written), which affects the default mask bits chosen.  */
static int
parse_psr (char **str, bfd_boolean lhs)
{
  char *p;
  unsigned long psr_field;
  const struct asm_psr *psr;
  char *start;
  bfd_boolean is_apsr = FALSE;
  bfd_boolean m_profile = ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m);

  /* PR gas/12698:  If the user has specified -march=all then m_profile will
     be TRUE, but we want to ignore it in this case as we are building for any
     CPU type, including non-m variants.  */
  if (ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any))
    m_profile = FALSE;

  /* CPSR's and SPSR's can now be lowercase.  This is just a convenience
     feature for ease of use and backwards compatibility.  */
  p = *str;
  if (strncasecmp (p, "SPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = SPSR_BIT;
    }
  else if (strncasecmp (p, "CPSR", 4) == 0)
    {
      if (m_profile)
	goto unsupported_psr;

      psr_field = 0;
    }
  else if (strncasecmp (p, "APSR", 4) == 0)
    {
      /* APSR[_<bits>] can be used as a synonym for CPSR[_<flags>] on ARMv7-A
	 and ARMv7-R architecture CPUs.  */
      is_apsr = TRUE;
      psr_field = 0;
    }
  else if (m_profile)
    {
      /* M-profile special registers: look the whole name up in the
	 v7m PSR hash table.  */
      start = p;
      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      /* For these names any '_<bits>' suffix is handled separately, so
	 cut the lookup at the final 'r'/'R' of the register name.  */
      if (strncasecmp (start, "iapsr", 5) == 0
	  || strncasecmp (start, "eapsr", 5) == 0
	  || strncasecmp (start, "xpsr", 4) == 0
	  || strncasecmp (start, "psr", 3) == 0)
	p = start + strcspn (start, "rR") + 1;

      psr = (const struct asm_psr *) str_hash_find_n (arm_v7m_psr_hsh, start,
						      p - start);

      if (!psr)
	return FAIL;

      /* If APSR is being written, a bitfield may be specified.  Note that
	 APSR itself is handled above.  */
      if (psr->field <= 3)
	{
	  psr_field = psr->field;
	  is_apsr = TRUE;
	  goto check_suffix;
	}

      *str = p;
      /* M-profile MSR instructions have the mask field set to "10", except
	 *PSR variants which modify APSR, which may use a different mask (and
	 have been handled already).  Do that by setting the PSR_f field
	 here.  */
      return psr->field | (lhs ? PSR_f : 0);
    }
  else
    goto unsupported_psr;

  p += 4;
 check_suffix:
  if (*p == '_')
    {
      /* A suffix follows.  */
      p++;
      start = p;

      do
	p++;
      while (ISALNUM (*p) || *p == '_');

      if (is_apsr)
	{
	  /* APSR uses a notation for bits, rather than fields.  */
	  unsigned int nzcvq_bits = 0;
	  unsigned int g_bit = 0;
	  char *bit;

	  /* 0x20 (and g_bit's 0x2) flag a repeated bit so the bitmask
	     can be rejected below.  */
	  for (bit = start; bit != p; bit++)
	    {
	      switch (TOLOWER (*bit))
		{
		case 'n':
		  nzcvq_bits |= (nzcvq_bits & 0x01) ? 0x20 : 0x01;
		  break;

		case 'z':
		  nzcvq_bits |= (nzcvq_bits & 0x02) ? 0x20 : 0x02;
		  break;

		case 'c':
		  nzcvq_bits |= (nzcvq_bits & 0x04) ? 0x20 : 0x04;
		  break;

		case 'v':
		  nzcvq_bits |= (nzcvq_bits & 0x08) ? 0x20 : 0x08;
		  break;

		case 'q':
		  nzcvq_bits |= (nzcvq_bits & 0x10) ? 0x20 : 0x10;
		  break;

		case 'g':
		  g_bit |= (g_bit & 0x1) ? 0x2 : 0x1;
		  break;

		default:
		  inst.error = _("unexpected bit specified after APSR");
		  return FAIL;
		}
	    }

	  /* All five of n, z, c, v and q must be present exactly once.  */
	  if (nzcvq_bits == 0x1f)
	    psr_field |= PSR_f;

	  if (g_bit == 0x1)
	    {
	      if (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp))
		{
		  inst.error = _("selected processor does not "
				 "support DSP extension");
		  return FAIL;
		}

	      psr_field |= PSR_s;
	    }

	  /* Reject duplicated bits and incomplete nzcvq sets.  */
	  if ((nzcvq_bits & 0x20) != 0
	      || (nzcvq_bits != 0x1f && nzcvq_bits != 0)
	      || (g_bit & 0x2) != 0)
	    {
	      inst.error = _("bad bitmask specified after APSR");
	      return FAIL;
	    }
	}
      else
	{
	  /* Non-APSR: look the suffix up as a named field set.  */
	  psr = (const struct asm_psr *) str_hash_find_n (arm_psr_hsh, start,
							  p - start);
	  if (!psr)
	    goto error;

	  psr_field |= psr->field;
	}
    }
  else
    {
      if (ISALNUM (*p))
	goto error;    /* Garbage after "[CS]PSR".  */

      /* Unadorned APSR is equivalent to APSR_nzcvq/CPSR_f (for writes).  This
	 is deprecated, but allow it anyway.  */
      if (is_apsr && lhs)
	{
	  psr_field |= PSR_f;
	  as_tsktsk (_("writing to APSR without specifying a bitmask is "
		       "deprecated"));
	}
      else if (!m_profile)
	/* These bits are never right for M-profile devices: don't set them
	   (only code paths which read/write APSR reach here).  */
	psr_field |= (PSR_c | PSR_f);
    }
  *str = p;
  return psr_field;

 unsupported_psr:
  inst.error = _("selected processor does not support requested special "
		 "purpose register");
  return FAIL;

 error:
  inst.error = _("flag for {c}psr instruction expected");
  return FAIL;
}
6482
6483static int
6484parse_sys_vldr_vstr (char **str)
6485{
6486  unsigned i;
6487  int val = FAIL;
6488  struct {
6489    const char *name;
6490    int regl;
6491    int regh;
6492  } sysregs[] = {
6493    {"FPSCR",		0x1, 0x0},
6494    {"FPSCR_nzcvqc",	0x2, 0x0},
6495    {"VPR",		0x4, 0x1},
6496    {"P0",		0x5, 0x1},
6497    {"FPCXTNS",		0x6, 0x1},
6498    {"FPCXTS",		0x7, 0x1}
6499  };
6500  char *op_end = strchr (*str, ',');
6501  size_t op_strlen = op_end - *str;
6502
6503  for (i = 0; i < sizeof (sysregs) / sizeof (sysregs[0]); i++)
6504    {
6505      if (!strncmp (*str, sysregs[i].name, op_strlen))
6506	{
6507	  val = sysregs[i].regl | (sysregs[i].regh << 3);
6508	  *str = op_end;
6509	  break;
6510	}
6511    }
6512
6513  return val;
6514}
6515
6516/* Parse the flags argument to CPSI[ED].  Returns FAIL on error, or a
6517   value suitable for splatting into the AIF field of the instruction.	*/
6518
6519static int
6520parse_cps_flags (char **str)
6521{
6522  int val = 0;
6523  int saw_a_flag = 0;
6524  char *s = *str;
6525
6526  for (;;)
6527    switch (*s++)
6528      {
6529      case '\0': case ',':
6530	goto done;
6531
6532      case 'a': case 'A': saw_a_flag = 1; val |= 0x4; break;
6533      case 'i': case 'I': saw_a_flag = 1; val |= 0x2; break;
6534      case 'f': case 'F': saw_a_flag = 1; val |= 0x1; break;
6535
6536      default:
6537	inst.error = _("unrecognized CPS flag");
6538	return FAIL;
6539      }
6540
6541 done:
6542  if (saw_a_flag == 0)
6543    {
6544      inst.error = _("missing CPS flags");
6545      return FAIL;
6546    }
6547
6548  *str = s - 1;
6549  return val;
6550}
6551
6552/* Parse an endian specifier ("BE" or "LE", case insensitive);
6553   returns 0 for big-endian, 1 for little-endian, FAIL for an error.  */
6554
6555static int
6556parse_endian_specifier (char **str)
6557{
6558  int little_endian;
6559  char *s = *str;
6560
6561  if (strncasecmp (s, "BE", 2))
6562    little_endian = 0;
6563  else if (strncasecmp (s, "LE", 2))
6564    little_endian = 1;
6565  else
6566    {
6567      inst.error = _("valid endian specifiers are be or le");
6568      return FAIL;
6569    }
6570
6571  if (ISALNUM (s[2]) || s[2] == '_')
6572    {
6573      inst.error = _("valid endian specifiers are be or le");
6574      return FAIL;
6575    }
6576
6577  *str = s + 2;
6578  return little_endian;
6579}
6580
6581/* Parse a rotation specifier: ROR #0, #8, #16, #24.  *val receives a
6582   value suitable for poking into the rotate field of an sxt or sxta
6583   instruction, or FAIL on error.  */
6584
6585static int
6586parse_ror (char **str)
6587{
6588  int rot;
6589  char *s = *str;
6590
6591  if (strncasecmp (s, "ROR", 3) == 0)
6592    s += 3;
6593  else
6594    {
6595      inst.error = _("missing rotation field after comma");
6596      return FAIL;
6597    }
6598
6599  if (parse_immediate (&s, &rot, 0, 24, FALSE) == FAIL)
6600    return FAIL;
6601
6602  switch (rot)
6603    {
6604    case  0: *str = s; return 0x0;
6605    case  8: *str = s; return 0x1;
6606    case 16: *str = s; return 0x2;
6607    case 24: *str = s; return 0x3;
6608
6609    default:
6610      inst.error = _("rotation can only be 0, 8, 16, or 24");
6611      return FAIL;
6612    }
6613}
6614
6615/* Parse a conditional code (from conds[] below).  The value returned is in the
6616   range 0 .. 14, or FAIL.  */
6617static int
6618parse_cond (char **str)
6619{
6620  char *q;
6621  const struct asm_cond *c;
6622  int n;
6623  /* Condition codes are always 2 characters, so matching up to
6624     3 characters is sufficient.  */
6625  char cond[3];
6626
6627  q = *str;
6628  n = 0;
6629  while (ISALPHA (*q) && n < 3)
6630    {
6631      cond[n] = TOLOWER (*q);
6632      q++;
6633      n++;
6634    }
6635
6636  c = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, cond, n);
6637  if (!c)
6638    {
6639      inst.error = _("condition required");
6640      return FAIL;
6641    }
6642
6643  *str = q;
6644  return c->value;
6645}
6646
6647/* Parse an option for a barrier instruction.  Returns the encoding for the
6648   option, or FAIL.  */
6649static int
6650parse_barrier (char **str)
6651{
6652  char *p, *q;
6653  const struct asm_barrier_opt *o;
6654
6655  p = q = *str;
6656  while (ISALPHA (*q))
6657    q++;
6658
6659  o = (const struct asm_barrier_opt *) str_hash_find_n (arm_barrier_opt_hsh, p,
6660							q - p);
6661  if (!o)
6662    return FAIL;
6663
6664  if (!mark_feature_used (&o->arch))
6665    return FAIL;
6666
6667  *str = q;
6668  return o->value;
6669}
6670
6671/* Parse the operands of a table branch instruction.  Similar to a memory
6672   operand.  */
6673static int
6674parse_tb (char **str)
6675{
6676  char * p = *str;
6677  int reg;
6678
6679  if (skip_past_char (&p, '[') == FAIL)
6680    {
6681      inst.error = _("'[' expected");
6682      return FAIL;
6683    }
6684
6685  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6686    {
6687      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6688      return FAIL;
6689    }
6690  inst.operands[0].reg = reg;
6691
6692  if (skip_past_comma (&p) == FAIL)
6693    {
6694      inst.error = _("',' expected");
6695      return FAIL;
6696    }
6697
6698  if ((reg = arm_reg_parse (&p, REG_TYPE_RN)) == FAIL)
6699    {
6700      inst.error = _(reg_expected_msgs[REG_TYPE_RN]);
6701      return FAIL;
6702    }
6703  inst.operands[0].imm = reg;
6704
6705  if (skip_past_comma (&p) == SUCCESS)
6706    {
6707      if (parse_shift (&p, 0, SHIFT_LSL_IMMEDIATE) == FAIL)
6708	return FAIL;
6709      if (inst.relocs[0].exp.X_add_number != 1)
6710	{
6711	  inst.error = _("invalid shift");
6712	  return FAIL;
6713	}
6714      inst.operands[0].shifted = 1;
6715    }
6716
6717  if (skip_past_char (&p, ']') == FAIL)
6718    {
6719      inst.error = _("']' expected");
6720      return FAIL;
6721    }
6722  *str = p;
6723  return SUCCESS;
6724}
6725
/* Parse the operands of a Neon VMOV instruction. See do_neon_mov for more
   information on the types the operands can take and how they are encoded.
   Up to four operands may be read; this function handles setting the
   ".present" field for each read operand itself.
   Updates STR and WHICH_OPERAND if parsing is successful and returns SUCCESS,
   else returns FAIL.

   The "Case N" comments below refer to the operand layouts enumerated in
   do_neon_mov.  The parse is driven by probing register classes in order,
   so the sequence of attempts is significant.  */

static int
parse_neon_mov (char **str, int *which_operand)
{
  /* I indexes the next inst.operands[] slot to fill; VAL holds each
     parsed register number (or FAIL).  */
  int i = *which_operand, val;
  enum arm_reg_type rtype;
  char *ptr = *str;
  struct neon_type_el optype;

   if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
    {
      /* Cases 17 or 19.  */
      inst.operands[i].reg = val;
      inst.operands[i].isvec = 1;
      inst.operands[i].isscalar = 2;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 17: VMOV<c>.<dt> <Qd[idx]>, <Rt>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 19: VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].vectype = optype;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	    goto wanted_arm;

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;
	}
      else
	{
	  first_error (_("expected ARM or MVE vector register"));
	  return FAIL;
	}
    }
   else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
    {
      /* Case 4: VMOV<c><q>.<size> <Dn[x]>, <Rd>.  */
      inst.operands[i].reg = val;
      inst.operands[i].isscalar = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
	goto wanted_arm;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].present = 1;
    }
  else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype, &optype))
	    != FAIL)
	   || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype, &optype))
	       != FAIL))
    {
      /* Cases 0, 1, 2, 3, 5 (D only).  */
      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
      inst.operands[i].isvec = 1;
      inst.operands[i].vectype = optype;
      inst.operands[i++].present = 1;

      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Case 5: VMOV<c><q> <Dm>, <Rd>, <Rn>.
	     Case 13: VMOV <Sd>, <Rm>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].present = 1;

	  if (rtype == REG_TYPE_NQ)
	    {
	      first_error (_("can't use Neon quad register here"));
	      return FAIL;
	    }
	  else if (rtype != REG_TYPE_VFS)
	    {
	      /* Case 5: a second ARM core register follows.  */
	      i++;
	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;
	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;
	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (((val = arm_typed_reg_parse (&ptr, REG_TYPE_NSDQ, &rtype,
		&optype)) != FAIL)
	       || ((val = arm_typed_reg_parse (&ptr, REG_TYPE_MQ, &rtype,
		   &optype)) != FAIL))
	{
	  /* Case 0: VMOV<c><q> <Qd>, <Qm>
	     Case 1: VMOV<c><q> <Dd>, <Dm>
	     Case 8: VMOV.F32 <Sd>, <Sm>
	     Case 15: VMOV <Sd>, <Se>, <Rn>, <Rm>  */

	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isquad = (rtype == REG_TYPE_NQ);
	  inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	  inst.operands[i].isvec = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;

	  if (skip_past_comma (&ptr) == SUCCESS)
	    {
	      /* Case 15.  */
	      i++;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i++].present = 1;

	      if (skip_past_comma (&ptr) == FAIL)
		goto wanted_comma;

	      if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) == FAIL)
		goto wanted_arm;

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].present = 1;
	    }
	}
      else if (parse_qfloat_immediate (&ptr, &inst.operands[i].imm) == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<float-imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<float-imm>
	     Case 10: VMOV.F32 <Sd>, #<imm>
	     Case 11: VMOV.F64 <Dd>, #<imm>  */
	inst.operands[i].immisfloat = 1;
      else if (parse_big_immediate (&ptr, i, NULL, /*allow_symbol_p=*/FALSE)
	       == SUCCESS)
	  /* Case 2: VMOV<c><q>.<dt> <Qd>, #<imm>
	     Case 3: VMOV<c><q>.<dt> <Dd>, #<imm>  */
	;
      else
	{
	  first_error (_("expected <Rm> or <Dm> or <Qm> operand"));
	  return FAIL;
	}
    }
  else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
    {
      /* Cases 6, 7, 16, 18.  */
      inst.operands[i].reg = val;
      inst.operands[i].isreg = 1;
      inst.operands[i++].present = 1;

      if (skip_past_comma (&ptr) == FAIL)
	goto wanted_comma;

      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ)) != FAIL)
	{
	  /* Case 18: VMOV<c>.<dt> <Rt>, <Qn[idx]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 2;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_VFD)) != FAIL)
	{
	  /* Case 6: VMOV<c><q>.<dt> <Rd>, <Dn[x]>  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isscalar = 1;
	  inst.operands[i].present = 1;
	  inst.operands[i].vectype = optype;
	}
      else if ((val = arm_reg_parse (&ptr, REG_TYPE_RN)) != FAIL)
	{
	  /* Two ARM core registers so far: cases 7, 14 or 16.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i++].present = 1;

	  if (skip_past_comma (&ptr) == FAIL)
	    goto wanted_comma;

	  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFSD, &rtype, &optype))
	      != FAIL)
	    {
	      /* Case 7: VMOV<c><q> <Rd>, <Rn>, <Dm>  */

	      inst.operands[i].reg = val;
	      inst.operands[i].isreg = 1;
	      inst.operands[i].isvec = 1;
	      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);
	      inst.operands[i].vectype = optype;
	      inst.operands[i].present = 1;

	      if (rtype == REG_TYPE_VFS)
		{
		  /* Case 14.  */
		  i++;
		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;
		  if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL,
						  &optype)) == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_VFS]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isreg = 1;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].issingle = 1;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	    }
	  else
	    {
	      if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		       != FAIL)
		{
		  /* Case 16: VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>  */
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i++].present = 1;

		  if (skip_past_comma (&ptr) == FAIL)
		    goto wanted_comma;

		  if ((val = parse_scalar (&ptr, 8, &optype, REG_TYPE_MQ))
		      == FAIL)
		    {
		      first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
		      return FAIL;
		    }
		  inst.operands[i].reg = val;
		  inst.operands[i].isvec = 1;
		  inst.operands[i].isscalar = 2;
		  inst.operands[i].vectype = optype;
		  inst.operands[i].present = 1;
		}
	      else
		{
		  first_error (_("VFP single, double or MVE vector register"
			       " expected"));
		  return FAIL;
		}
	    }
	}
      else if ((val = arm_typed_reg_parse (&ptr, REG_TYPE_VFS, NULL, &optype))
	       != FAIL)
	{
	  /* Case 13.  */
	  inst.operands[i].reg = val;
	  inst.operands[i].isreg = 1;
	  inst.operands[i].isvec = 1;
	  inst.operands[i].issingle = 1;
	  inst.operands[i].vectype = optype;
	  inst.operands[i].present = 1;
	}
    }
  else
    {
      first_error (_("parse error"));
      return FAIL;
    }

  /* Successfully parsed the operands. Update args.  */
  *which_operand = i;
  *str = ptr;
  return SUCCESS;

  /* Shared error exits; these leave *STR and *WHICH_OPERAND untouched.  */
 wanted_comma:
  first_error (_("expected comma"));
  return FAIL;

 wanted_arm:
  first_error (_(reg_expected_msgs[REG_TYPE_RN]));
  return FAIL;
}
7046
/* Use this macro when the operand constraints are different
   for ARM and THUMB (e.g. ldrd).  The ARM operand code occupies the low
   16 bits and the Thumb code the high 16 bits; parse_operands selects
   the appropriate half depending on the THUMB argument.  */
#define MIX_ARM_THUMB_OPERANDS(arm_operand, thumb_operand) \
	((arm_operand) | ((thumb_operand) << 16))
7051
/* Matcher codes for parse_operands.  Codes at or above OP_FIRST_OPTIONAL
   denote optional operands; parse_operands records a backtrack point
   before attempting to match them.  */
enum operand_parse_code
{
  OP_stop,	/* end of line */

  OP_RR,	/* ARM register */
  OP_RRnpc,	/* ARM register, not r15 */
  OP_RRnpcsp,	/* ARM register, neither r15 nor r13 (a.k.a. 'BadReg') */
  OP_RRnpcb,	/* ARM register, not r15, in square brackets */
  OP_RRnpctw,	/* ARM register, not r15 in Thumb-state or with writeback,
		   optional trailing ! */
  OP_RRw,	/* ARM register, not r15, optional trailing ! */
  OP_RCP,	/* Coprocessor number */
  OP_RCN,	/* Coprocessor register */
  OP_RF,	/* FPA register */
  OP_RVS,	/* VFP single precision register */
  OP_RVD,	/* VFP double precision register (0..15) */
  OP_RND,       /* Neon double precision register (0..31) */
  OP_RNDMQ,     /* Neon double precision (0..31) or MVE vector register.  */
  OP_RNDMQR,    /* Neon double precision (0..31), MVE vector or ARM register.
		 */
  OP_RNSDMQR,    /* Neon single or double precision, MVE vector or ARM register.
		 */
  OP_RNQ,	/* Neon quad precision register */
  OP_RNQMQ,	/* Neon quad or MVE vector register.  */
  OP_RVSD,	/* VFP single or double precision register */
  OP_RVSD_COND,	/* VFP single, double precision register or condition code.  */
  OP_RVSDMQ,	/* VFP single, double precision or MVE vector register.  */
  OP_RNSD,      /* Neon single or double precision register */
  OP_RNDQ,      /* Neon double or quad precision register */
  OP_RNDQMQ,     /* Neon double, quad or MVE vector register.  */
  OP_RNDQMQR,   /* Neon double, quad, MVE vector or ARM register.  */
  OP_RNSDQ,	/* Neon single, double or quad precision register */
  OP_RNSC,      /* Neon scalar D[X] */
  OP_RVC,	/* VFP control register */
  OP_RMF,	/* Maverick F register */
  OP_RMD,	/* Maverick D register */
  OP_RMFX,	/* Maverick FX register */
  OP_RMDX,	/* Maverick DX register */
  OP_RMAX,	/* Maverick AX register */
  OP_RMDS,	/* Maverick DSPSC register */
  OP_RIWR,	/* iWMMXt wR register */
  OP_RIWC,	/* iWMMXt wC register */
  OP_RIWG,	/* iWMMXt wCG register */
  OP_RXA,	/* XScale accumulator register */

  OP_RNSDMQ,	/* Neon single, double or MVE vector register */
  OP_RNSDQMQ,	/* Neon single, double or quad register or MVE vector register
		 */
  OP_RNSDQMQR,	/* Neon single, double or quad register, MVE vector register or
		   GPR (no SP/SP)  */
  OP_RMQ,	/* MVE vector register.  */
  OP_RMQRZ,	/* MVE vector or ARM register including ZR.  */
  OP_RMQRR,     /* MVE vector or ARM register.  */

  /* New operands for Armv8.1-M Mainline.  */
  OP_LR,	/* ARM LR register */
  OP_RRe,	/* ARM register, only even numbered.  */
  OP_RRo,	/* ARM register, only odd numbered, not r13 or r15.  */
  OP_RRnpcsp_I32, /* ARM register (no BadReg) or literal 1 .. 32 */
  OP_RR_ZR,	/* ARM register or ZR but no PC */

  OP_REGLST,	/* ARM register list */
  OP_CLRMLST,	/* CLRM register list */
  OP_VRSLST,	/* VFP single-precision register list */
  OP_VRDLST,	/* VFP double-precision register list */
  OP_VRSDLST,   /* VFP single or double-precision register list (& quad) */
  OP_NRDLST,    /* Neon double-precision register list (d0-d31, qN aliases) */
  OP_NSTRLST,   /* Neon element/structure list */
  OP_VRSDVLST,  /* VFP single or double-precision register list and VPR */
  OP_MSTRLST2,	/* MVE vector list with two elements.  */
  OP_MSTRLST4,	/* MVE vector list with four elements.  */

  OP_RNDQ_I0,   /* Neon D or Q reg, or immediate zero.  */
  OP_RVSD_I0,	/* VFP S or D reg, or immediate zero.  */
  OP_RSVD_FI0, /* VFP S or D reg, or floating point immediate zero.  */
  OP_RSVDMQ_FI0, /* VFP S, D, MVE vector register or floating point immediate
		    zero.  */
  OP_RR_RNSC,   /* ARM reg or Neon scalar.  */
  OP_RNSD_RNSC, /* Neon S or D reg, or Neon scalar.  */
  OP_RNSDQ_RNSC, /* Vector S, D or Q reg, or Neon scalar.  */
  OP_RNSDQ_RNSC_MQ, /* Vector S, D or Q reg, Neon scalar or MVE vector register.
		     */
  OP_RNSDQ_RNSC_MQ_RR, /* Vector S, D or Q reg, or MVE vector reg , or Neon
			  scalar, or ARM register.  */
  OP_RNDQ_RNSC, /* Neon D or Q reg, or Neon scalar.  */
  OP_RNDQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, or ARM register.  */
  OP_RNDQMQ_RNSC_RR, /* Neon D or Q reg, Neon scalar, MVE vector or ARM
			register.  */
  OP_RNDQMQ_RNSC, /* Neon D, Q or MVE vector reg, or Neon scalar.  */
  OP_RND_RNSC,  /* Neon D reg, or Neon scalar.  */
  OP_VMOV,      /* Neon VMOV operands.  */
  OP_RNDQ_Ibig,	/* Neon D or Q reg, or big immediate for logic and VMVN.  */
  /* Neon D, Q or MVE vector register, or big immediate for logic and VMVN.  */
  OP_RNDQMQ_Ibig,
  OP_RNDQ_I63b, /* Neon D or Q reg, or immediate for shift.  */
  OP_RNDQMQ_I63b_RR, /* Neon D or Q reg, immediate for shift, MVE vector or
			ARM register.  */
  OP_RIWR_I32z, /* iWMMXt wR register, or immediate 0 .. 32 for iWMMXt2.  */
  OP_VLDR,	/* VLDR operand.  */

  OP_I0,        /* immediate zero */
  OP_I7,	/* immediate value 0 .. 7 */
  OP_I15,	/*		   0 .. 15 */
  OP_I16,	/*		   1 .. 16 */
  OP_I16z,      /*                 0 .. 16 */
  OP_I31,	/*		   0 .. 31 */
  OP_I31w,	/*		   0 .. 31, optional trailing ! */
  OP_I32,	/*		   1 .. 32 */
  OP_I32z,	/*		   0 .. 32 */
  OP_I48_I64,	/*		   48 or 64 */
  OP_I63,	/*		   0 .. 63 */
  OP_I63s,	/*		 -64 .. 63 */
  OP_I64,	/*		   1 .. 64 */
  OP_I64z,	/*		   0 .. 64 */
  OP_I127,	/*		   0 .. 127 */
  OP_I255,	/*		   0 .. 255 */
  OP_I511,	/*		   0 .. 511 */
  OP_I4095,	/*		   0 .. 4095 */
  OP_I8191,	/*		   0 .. 8191 */
  OP_I4b,	/* immediate, prefix optional, 1 .. 4 */
  OP_I7b,	/*			       0 .. 7 */
  OP_I15b,	/*			       0 .. 15 */
  OP_I31b,	/*			       0 .. 31 */

  OP_SH,	/* shifter operand */
  OP_SHG,	/* shifter operand with possible group relocation */
  OP_ADDR,	/* Memory address expression (any mode) */
  OP_ADDRMVE,	/* Memory address expression for MVE's VSTR/VLDR.  */
  OP_ADDRGLDR,	/* Mem addr expr (any mode) with possible LDR group reloc */
  OP_ADDRGLDRS, /* Mem addr expr (any mode) with possible LDRS group reloc */
  OP_ADDRGLDC,  /* Mem addr expr (any mode) with possible LDC group reloc */
  OP_EXP,	/* arbitrary expression */
  OP_EXPi,	/* same, with optional immediate prefix */
  OP_EXPr,	/* same, with optional relocation suffix */
  OP_EXPs,	/* same, with optional non-first operand relocation suffix */
  OP_HALF,	/* 0 .. 65535 or low/high reloc.  */
  OP_IROT1,	/* VCADD rotate immediate: 90, 270.  */
  OP_IROT2,	/* VCMLA rotate immediate: 0, 90, 180, 270.  */

  OP_CPSF,	/* CPS flags */
  OP_ENDI,	/* Endianness specifier */
  OP_wPSR,	/* CPSR/SPSR/APSR mask for msr (writing).  */
  OP_rPSR,	/* CPSR/SPSR/APSR mask for msr (reading).  */
  OP_COND,	/* conditional code */
  OP_TB,	/* Table branch.  */

  OP_APSR_RR,   /* ARM register or "APSR_nzcv".  */

  OP_RRnpc_I0,	/* ARM register or literal 0 */
  OP_RR_EXr,	/* ARM register or expression with opt. reloc stuff. */
  OP_RR_EXi,	/* ARM register or expression with imm prefix */
  OP_RF_IF,	/* FPA register or immediate */
  OP_RIWR_RIWC, /* iWMMXt R or C reg */
  OP_RIWC_RIWG, /* iWMMXt wC or wCG reg */

  /* Optional operands.	 */
  OP_oI7b,	 /* immediate, prefix optional, 0 .. 7 */
  OP_oI31b,	 /*				0 .. 31 */
  OP_oI32b,      /*                             1 .. 32 */
  OP_oI32z,      /*                             0 .. 32 */
  OP_oIffffb,	 /*				0 .. 65535 */
  OP_oI255c,	 /*	  curly-brace enclosed, 0 .. 255 */

  OP_oRR,	 /* ARM register */
  OP_oLR,	 /* ARM LR register */
  OP_oRRnpc,	 /* ARM register, not the PC */
  OP_oRRnpcsp,	 /* ARM register, neither the PC nor the SP (a.k.a. BadReg) */
  OP_oRRw,	 /* ARM register, not r15, optional trailing ! */
  OP_oRND,       /* Optional Neon double precision register */
  OP_oRNQ,       /* Optional Neon quad precision register */
  OP_oRNDQMQ,     /* Optional Neon double, quad or MVE vector register.  */
  OP_oRNDQ,      /* Optional Neon double or quad precision register */
  OP_oRNSDQ,	 /* Optional single, double or quad precision vector register */
  OP_oRNSDQMQ,	 /* Optional single, double or quad register or MVE vector
		    register.  */
  OP_oRNSDMQ,	 /* Optional single, double register or MVE vector
		    register.  */
  OP_oSHll,	 /* LSL immediate */
  OP_oSHar,	 /* ASR immediate */
  OP_oSHllar,	 /* LSL or ASR immediate */
  OP_oROR,	 /* ROR 0/8/16/24 */
  OP_oBARRIER_I15, /* Option argument for a barrier instruction.  */

  OP_oRMQRZ,	/* optional MVE vector or ARM register including ZR.  */

  /* Some pre-defined mixed (ARM/THUMB) operands.  */
  OP_RR_npcsp		= MIX_ARM_THUMB_OPERANDS (OP_RR, OP_RRnpcsp),
  OP_RRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_RRnpc, OP_RRnpcsp),
  OP_oRRnpc_npcsp	= MIX_ARM_THUMB_OPERANDS (OP_oRRnpc, OP_oRRnpcsp),

  /* OP_oI7b must remain the first of the OP_o* codes above: parse_operands
     treats every code >= OP_FIRST_OPTIONAL as optional and sets up a
     backtrack point before matching it.  */
  OP_FIRST_OPTIONAL = OP_oI7b
};
7245
7246/* Generic instruction operand parser.	This does no encoding and no
7247   semantic validation; it merely squirrels values away in the inst
7248   structure.  Returns SUCCESS or FAIL depending on whether the
7249   specified grammar matched.  */
7250static int
7251parse_operands (char *str, const unsigned int *pattern, bfd_boolean thumb)
7252{
7253  unsigned const int *upat = pattern;
7254  char *backtrack_pos = 0;
7255  const char *backtrack_error = 0;
7256  int i, val = 0, backtrack_index = 0;
7257  enum arm_reg_type rtype;
7258  parse_operand_result result;
7259  unsigned int op_parse_code;
7260  bfd_boolean partial_match;
7261
7262#define po_char_or_fail(chr)			\
7263  do						\
7264    {						\
7265      if (skip_past_char (&str, chr) == FAIL)	\
7266	goto bad_args;				\
7267    }						\
7268  while (0)
7269
7270#define po_reg_or_fail(regtype)					\
7271  do								\
7272    {								\
7273      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7274				 & inst.operands[i].vectype);	\
7275      if (val == FAIL)						\
7276	{							\
7277	  first_error (_(reg_expected_msgs[regtype]));		\
7278	  goto failure;						\
7279	}							\
7280      inst.operands[i].reg = val;				\
7281      inst.operands[i].isreg = 1;				\
7282      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7283      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7284      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7285			     || rtype == REG_TYPE_VFD		\
7286			     || rtype == REG_TYPE_NQ);		\
7287      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7288    }								\
7289  while (0)
7290
7291#define po_reg_or_goto(regtype, label)				\
7292  do								\
7293    {								\
7294      val = arm_typed_reg_parse (& str, regtype, & rtype,	\
7295				 & inst.operands[i].vectype);	\
7296      if (val == FAIL)						\
7297	goto label;						\
7298								\
7299      inst.operands[i].reg = val;				\
7300      inst.operands[i].isreg = 1;				\
7301      inst.operands[i].isquad = (rtype == REG_TYPE_NQ);		\
7302      inst.operands[i].issingle = (rtype == REG_TYPE_VFS);	\
7303      inst.operands[i].isvec = (rtype == REG_TYPE_VFS		\
7304			     || rtype == REG_TYPE_VFD		\
7305			     || rtype == REG_TYPE_NQ);		\
7306      inst.operands[i].iszr = (rtype == REG_TYPE_ZR);		\
7307    }								\
7308  while (0)
7309
7310#define po_imm_or_fail(min, max, popt)				\
7311  do								\
7312    {								\
7313      if (parse_immediate (&str, &val, min, max, popt) == FAIL)	\
7314	goto failure;						\
7315      inst.operands[i].imm = val;				\
7316    }								\
7317  while (0)
7318
7319#define po_imm1_or_imm2_or_fail(imm1, imm2, popt)		\
7320  do								\
7321    {								\
7322      expressionS exp;						\
7323      my_get_expression (&exp, &str, popt);			\
7324      if (exp.X_op != O_constant)				\
7325	{							\
7326	  inst.error = _("constant expression required");	\
7327	  goto failure;						\
7328	}							\
7329      if (exp.X_add_number != imm1 && exp.X_add_number != imm2) \
7330	{							\
7331	  inst.error = _("immediate value 48 or 64 expected");	\
7332	  goto failure;						\
7333	}							\
7334      inst.operands[i].imm = exp.X_add_number;			\
7335    }								\
7336  while (0)
7337
7338#define po_scalar_or_goto(elsz, label, reg_type)			\
7339  do									\
7340    {									\
7341      val = parse_scalar (& str, elsz, & inst.operands[i].vectype,	\
7342			  reg_type);					\
7343      if (val == FAIL)							\
7344	goto label;							\
7345      inst.operands[i].reg = val;					\
7346      inst.operands[i].isscalar = 1;					\
7347    }									\
7348  while (0)
7349
7350#define po_misc_or_fail(expr)			\
7351  do						\
7352    {						\
7353      if (expr)					\
7354	goto failure;				\
7355    }						\
7356  while (0)
7357
7358#define po_misc_or_fail_no_backtrack(expr)		\
7359  do							\
7360    {							\
7361      result = expr;					\
7362      if (result == PARSE_OPERAND_FAIL_NO_BACKTRACK)	\
7363	backtrack_pos = 0;				\
7364      if (result != PARSE_OPERAND_SUCCESS)		\
7365	goto failure;					\
7366    }							\
7367  while (0)
7368
7369#define po_barrier_or_imm(str)				   \
7370  do							   \
7371    {						 	   \
7372      val = parse_barrier (&str);			   \
7373      if (val == FAIL && ! ISALPHA (*str))		   \
7374	goto immediate;					   \
7375      if (val == FAIL					   \
7376	  /* ISB can only take SY as an option.  */	   \
7377	  || ((inst.instruction & 0xf0) == 0x60		   \
7378	       && val != 0xf))				   \
7379	{						   \
7380	   inst.error = _("invalid barrier type");	   \
7381	   backtrack_pos = 0;				   \
7382	   goto failure;				   \
7383	}						   \
7384    }							   \
7385  while (0)
7386
7387  skip_whitespace (str);
7388
7389  for (i = 0; upat[i] != OP_stop; i++)
7390    {
7391      op_parse_code = upat[i];
7392      if (op_parse_code >= 1<<16)
7393	op_parse_code = thumb ? (op_parse_code >> 16)
7394				: (op_parse_code & ((1<<16)-1));
7395
7396      if (op_parse_code >= OP_FIRST_OPTIONAL)
7397	{
7398	  /* Remember where we are in case we need to backtrack.  */
7399	  backtrack_pos = str;
7400	  backtrack_error = inst.error;
7401	  backtrack_index = i;
7402	}
7403
7404      if (i > 0 && (i > 1 || inst.operands[0].present))
7405	po_char_or_fail (',');
7406
7407      switch (op_parse_code)
7408	{
7409	  /* Registers */
7410	case OP_oRRnpc:
7411	case OP_oRRnpcsp:
7412	case OP_RRnpc:
7413	case OP_RRnpcsp:
7414	case OP_oRR:
7415	case OP_RRe:
7416	case OP_RRo:
7417	case OP_LR:
7418	case OP_oLR:
7419	case OP_RR:    po_reg_or_fail (REG_TYPE_RN);	  break;
7420	case OP_RCP:   po_reg_or_fail (REG_TYPE_CP);	  break;
7421	case OP_RCN:   po_reg_or_fail (REG_TYPE_CN);	  break;
7422	case OP_RF:    po_reg_or_fail (REG_TYPE_FN);	  break;
7423	case OP_RVS:   po_reg_or_fail (REG_TYPE_VFS);	  break;
7424	case OP_RVD:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7425	case OP_oRND:
7426	case OP_RNSDMQR:
7427	  po_reg_or_goto (REG_TYPE_VFS, try_rndmqr);
7428	  break;
7429	try_rndmqr:
7430	case OP_RNDMQR:
7431	  po_reg_or_goto (REG_TYPE_RN, try_rndmq);
7432	  break;
7433	try_rndmq:
7434	case OP_RNDMQ:
7435	  po_reg_or_goto (REG_TYPE_MQ, try_rnd);
7436	  break;
7437	try_rnd:
7438	case OP_RND:   po_reg_or_fail (REG_TYPE_VFD);	  break;
7439	case OP_RVC:
7440	  po_reg_or_goto (REG_TYPE_VFC, coproc_reg);
7441	  break;
7442	  /* Also accept generic coprocessor regs for unknown registers.  */
7443	  coproc_reg:
7444	  po_reg_or_goto (REG_TYPE_CN, vpr_po);
7445	  break;
7446	  /* Also accept P0 or p0 for VPR.P0.  Since P0 is already an
7447	     existing register with a value of 0, this seems like the
7448	     best way to parse P0.  */
7449	  vpr_po:
7450	  if (strncasecmp (str, "P0", 2) == 0)
7451	    {
7452	      str += 2;
7453	      inst.operands[i].isreg = 1;
7454	      inst.operands[i].reg = 13;
7455	    }
7456	  else
7457	    goto failure;
7458	  break;
7459	case OP_RMF:   po_reg_or_fail (REG_TYPE_MVF);	  break;
7460	case OP_RMD:   po_reg_or_fail (REG_TYPE_MVD);	  break;
7461	case OP_RMFX:  po_reg_or_fail (REG_TYPE_MVFX);	  break;
7462	case OP_RMDX:  po_reg_or_fail (REG_TYPE_MVDX);	  break;
7463	case OP_RMAX:  po_reg_or_fail (REG_TYPE_MVAX);	  break;
7464	case OP_RMDS:  po_reg_or_fail (REG_TYPE_DSPSC);	  break;
7465	case OP_RIWR:  po_reg_or_fail (REG_TYPE_MMXWR);	  break;
7466	case OP_RIWC:  po_reg_or_fail (REG_TYPE_MMXWC);	  break;
7467	case OP_RIWG:  po_reg_or_fail (REG_TYPE_MMXWCG);  break;
7468	case OP_RXA:   po_reg_or_fail (REG_TYPE_XSCALE);  break;
7469	case OP_oRNQ:
7470	case OP_RNQMQ:
7471	  po_reg_or_goto (REG_TYPE_MQ, try_nq);
7472	  break;
7473	try_nq:
7474	case OP_RNQ:   po_reg_or_fail (REG_TYPE_NQ);      break;
7475	case OP_RNSD:  po_reg_or_fail (REG_TYPE_NSD);     break;
7476	case OP_RNDQMQR:
7477	  po_reg_or_goto (REG_TYPE_RN, try_rndqmq);
7478	  break;
7479	try_rndqmq:
7480	case OP_oRNDQMQ:
7481	case OP_RNDQMQ:
7482	  po_reg_or_goto (REG_TYPE_MQ, try_rndq);
7483	  break;
7484	try_rndq:
7485	case OP_oRNDQ:
7486	case OP_RNDQ:  po_reg_or_fail (REG_TYPE_NDQ);     break;
7487	case OP_RVSDMQ:
7488	  po_reg_or_goto (REG_TYPE_MQ, try_rvsd);
7489	  break;
7490	try_rvsd:
7491	case OP_RVSD:  po_reg_or_fail (REG_TYPE_VFSD);    break;
7492	case OP_RVSD_COND:
7493	  po_reg_or_goto (REG_TYPE_VFSD, try_cond);
7494	  break;
7495	case OP_oRNSDMQ:
7496	case OP_RNSDMQ:
7497	  po_reg_or_goto (REG_TYPE_NSD, try_mq2);
7498	  break;
7499	  try_mq2:
7500	  po_reg_or_fail (REG_TYPE_MQ);
7501	  break;
7502	case OP_oRNSDQ:
7503	case OP_RNSDQ: po_reg_or_fail (REG_TYPE_NSDQ);    break;
7504	case OP_RNSDQMQR:
7505	  po_reg_or_goto (REG_TYPE_RN, try_mq);
7506	  break;
7507	  try_mq:
7508	case OP_oRNSDQMQ:
7509	case OP_RNSDQMQ:
7510	  po_reg_or_goto (REG_TYPE_MQ, try_nsdq2);
7511	  break;
7512	  try_nsdq2:
7513	  po_reg_or_fail (REG_TYPE_NSDQ);
7514	  inst.error = 0;
7515	  break;
7516	case OP_RMQRR:
7517	  po_reg_or_goto (REG_TYPE_RN, try_rmq);
7518	  break;
7519	try_rmq:
7520	case OP_RMQ:
7521	  po_reg_or_fail (REG_TYPE_MQ);
7522	  break;
7523	/* Neon scalar. Using an element size of 8 means that some invalid
7524	   scalars are accepted here, so deal with those in later code.  */
7525	case OP_RNSC:  po_scalar_or_goto (8, failure, REG_TYPE_VFD);    break;
7526
7527	case OP_RNDQ_I0:
7528	  {
7529	    po_reg_or_goto (REG_TYPE_NDQ, try_imm0);
7530	    break;
7531	    try_imm0:
7532	    po_imm_or_fail (0, 0, TRUE);
7533	  }
7534	  break;
7535
7536	case OP_RVSD_I0:
7537	  po_reg_or_goto (REG_TYPE_VFSD, try_imm0);
7538	  break;
7539
7540	case OP_RSVDMQ_FI0:
7541	  po_reg_or_goto (REG_TYPE_MQ, try_rsvd_fi0);
7542	  break;
7543	try_rsvd_fi0:
7544	case OP_RSVD_FI0:
7545	  {
7546	    po_reg_or_goto (REG_TYPE_VFSD, try_ifimm0);
7547	    break;
7548	    try_ifimm0:
7549	    if (parse_ifimm_zero (&str))
7550	      inst.operands[i].imm = 0;
7551	    else
7552	    {
7553	      inst.error
7554	        = _("only floating point zero is allowed as immediate value");
7555	      goto failure;
7556	    }
7557	  }
7558	  break;
7559
7560	case OP_RR_RNSC:
7561	  {
7562	    po_scalar_or_goto (8, try_rr, REG_TYPE_VFD);
7563	    break;
7564	    try_rr:
7565	    po_reg_or_fail (REG_TYPE_RN);
7566	  }
7567	  break;
7568
7569	case OP_RNSDQ_RNSC_MQ_RR:
7570	  po_reg_or_goto (REG_TYPE_RN, try_rnsdq_rnsc_mq);
7571	  break;
7572	try_rnsdq_rnsc_mq:
7573	case OP_RNSDQ_RNSC_MQ:
7574	  po_reg_or_goto (REG_TYPE_MQ, try_rnsdq_rnsc);
7575	  break;
7576	try_rnsdq_rnsc:
7577	case OP_RNSDQ_RNSC:
7578	  {
7579	    po_scalar_or_goto (8, try_nsdq, REG_TYPE_VFD);
7580	    inst.error = 0;
7581	    break;
7582	    try_nsdq:
7583	    po_reg_or_fail (REG_TYPE_NSDQ);
7584	    inst.error = 0;
7585	  }
7586	  break;
7587
7588	case OP_RNSD_RNSC:
7589	  {
7590	    po_scalar_or_goto (8, try_s_scalar, REG_TYPE_VFD);
7591	    break;
7592	    try_s_scalar:
7593	    po_scalar_or_goto (4, try_nsd, REG_TYPE_VFS);
7594	    break;
7595	    try_nsd:
7596	    po_reg_or_fail (REG_TYPE_NSD);
7597	  }
7598	  break;
7599
7600	case OP_RNDQMQ_RNSC_RR:
7601	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc_rr);
7602	  break;
7603	try_rndq_rnsc_rr:
7604	case OP_RNDQ_RNSC_RR:
7605	  po_reg_or_goto (REG_TYPE_RN, try_rndq_rnsc);
7606	  break;
7607	case OP_RNDQMQ_RNSC:
7608	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_rnsc);
7609	  break;
7610	try_rndq_rnsc:
7611	case OP_RNDQ_RNSC:
7612	  {
7613	    po_scalar_or_goto (8, try_ndq, REG_TYPE_VFD);
7614	    break;
7615	    try_ndq:
7616	    po_reg_or_fail (REG_TYPE_NDQ);
7617	  }
7618	  break;
7619
7620	case OP_RND_RNSC:
7621	  {
7622	    po_scalar_or_goto (8, try_vfd, REG_TYPE_VFD);
7623	    break;
7624	    try_vfd:
7625	    po_reg_or_fail (REG_TYPE_VFD);
7626	  }
7627	  break;
7628
7629	case OP_VMOV:
7630	  /* WARNING: parse_neon_mov can move the operand counter, i. If we're
7631	     not careful then bad things might happen.  */
7632	  po_misc_or_fail (parse_neon_mov (&str, &i) == FAIL);
7633	  break;
7634
7635	case OP_RNDQMQ_Ibig:
7636	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_ibig);
7637	  break;
7638	try_rndq_ibig:
7639	case OP_RNDQ_Ibig:
7640	  {
7641	    po_reg_or_goto (REG_TYPE_NDQ, try_immbig);
7642	    break;
7643	    try_immbig:
7644	    /* There's a possibility of getting a 64-bit immediate here, so
7645	       we need special handling.  */
7646	    if (parse_big_immediate (&str, i, NULL, /*allow_symbol_p=*/FALSE)
7647		== FAIL)
7648	      {
7649		inst.error = _("immediate value is out of range");
7650		goto failure;
7651	      }
7652	  }
7653	  break;
7654
7655	case OP_RNDQMQ_I63b_RR:
7656	  po_reg_or_goto (REG_TYPE_MQ, try_rndq_i63b_rr);
7657	  break;
7658	try_rndq_i63b_rr:
7659	  po_reg_or_goto (REG_TYPE_RN, try_rndq_i63b);
7660	  break;
7661	try_rndq_i63b:
7662	case OP_RNDQ_I63b:
7663	  {
7664	    po_reg_or_goto (REG_TYPE_NDQ, try_shimm);
7665	    break;
7666	    try_shimm:
7667	    po_imm_or_fail (0, 63, TRUE);
7668	  }
7669	  break;
7670
7671	case OP_RRnpcb:
7672	  po_char_or_fail ('[');
7673	  po_reg_or_fail  (REG_TYPE_RN);
7674	  po_char_or_fail (']');
7675	  break;
7676
7677	case OP_RRnpctw:
7678	case OP_RRw:
7679	case OP_oRRw:
7680	  po_reg_or_fail (REG_TYPE_RN);
7681	  if (skip_past_char (&str, '!') == SUCCESS)
7682	    inst.operands[i].writeback = 1;
7683	  break;
7684
7685	  /* Immediates */
7686	case OP_I7:	 po_imm_or_fail (  0,	   7, FALSE);	break;
7687	case OP_I15:	 po_imm_or_fail (  0,	  15, FALSE);	break;
7688	case OP_I16:	 po_imm_or_fail (  1,	  16, FALSE);	break;
7689	case OP_I16z:	 po_imm_or_fail (  0,     16, FALSE);   break;
7690	case OP_I31:	 po_imm_or_fail (  0,	  31, FALSE);	break;
7691	case OP_I32:	 po_imm_or_fail (  1,	  32, FALSE);	break;
7692	case OP_I32z:	 po_imm_or_fail (  0,     32, FALSE);   break;
7693	case OP_I48_I64: po_imm1_or_imm2_or_fail (48, 64, FALSE); break;
7694	case OP_I63s:	 po_imm_or_fail (-64,	  63, FALSE);	break;
7695	case OP_I63:	 po_imm_or_fail (  0,     63, FALSE);   break;
7696	case OP_I64:	 po_imm_or_fail (  1,     64, FALSE);   break;
7697	case OP_I64z:	 po_imm_or_fail (  0,     64, FALSE);   break;
7698	case OP_I127:	 po_imm_or_fail (  0,	 127, FALSE);	break;
7699	case OP_I255:	 po_imm_or_fail (  0,	 255, FALSE);	break;
7700	case OP_I511:	 po_imm_or_fail (  0,	 511, FALSE);	break;
7701	case OP_I4095:	 po_imm_or_fail (  0,	 4095, FALSE);	break;
7702	case OP_I8191:   po_imm_or_fail (  0,	 8191, FALSE);	break;
7703	case OP_I4b:	 po_imm_or_fail (  1,	   4, TRUE);	break;
7704	case OP_oI7b:
7705	case OP_I7b:	 po_imm_or_fail (  0,	   7, TRUE);	break;
7706	case OP_I15b:	 po_imm_or_fail (  0,	  15, TRUE);	break;
7707	case OP_oI31b:
7708	case OP_I31b:	 po_imm_or_fail (  0,	  31, TRUE);	break;
7709	case OP_oI32b:   po_imm_or_fail (  1,     32, TRUE);    break;
7710	case OP_oI32z:   po_imm_or_fail (  0,     32, TRUE);    break;
7711	case OP_oIffffb: po_imm_or_fail (  0, 0xffff, TRUE);	break;
7712
7713	  /* Immediate variants */
7714	case OP_oI255c:
7715	  po_char_or_fail ('{');
7716	  po_imm_or_fail (0, 255, TRUE);
7717	  po_char_or_fail ('}');
7718	  break;
7719
7720	case OP_I31w:
7721	  /* The expression parser chokes on a trailing !, so we have
7722	     to find it first and zap it.  */
7723	  {
7724	    char *s = str;
7725	    while (*s && *s != ',')
7726	      s++;
7727	    if (s[-1] == '!')
7728	      {
7729		s[-1] = '\0';
7730		inst.operands[i].writeback = 1;
7731	      }
7732	    po_imm_or_fail (0, 31, TRUE);
7733	    if (str == s - 1)
7734	      str = s;
7735	  }
7736	  break;
7737
7738	  /* Expressions */
7739	case OP_EXPi:	EXPi:
7740	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7741					      GE_OPT_PREFIX));
7742	  break;
7743
7744	case OP_EXP:
7745	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7746					      GE_NO_PREFIX));
7747	  break;
7748
7749	case OP_EXPr:	EXPr:
7750	  po_misc_or_fail (my_get_expression (&inst.relocs[0].exp, &str,
7751					      GE_NO_PREFIX));
7752	  if (inst.relocs[0].exp.X_op == O_symbol)
7753	    {
7754	      val = parse_reloc (&str);
7755	      if (val == -1)
7756		{
7757		  inst.error = _("unrecognized relocation suffix");
7758		  goto failure;
7759		}
7760	      else if (val != BFD_RELOC_UNUSED)
7761		{
7762		  inst.operands[i].imm = val;
7763		  inst.operands[i].hasreloc = 1;
7764		}
7765	    }
7766	  break;
7767
7768	case OP_EXPs:
7769	  po_misc_or_fail (my_get_expression (&inst.relocs[i].exp, &str,
7770					      GE_NO_PREFIX));
7771	  if (inst.relocs[i].exp.X_op == O_symbol)
7772	    {
7773	      inst.operands[i].hasreloc = 1;
7774	    }
7775	  else if (inst.relocs[i].exp.X_op == O_constant)
7776	    {
7777	      inst.operands[i].imm = inst.relocs[i].exp.X_add_number;
7778	      inst.operands[i].hasreloc = 0;
7779	    }
7780	  break;
7781
7782	  /* Operand for MOVW or MOVT.  */
7783	case OP_HALF:
7784	  po_misc_or_fail (parse_half (&str));
7785	  break;
7786
7787	  /* Register or expression.  */
7788	case OP_RR_EXr:	  po_reg_or_goto (REG_TYPE_RN, EXPr); break;
7789	case OP_RR_EXi:	  po_reg_or_goto (REG_TYPE_RN, EXPi); break;
7790
7791	  /* Register or immediate.  */
7792	case OP_RRnpc_I0: po_reg_or_goto (REG_TYPE_RN, I0);   break;
7793	I0:		  po_imm_or_fail (0, 0, FALSE);	      break;
7794
7795	case OP_RRnpcsp_I32: po_reg_or_goto (REG_TYPE_RN, I32);	break;
7796	I32:		     po_imm_or_fail (1, 32, FALSE);	break;
7797
7798	case OP_RF_IF:    po_reg_or_goto (REG_TYPE_FN, IF);   break;
7799	IF:
7800	  if (!is_immediate_prefix (*str))
7801	    goto bad_args;
7802	  str++;
7803	  val = parse_fpa_immediate (&str);
7804	  if (val == FAIL)
7805	    goto failure;
7806	  /* FPA immediates are encoded as registers 8-15.
7807	     parse_fpa_immediate has already applied the offset.  */
7808	  inst.operands[i].reg = val;
7809	  inst.operands[i].isreg = 1;
7810	  break;
7811
7812	case OP_RIWR_I32z: po_reg_or_goto (REG_TYPE_MMXWR, I32z); break;
7813	I32z:		  po_imm_or_fail (0, 32, FALSE);	  break;
7814
7815	  /* Two kinds of register.  */
7816	case OP_RIWR_RIWC:
7817	  {
7818	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7819	    if (!rege
7820		|| (rege->type != REG_TYPE_MMXWR
7821		    && rege->type != REG_TYPE_MMXWC
7822		    && rege->type != REG_TYPE_MMXWCG))
7823	      {
7824		inst.error = _("iWMMXt data or control register expected");
7825		goto failure;
7826	      }
7827	    inst.operands[i].reg = rege->number;
7828	    inst.operands[i].isreg = (rege->type == REG_TYPE_MMXWR);
7829	  }
7830	  break;
7831
7832	case OP_RIWC_RIWG:
7833	  {
7834	    struct reg_entry *rege = arm_reg_parse_multi (&str);
7835	    if (!rege
7836		|| (rege->type != REG_TYPE_MMXWC
7837		    && rege->type != REG_TYPE_MMXWCG))
7838	      {
7839		inst.error = _("iWMMXt control register expected");
7840		goto failure;
7841	      }
7842	    inst.operands[i].reg = rege->number;
7843	    inst.operands[i].isreg = 1;
7844	  }
7845	  break;
7846
7847	  /* Misc */
7848	case OP_CPSF:	 val = parse_cps_flags (&str);		break;
7849	case OP_ENDI:	 val = parse_endian_specifier (&str);	break;
7850	case OP_oROR:	 val = parse_ror (&str);		break;
7851	try_cond:
7852	case OP_COND:	 val = parse_cond (&str);		break;
7853	case OP_oBARRIER_I15:
7854	  po_barrier_or_imm (str); break;
7855	  immediate:
7856	  if (parse_immediate (&str, &val, 0, 15, TRUE) == FAIL)
7857	    goto failure;
7858	  break;
7859
7860	case OP_wPSR:
7861	case OP_rPSR:
7862	  po_reg_or_goto (REG_TYPE_RNB, try_psr);
7863	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_virt))
7864	    {
7865	      inst.error = _("Banked registers are not available with this "
7866			     "architecture.");
7867	      goto failure;
7868	    }
7869	  break;
7870	  try_psr:
7871	  val = parse_psr (&str, op_parse_code == OP_wPSR);
7872	  break;
7873
7874	case OP_VLDR:
7875	  po_reg_or_goto (REG_TYPE_VFSD, try_sysreg);
7876	  break;
7877	try_sysreg:
7878	  val = parse_sys_vldr_vstr (&str);
7879	  break;
7880
7881	case OP_APSR_RR:
7882	  po_reg_or_goto (REG_TYPE_RN, try_apsr);
7883	  break;
7884	  try_apsr:
7885	  /* Parse "APSR_nvzc" operand (for FMSTAT-equivalent MRS
7886	     instruction).  */
7887	  if (strncasecmp (str, "APSR_", 5) == 0)
7888	    {
7889	      unsigned found = 0;
7890	      str += 5;
7891	      while (found < 15)
7892		switch (*str++)
7893		  {
7894		  case 'c': found = (found & 1) ? 16 : found | 1; break;
7895		  case 'n': found = (found & 2) ? 16 : found | 2; break;
7896		  case 'z': found = (found & 4) ? 16 : found | 4; break;
7897		  case 'v': found = (found & 8) ? 16 : found | 8; break;
7898		  default: found = 16;
7899		  }
7900	      if (found != 15)
7901		goto failure;
7902	      inst.operands[i].isvec = 1;
7903	      /* APSR_nzcv is encoded in instructions as if it were the REG_PC.  */
7904	      inst.operands[i].reg = REG_PC;
7905	    }
7906	  else
7907	    goto failure;
7908	  break;
7909
7910	case OP_TB:
7911	  po_misc_or_fail (parse_tb (&str));
7912	  break;
7913
7914	  /* Register lists.  */
7915	case OP_REGLST:
7916	  val = parse_reg_list (&str, REGLIST_RN);
7917	  if (*str == '^')
7918	    {
7919	      inst.operands[i].writeback = 1;
7920	      str++;
7921	    }
7922	  break;
7923
7924	case OP_CLRMLST:
7925	  val = parse_reg_list (&str, REGLIST_CLRM);
7926	  break;
7927
7928	case OP_VRSLST:
7929	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_S,
7930				    &partial_match);
7931	  break;
7932
7933	case OP_VRDLST:
7934	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg, REGLIST_VFP_D,
7935				    &partial_match);
7936	  break;
7937
7938	case OP_VRSDLST:
7939	  /* Allow Q registers too.  */
7940	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7941				    REGLIST_NEON_D, &partial_match);
7942	  if (val == FAIL)
7943	    {
7944	      inst.error = NULL;
7945	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7946					REGLIST_VFP_S, &partial_match);
7947	      inst.operands[i].issingle = 1;
7948	    }
7949	  break;
7950
7951	case OP_VRSDVLST:
7952	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7953				    REGLIST_VFP_D_VPR, &partial_match);
7954	  if (val == FAIL && !partial_match)
7955	    {
7956	      inst.error = NULL;
7957	      val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7958					REGLIST_VFP_S_VPR, &partial_match);
7959	      inst.operands[i].issingle = 1;
7960	    }
7961	  break;
7962
7963	case OP_NRDLST:
7964	  val = parse_vfp_reg_list (&str, &inst.operands[i].reg,
7965				    REGLIST_NEON_D, &partial_match);
7966	  break;
7967
7968	case OP_MSTRLST4:
7969	case OP_MSTRLST2:
7970	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7971					   1, &inst.operands[i].vectype);
7972	  if (val != (((op_parse_code == OP_MSTRLST2) ? 3 : 7) << 5 | 0xe))
7973	    goto failure;
7974	  break;
7975	case OP_NSTRLST:
7976	  val = parse_neon_el_struct_list (&str, &inst.operands[i].reg,
7977					   0, &inst.operands[i].vectype);
7978	  break;
7979
7980	  /* Addressing modes */
7981	case OP_ADDRMVE:
7982	  po_misc_or_fail (parse_address_group_reloc (&str, i, GROUP_MVE));
7983	  break;
7984
7985	case OP_ADDR:
7986	  po_misc_or_fail (parse_address (&str, i));
7987	  break;
7988
7989	case OP_ADDRGLDR:
7990	  po_misc_or_fail_no_backtrack (
7991	    parse_address_group_reloc (&str, i, GROUP_LDR));
7992	  break;
7993
7994	case OP_ADDRGLDRS:
7995	  po_misc_or_fail_no_backtrack (
7996	    parse_address_group_reloc (&str, i, GROUP_LDRS));
7997	  break;
7998
7999	case OP_ADDRGLDC:
8000	  po_misc_or_fail_no_backtrack (
8001	    parse_address_group_reloc (&str, i, GROUP_LDC));
8002	  break;
8003
8004	case OP_SH:
8005	  po_misc_or_fail (parse_shifter_operand (&str, i));
8006	  break;
8007
8008	case OP_SHG:
8009	  po_misc_or_fail_no_backtrack (
8010	    parse_shifter_operand_group_reloc (&str, i));
8011	  break;
8012
8013	case OP_oSHll:
8014	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_IMMEDIATE));
8015	  break;
8016
8017	case OP_oSHar:
8018	  po_misc_or_fail (parse_shift (&str, i, SHIFT_ASR_IMMEDIATE));
8019	  break;
8020
8021	case OP_oSHllar:
8022	  po_misc_or_fail (parse_shift (&str, i, SHIFT_LSL_OR_ASR_IMMEDIATE));
8023	  break;
8024
8025	case OP_RMQRZ:
8026	case OP_oRMQRZ:
8027	  po_reg_or_goto (REG_TYPE_MQ, try_rr_zr);
8028	  break;
8029
8030	case OP_RR_ZR:
8031	try_rr_zr:
8032	  po_reg_or_goto (REG_TYPE_RN, ZR);
8033	  break;
8034	ZR:
8035	  po_reg_or_fail (REG_TYPE_ZR);
8036	  break;
8037
8038	default:
8039	  as_fatal (_("unhandled operand code %d"), op_parse_code);
8040	}
8041
8042      /* Various value-based sanity checks and shared operations.  We
8043	 do not signal immediate failures for the register constraints;
8044	 this allows a syntax error to take precedence.	 */
8045      switch (op_parse_code)
8046	{
8047	case OP_oRRnpc:
8048	case OP_RRnpc:
8049	case OP_RRnpcb:
8050	case OP_RRw:
8051	case OP_oRRw:
8052	case OP_RRnpc_I0:
8053	  if (inst.operands[i].isreg && inst.operands[i].reg == REG_PC)
8054	    inst.error = BAD_PC;
8055	  break;
8056
8057	case OP_oRRnpcsp:
8058	case OP_RRnpcsp:
8059	case OP_RRnpcsp_I32:
8060	  if (inst.operands[i].isreg)
8061	    {
8062	      if (inst.operands[i].reg == REG_PC)
8063		inst.error = BAD_PC;
8064	      else if (inst.operands[i].reg == REG_SP
8065		       /* The restriction on Rd/Rt/Rt2 on Thumb mode has been
8066			  relaxed since ARMv8-A.  */
8067		       && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
8068		{
8069		  gas_assert (thumb);
8070		  inst.error = BAD_SP;
8071		}
8072	    }
8073	  break;
8074
8075	case OP_RRnpctw:
8076	  if (inst.operands[i].isreg
8077	      && inst.operands[i].reg == REG_PC
8078	      && (inst.operands[i].writeback || thumb))
8079	    inst.error = BAD_PC;
8080	  break;
8081
8082	case OP_RVSD_COND:
8083	case OP_VLDR:
8084	  if (inst.operands[i].isreg)
8085	    break;
8086	/* fall through.  */
8087
8088	case OP_CPSF:
8089	case OP_ENDI:
8090	case OP_oROR:
8091	case OP_wPSR:
8092	case OP_rPSR:
8093	case OP_COND:
8094	case OP_oBARRIER_I15:
8095	case OP_REGLST:
8096	case OP_CLRMLST:
8097	case OP_VRSLST:
8098	case OP_VRDLST:
8099	case OP_VRSDLST:
8100	case OP_VRSDVLST:
8101	case OP_NRDLST:
8102	case OP_NSTRLST:
8103	case OP_MSTRLST2:
8104	case OP_MSTRLST4:
8105	  if (val == FAIL)
8106	    goto failure;
8107	  inst.operands[i].imm = val;
8108	  break;
8109
8110	case OP_LR:
8111	case OP_oLR:
8112	  if (inst.operands[i].reg != REG_LR)
8113	    inst.error = _("operand must be LR register");
8114	  break;
8115
8116	case OP_RMQRZ:
8117	case OP_oRMQRZ:
8118	case OP_RR_ZR:
8119	  if (!inst.operands[i].iszr && inst.operands[i].reg == REG_PC)
8120	    inst.error = BAD_PC;
8121	  break;
8122
8123	case OP_RRe:
8124	  if (inst.operands[i].isreg
8125	      && (inst.operands[i].reg & 0x00000001) != 0)
8126	    inst.error = BAD_ODD;
8127	  break;
8128
8129	case OP_RRo:
8130	  if (inst.operands[i].isreg)
8131	    {
8132	      if ((inst.operands[i].reg & 0x00000001) != 1)
8133		inst.error = BAD_EVEN;
8134	      else if (inst.operands[i].reg == REG_SP)
8135		as_tsktsk (MVE_BAD_SP);
8136	      else if (inst.operands[i].reg == REG_PC)
8137		inst.error = BAD_PC;
8138	    }
8139	  break;
8140
8141	default:
8142	  break;
8143	}
8144
8145      /* If we get here, this operand was successfully parsed.	*/
8146      inst.operands[i].present = 1;
8147      continue;
8148
8149    bad_args:
8150      inst.error = BAD_ARGS;
8151
8152    failure:
8153      if (!backtrack_pos)
8154	{
8155	  /* The parse routine should already have set inst.error, but set a
8156	     default here just in case.  */
8157	  if (!inst.error)
8158	    inst.error = BAD_SYNTAX;
8159	  return FAIL;
8160	}
8161
8162      /* Do not backtrack over a trailing optional argument that
8163	 absorbed some text.  We will only fail again, with the
8164	 'garbage following instruction' error message, which is
8165	 probably less helpful than the current one.  */
8166      if (backtrack_index == i && backtrack_pos != str
8167	  && upat[i+1] == OP_stop)
8168	{
8169	  if (!inst.error)
8170	    inst.error = BAD_SYNTAX;
8171	  return FAIL;
8172	}
8173
8174      /* Try again, skipping the optional argument at backtrack_pos.  */
8175      str = backtrack_pos;
8176      inst.error = backtrack_error;
8177      inst.operands[backtrack_index].present = 0;
8178      i = backtrack_index;
8179      backtrack_pos = 0;
8180    }
8181
8182  /* Check that we have parsed all the arguments.  */
8183  if (*str != '\0' && !inst.error)
8184    inst.error = _("garbage following instruction");
8185
8186  return inst.error ? FAIL : SUCCESS;
8187}
8188
#undef po_char_or_fail
#undef po_reg_or_fail
#undef po_reg_or_goto
#undef po_imm_or_fail
#undef po_imm1_or_imm2_or_fail
#undef po_scalar_or_goto
#undef po_misc_or_fail
#undef po_misc_or_fail_no_backtrack
#undef po_barrier_or_imm
8195
/* Shorthand macro for instruction encoding functions issuing errors.
   If EXPR is true, record ERR in inst.error and return from the
   calling function.  The expansion contains a bare `return;', so this
   macro may only be used inside functions returning void.  */
#define constraint(expr, err)			\
  do						\
    {						\
      if (expr)					\
	{					\
	  inst.error = err;			\
	  return;				\
	}					\
    }						\
  while (0)
8207
8208/* Reject "bad registers" for Thumb-2 instructions.  Many Thumb-2
8209   instructions are unpredictable if these registers are used.  This
8210   is the BadReg predicate in ARM's Thumb-2 documentation.
8211
8212   Before ARMv8-A, REG_PC and REG_SP were not allowed in quite a few
8213   places, while the restriction on REG_SP was relaxed since ARMv8-A.  */
/* REG is parenthesised so that expression arguments parse correctly;
   note that it may be evaluated twice.  */
#define reject_bad_reg(reg)					\
  do								\
   if ((reg) == REG_PC)						\
     {								\
       inst.error = BAD_PC;					\
       return;							\
     }								\
   else if ((reg) == REG_SP					\
	    && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))	\
     {								\
       inst.error = BAD_SP;					\
       return;							\
     }								\
  while (0)
8228
8229/* If REG is R13 (the stack pointer), warn that its use is
8230   deprecated.  */
#define warn_deprecated_sp(reg)				\
  do							\
    if (warn_on_deprecated && (reg) == REG_SP)		\
      as_tsktsk (_("use of r13 is deprecated"));	\
  while (0)
8236
8237/* Functions for operand encoding.  ARM, then Thumb.  */
8238
8239#define rotate_left(v, n) (v << (n & 31) | v >> ((32 - n) & 31))
8240
8241/* If the current inst is scalar ARMv8.2 fp16 instruction, do special encoding.
8242
8243   The only binary encoding difference is the Coprocessor number.  Coprocessor
8244   9 is used for half-precision calculations or conversions.  The format of the
8245   instruction is the same as the equivalent Coprocessor 10 instruction that
8246   exists for Single-Precision operation.  */
8247
8248static void
8249do_scalar_fp16_v82_encode (void)
8250{
8251  if (inst.cond < COND_ALWAYS)
8252    as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
8253	       " the behaviour is UNPREDICTABLE"));
8254  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
8255	      _(BAD_FP16));
8256
8257  inst.instruction = (inst.instruction & 0xfffff0ff) | 0x900;
8258  mark_feature_used (&arm_ext_fp16);
8259}
8260
8261/* If VAL can be encoded in the immediate field of an ARM instruction,
8262   return the encoded form.  Otherwise, return FAIL.  */
8263
static unsigned int
encode_arm_immediate (unsigned int val)
{
  unsigned int shift;

  /* A value already within eight bits needs no rotation.  */
  if (val <= 0xff)
    return val;

  /* Try each even left-rotation; the A32 encoding packs half the
     rotation count into bits [11:8] (hence `<< 7') above the 8-bit
     constant in bits [7:0].  */
  for (shift = 2; shift < 32; shift += 2)
    {
      unsigned int rotated = rotate_left (val, shift);

      if (rotated <= 0xff)
	return rotated | (shift << 7);
    }

  return FAIL;
}
8278
8279/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
8280   return the encoded form.  Otherwise, return FAIL.  */
/* If VAL can be encoded in the immediate field of a Thumb32 instruction,
   return the encoded form.  Otherwise, return FAIL.  */
static unsigned int
encode_thumb32_immediate (unsigned int val)
{
  unsigned int a, i;

  /* Values 0-255 are encoded literally.  */
  if (val <= 0xff)
    return val;

  /* An 8-bit figure shifted into place: the encoding keeps the low
     seven bits of the figure (its top bit is implicit) plus the
     position, 32 - I, in bits [11:7].  */
  for (i = 1; i <= 24; i++)
    if ((val & ~(0xffU << i)) == 0)
      return ((val >> i) & 0x7f) | ((32 - i) << 7);

  /* A byte replicated in both halfwords.  */
  a = val & 0xff;
  if (val == ((a << 16) | a))
    return 0x100 | a;
  /* A byte replicated in all four byte lanes.  */
  if (val == ((a << 24) | (a << 16) | (a << 8) | a))
    return 0x300 | a;

  /* A byte replicated in the high byte of both halfwords.  */
  a = val & 0xff00;
  if (val == ((a << 16) | a))
    return 0x200 | (a >> 8);

  return FAIL;
}
8308/* Encode a VFP SP or DP register number into inst.instruction.  */
8309
8310static void
8311encode_arm_vfp_reg (int reg, enum vfp_reg_pos pos)
8312{
8313  if ((pos == VFP_REG_Dd || pos == VFP_REG_Dn || pos == VFP_REG_Dm)
8314      && reg > 15)
8315    {
8316      if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_d32))
8317	{
8318	  if (thumb_mode)
8319	    ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
8320				    fpu_vfp_ext_d32);
8321	  else
8322	    ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
8323				    fpu_vfp_ext_d32);
8324	}
8325      else
8326	{
8327	  first_error (_("D register out of range for selected VFP version"));
8328	  return;
8329	}
8330    }
8331
8332  switch (pos)
8333    {
8334    case VFP_REG_Sd:
8335      inst.instruction |= ((reg >> 1) << 12) | ((reg & 1) << 22);
8336      break;
8337
8338    case VFP_REG_Sn:
8339      inst.instruction |= ((reg >> 1) << 16) | ((reg & 1) << 7);
8340      break;
8341
8342    case VFP_REG_Sm:
8343      inst.instruction |= ((reg >> 1) << 0) | ((reg & 1) << 5);
8344      break;
8345
8346    case VFP_REG_Dd:
8347      inst.instruction |= ((reg & 15) << 12) | ((reg >> 4) << 22);
8348      break;
8349
8350    case VFP_REG_Dn:
8351      inst.instruction |= ((reg & 15) << 16) | ((reg >> 4) << 7);
8352      break;
8353
8354    case VFP_REG_Dm:
8355      inst.instruction |= (reg & 15) | ((reg >> 4) << 5);
8356      break;
8357
8358    default:
8359      abort ();
8360    }
8361}
8362
8363/* Encode a <shift> in an ARM-format instruction.  The immediate,
8364   if any, is handled by md_apply_fix.	 */
8365static void
8366encode_arm_shift (int i)
8367{
8368  /* register-shifted register.  */
8369  if (inst.operands[i].immisreg)
8370    {
8371      int op_index;
8372      for (op_index = 0; op_index <= i; ++op_index)
8373	{
8374	  /* Check the operand only when it's presented.  In pre-UAL syntax,
8375	     if the destination register is the same as the first operand, two
8376	     register form of the instruction can be used.  */
8377	  if (inst.operands[op_index].present && inst.operands[op_index].isreg
8378	      && inst.operands[op_index].reg == REG_PC)
8379	    as_warn (UNPRED_REG ("r15"));
8380	}
8381
8382      if (inst.operands[i].imm == REG_PC)
8383	as_warn (UNPRED_REG ("r15"));
8384    }
8385
8386  if (inst.operands[i].shift_kind == SHIFT_RRX)
8387    inst.instruction |= SHIFT_ROR << 5;
8388  else
8389    {
8390      inst.instruction |= inst.operands[i].shift_kind << 5;
8391      if (inst.operands[i].immisreg)
8392	{
8393	  inst.instruction |= SHIFT_BY_REG;
8394	  inst.instruction |= inst.operands[i].imm << 8;
8395	}
8396      else
8397	inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
8398    }
8399}
8400
8401static void
8402encode_arm_shifter_operand (int i)
8403{
8404  if (inst.operands[i].isreg)
8405    {
8406      inst.instruction |= inst.operands[i].reg;
8407      encode_arm_shift (i);
8408    }
8409  else
8410    {
8411      inst.instruction |= INST_IMMEDIATE;
8412      if (inst.relocs[0].type != BFD_RELOC_ARM_IMMEDIATE)
8413	inst.instruction |= inst.operands[i].imm;
8414    }
8415}
8416
8417/* Subroutine of encode_arm_addr_mode_2 and encode_arm_addr_mode_3.  */
static void
encode_arm_addr_mode_common (int i, bfd_boolean is_t)
{
  /* PR 14260:
     Generate an error if the operand is not a register.  */
  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* The base register Rn lives in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;

  if (inst.operands[i].preind)
    {
      /* IS_T forms (LDRT/STRT etc.) only accept post-indexing.  */
      if (is_t)
	{
	  inst.error = _("instruction does not accept preindexed addressing");
	  return;
	}
      inst.instruction |= PRE_INDEX;
      if (inst.operands[i].writeback)
	inst.instruction |= WRITE_BACK;

    }
  else if (inst.operands[i].postind)
    {
      /* The parser always marks post-indexed operands as writeback.  */
      gas_assert (inst.operands[i].writeback);
      if (is_t)
	inst.instruction |= WRITE_BACK;
    }
  else /* unindexed - only for coprocessor */
    {
      inst.error = _("instruction does not accept unindexed addressing");
      return;
    }

  /* Warn when the base will be written back and equals the transfer
     register: compare Rn (bits 16-19) against Rd/Rt (bits 12-15).  */
  if (((inst.instruction & WRITE_BACK) || !(inst.instruction & PRE_INDEX))
      && (((inst.instruction & 0x000f0000) >> 16)
	  == ((inst.instruction & 0x0000f000) >> 12)))
    as_warn ((inst.instruction & LOAD_BIT)
	     ? _("destination register same as write-back base")
	     : _("source register same as write-back base"));
}
8459
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 2 load or store instruction.	 If is_t is true,
   reject forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_2 (int i, bfd_boolean is_t)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm {, shift}].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_pc && inst.operands[i].writeback)),
		  BAD_PC_ADDRESSING);
      inst.instruction |= INST_IMMEDIATE;  /* yes, this is backwards */
      inst.instruction |= inst.operands[i].imm;  /* Rm in bits 3:0.  */
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[i].shifted)
	{
	  /* RRX is encoded as ROR with a zero shift amount; other
	     shifts get their amount filled in by the reloc fixup.  */
	  if (inst.operands[i].shift_kind == SHIFT_RRX)
	    inst.instruction |= SHIFT_ROR << 5;
	  else
	    {
	      inst.instruction |= inst.operands[i].shift_kind << 5;
	      inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
	    }
	}
    }
  else /* immediate offset in inst.relocs[0] */
    {
      if (is_pc && !inst.relocs[0].pc_rel)
	{
	  const bfd_boolean is_load = ((inst.instruction & LOAD_BIT) != 0);

	  /* If is_t is TRUE, it's called from do_ldstt.  ldrt/strt
	     cannot use PC in addressing.
	     PC cannot be used in writeback addressing, either.  */
	  constraint ((is_t || inst.operands[i].writeback),
		      BAD_PC_ADDRESSING);

	  /* Use of PC in str is deprecated for ARMv7.  */
	  if (warn_on_deprecated
	      && !is_load
	      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7))
	    as_tsktsk (_("use of PC in this instruction is deprecated"));
	}

      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;
	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM;
	}
    }
}
8519
/* inst.operands[i] was set up by parse_address.  Encode it into an
   ARM-format mode 3 load or store instruction.	 Reject forms that
   cannot be used with such instructions.  If is_t is true, reject
   forms that cannot be used with a T instruction (i.e. not
   post-indexed).  */
static void
encode_arm_addr_mode_3 (int i, bfd_boolean is_t)
{
  /* Mode 3 has no shifter, so a scaled register index is invalid.  */
  if (inst.operands[i].immisreg && inst.operands[i].shifted)
    {
      inst.error = _("instruction does not accept scaled register index");
      return;
    }

  encode_arm_addr_mode_common (i, is_t);

  if (inst.operands[i].immisreg)
    {
      /* Register offset form: [Rn, +/-Rm].  */
      constraint ((inst.operands[i].imm == REG_PC
		   || (is_t && inst.operands[i].reg == REG_PC)),
		  BAD_PC_ADDRESSING);
      constraint (inst.operands[i].reg == REG_PC && inst.operands[i].writeback,
		  BAD_PC_WRITEBACK);
      inst.instruction |= inst.operands[i].imm;  /* Rm in bits 3:0.  */
      if (!inst.operands[i].negative)
	inst.instruction |= INDEX_UP;
    }
  else /* immediate offset in inst.relocs[0] */
    {
      constraint ((inst.operands[i].reg == REG_PC && !inst.relocs[0].pc_rel
		   && inst.operands[i].writeback),
		  BAD_PC_WRITEBACK);
      /* Immediate form uses the split 8-bit offset (bit 22 set).  */
      inst.instruction |= HWOFFSET_IMM;
      if (inst.relocs[0].type == BFD_RELOC_UNUSED)
	{
	  /* Prefer + for zero encoded value.  */
	  if (!inst.operands[i].negative)
	    inst.instruction |= INDEX_UP;

	  inst.relocs[0].type = BFD_RELOC_ARM_OFFSET_IMM8;
	}
    }
}
8563
8564/* Write immediate bits [7:0] to the following locations:
8565
8566  |28/24|23     19|18 16|15                    4|3     0|
8567  |  a  |x x x x x|b c d|x x x x x x x x x x x x|e f g h|
8568
8569  This function is used by VMOV/VMVN/VORR/VBIC.  */
8570
8571static void
8572neon_write_immbits (unsigned immbits)
8573{
8574  inst.instruction |= immbits & 0xf;
8575  inst.instruction |= ((immbits >> 4) & 0x7) << 16;
8576  inst.instruction |= ((immbits >> 7) & 0x1) << (thumb_mode ? 28 : 24);
8577}
8578
8579/* Invert low-order SIZE bits of XHI:XLO.  */
8580
static void
neon_invert_size (unsigned *xlo, unsigned *xhi, int size)
{
  unsigned lo = xlo ? *xlo : 0;
  unsigned hi = xhi ? *xhi : 0;
  unsigned lo_mask;

  /* Pick the mask for the low word; only the 64-bit case touches the
     high word.  */
  switch (size)
    {
    case 8:
      lo_mask = 0xff;
      break;

    case 16:
      lo_mask = 0xffff;
      break;

    case 64:
      hi = (~hi) & 0xffffffff;
      /* fall through.  */

    case 32:
      lo_mask = 0xffffffff;
      break;

    default:
      abort ();
    }

  lo = (~lo) & lo_mask;

  if (xlo)
    *xlo = lo;

  if (xhi)
    *xhi = hi;
}
8615
8616/* True if IMM has form 0bAAAAAAAABBBBBBBBCCCCCCCCDDDDDDDD for bits
8617   A, B, C, D.  */
8618
static int
neon_bits_same_in_bytes (unsigned imm)
{
  int byte;

  /* Every byte must be homogeneous: all-zeros or all-ones.  */
  for (byte = 0; byte < 4; byte++)
    {
      unsigned field = (imm >> (byte * 8)) & 0xff;

      if (field != 0 && field != 0xff)
	return 0;
    }

  return 1;
}
8627
8628/* For immediate of above form, return 0bABCD.  */
8629
static unsigned
neon_squash_bits (unsigned imm)
{
  unsigned abcd = 0;
  int byte;

  /* Gather bit 0 of each byte into a 4-bit value, least significant
     byte first.  */
  for (byte = 0; byte < 4; byte++)
    abcd |= ((imm >> (byte * 8)) & 1) << byte;

  return abcd;
}
8636
8637/* Compress quarter-float representation to 0b...000 abcdefgh.  */
8638
static unsigned
neon_qfloat_bits (unsigned imm)
{
  /* Take the sign (bit 31 -> bit 7) and the top exponent/mantissa
     bits (bits 25:19 -> bits 6:0) of the single-precision value.  */
  unsigned sign_bit = (imm >> 24) & 0x80;
  unsigned bcdefgh  = (imm >> 19) & 0x7f;

  return sign_bit | bcdefgh;
}
8644
/* Returns CMODE. IMMBITS [7:0] is set to bits suitable for inserting into
   the instruction. *OP is passed as the initial value of the op field, and
   may be set to a different value depending on the constant (i.e.
   "MOV I64, 0bAAAAAAAABBBB..." which uses OP = 1 despite being MOV not
   MVN).  If the immediate looks like a repeated pattern then also
   try smaller element sizes.

   Returns FAIL when the constant cannot be represented as an AdvSIMD
   modified immediate.  The checks are ordered from the widest element
   size down, halving the pattern each time it repeats.  */

static int
neon_cmode_for_move_imm (unsigned immlo, unsigned immhi, int float_p,
			 unsigned *immbits, int *op, int size,
			 enum neon_el_type type)
{
  /* Only permit float immediates (including 0.0/-0.0) if the operand type is
     float.  */
  if (type == NT_float && !float_p)
    return FAIL;

  /* Float immediates use cmode 0xf, which has no inverted (MVN) form.  */
  if (type == NT_float && is_quarter_float (immlo) && immhi == 0)
    {
      if (size != 32 || *op == 1)
	return FAIL;
      *immbits = neon_qfloat_bits (immlo);
      return 0xf;
    }

  if (size == 64)
    {
      /* cmode 0xe with op=1: each byte is all-zeros or all-ones,
	 squashed into one bit per byte.  */
      if (neon_bits_same_in_bytes (immhi)
	  && neon_bits_same_in_bytes (immlo))
	{
	  if (*op == 1)
	    return FAIL;
	  *immbits = (neon_squash_bits (immhi) << 4)
		     | neon_squash_bits (immlo);
	  *op = 1;
	  return 0xe;
	}

      /* Otherwise a 64-bit constant is only encodable when both halves
	 match, in which case fall through to the 32-bit checks.  */
      if (immhi != immlo)
	return FAIL;
    }

  if (size >= 32)
    {
      /* One non-zero byte, shifted into place (cmodes 0x0/0x2/0x4/0x6).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x0;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0x2;
	}
      else if (immlo == (immlo & 0x00ff0000))
	{
	  *immbits = immlo >> 16;
	  return 0x4;
	}
      else if (immlo == (immlo & 0xff000000))
	{
	  *immbits = immlo >> 24;
	  return 0x6;
	}
      /* One non-zero byte with ones shifted in below (cmodes 0xc/0xd).  */
      else if (immlo == ((immlo & 0x0000ff00) | 0x000000ff))
	{
	  *immbits = (immlo >> 8) & 0xff;
	  return 0xc;
	}
      else if (immlo == ((immlo & 0x00ff0000) | 0x0000ffff))
	{
	  *immbits = (immlo >> 16) & 0xff;
	  return 0xd;
	}

      /* Retry as a 16-bit pattern if the two halfwords are equal.  */
      if ((immlo & 0xffff) != (immlo >> 16))
	return FAIL;
      immlo &= 0xffff;
    }

  if (size >= 16)
    {
      /* One non-zero byte in a halfword (cmodes 0x8/0xa).  */
      if (immlo == (immlo & 0x000000ff))
	{
	  *immbits = immlo;
	  return 0x8;
	}
      else if (immlo == (immlo & 0x0000ff00))
	{
	  *immbits = immlo >> 8;
	  return 0xa;
	}

      /* Retry as an 8-bit pattern if the two bytes are equal.  */
      if ((immlo & 0xff) != (immlo >> 8))
	return FAIL;
      immlo &= 0xff;
    }

  if (immlo == (immlo & 0x000000ff))
    {
      /* Don't allow MVN with 8-bit immediate.  */
      if (*op == 1)
	return FAIL;
      *immbits = immlo;
      return 0xe;
    }

  return FAIL;
}
8754
8755#if defined BFD_HOST_64_BIT
8756/* Returns TRUE if double precision value V may be cast
8757   to single precision without loss of accuracy.  */
8758
8759static bfd_boolean
8760is_double_a_single (bfd_int64_t v)
8761{
8762  int exp = (int)((v >> 52) & 0x7FF);
8763  bfd_int64_t mantissa = (v & (bfd_int64_t)0xFFFFFFFFFFFFFULL);
8764
8765  return (exp == 0 || exp == 0x7FF
8766	  || (exp >= 1023 - 126 && exp <= 1023 + 127))
8767    && (mantissa & 0x1FFFFFFFl) == 0;
8768}
8769
8770/* Returns a double precision value casted to single precision
8771   (ignoring the least significant bits in exponent and mantissa).  */
8772
8773static int
8774double_to_single (bfd_int64_t v)
8775{
8776  unsigned int sign = (v >> 63) & 1;
8777  int exp = (v >> 52) & 0x7FF;
8778  bfd_int64_t mantissa = (v & (bfd_int64_t) 0xFFFFFFFFFFFFFULL);
8779
8780  if (exp == 0x7FF)
8781    exp = 0xFF;
8782  else
8783    {
8784      exp = exp - 1023 + 127;
8785      if (exp >= 0xFF)
8786	{
8787	  /* Infinity.  */
8788	  exp = 0x7F;
8789	  mantissa = 0;
8790	}
8791      else if (exp < 0)
8792	{
8793	  /* No denormalized numbers.  */
8794	  exp = 0;
8795	  mantissa = 0;
8796	}
8797    }
8798  mantissa >>= 29;
8799  return (sign << 31) | (exp << 23) | mantissa;
8800}
8801#endif /* BFD_HOST_64_BIT */
8802
/* Kind of "=expr" literal being loaded, as handled by
   move_or_literal_pool: a Thumb load, an ARM load, or a vector
   (VFP/Neon) load.  */
enum lit_type
{
  CONST_THUMB,
  CONST_ARM,
  CONST_VEC
};
8809
8810static void do_vfp_nsyn_opcode (const char *);
8811
8812/* inst.relocs[0].exp describes an "=expr" load pseudo-operation.
8813   Determine whether it can be performed with a move instruction; if
8814   it can, convert inst.instruction to that move instruction and
8815   return TRUE; if it can't, convert inst.instruction to a literal-pool
8816   load and return FALSE.  If this is not a valid thing to do in the
8817   current context, set inst.error and return TRUE.
8818
8819   inst.operands[i] describes the destination register.	 */
8820
static bfd_boolean
move_or_literal_pool (int i, enum lit_type t, bfd_boolean mode_3)
{
  unsigned long tbit;
  bfd_boolean thumb_p = (t == CONST_THUMB);
  bfd_boolean arm_p   = (t == CONST_ARM);

  /* Locate the load bit for the instruction set in use; 32-bit Thumb
     encodings keep it in a different position.  */
  if (thumb_p)
    tbit = (inst.instruction > 0xffff) ? THUMB2_LOAD_BIT : THUMB_LOAD_BIT;
  else
    tbit = LOAD_BIT;

  /* The "=expr" pseudo only makes sense on a load.  */
  if ((inst.instruction & tbit) == 0)
    {
      inst.error = _("invalid pseudo operation");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op != O_constant
      && inst.relocs[0].exp.X_op != O_symbol
      && inst.relocs[0].exp.X_op != O_big)
    {
      inst.error = _("constant expression expected");
      return TRUE;
    }

  if (inst.relocs[0].exp.X_op == O_constant
      || inst.relocs[0].exp.X_op == O_big)
    {
#if defined BFD_HOST_64_BIT
      bfd_int64_t v;
#else
      offsetT v;
#endif
      if (inst.relocs[0].exp.X_op == O_big)
	{
	  /* Bignum: reassemble the value from LITTLENUM words.  */
	  LITTLENUM_TYPE w[X_PRECISION];
	  LITTLENUM_TYPE * l;

	  if (inst.relocs[0].exp.X_add_number == -1)
	    {
	      /* X_add_number == -1 marks a floating-point bignum.  */
	      gen_to_words (w, X_PRECISION, E_PRECISION);
	      l = w;
	      /* FIXME: Should we check words w[2..5] ?  */
	    }
	  else
	    l = generic_bignum;

#if defined BFD_HOST_64_BIT
	  v = ((((bfd_uint64_t) l[3] & LITTLENUM_MASK)
		<< LITTLENUM_NUMBER_OF_BITS)
	       | (((bfd_int64_t) l[2] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
	       | (((bfd_uint64_t) l[1] & LITTLENUM_MASK)
		  << LITTLENUM_NUMBER_OF_BITS)
	       | (l[0] & LITTLENUM_MASK));
#else
	  v = ((((valueT) l[1] & LITTLENUM_MASK) << LITTLENUM_NUMBER_OF_BITS)
	       | (l[0] & LITTLENUM_MASK));
#endif
	}
      else
	v = inst.relocs[0].exp.X_add_number;

      if (!inst.operands[i].issingle)
	{
	  if (thumb_p)
	    {
	      /* The LDR pseudo must not end up choosing a flag-setting
		 instruction, so we do not consider whether MOVS could
		 be used.  */

	      if ((ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
		  || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		  && inst.operands[i].reg != 13
		  && inst.operands[i].reg != 15)
		{
		  /* Check if on thumb2 it can be done with a mov.w, mvn or
		     movw instruction.  */
		  unsigned int newimm;
		  bfd_boolean isNegated = FALSE;

		  newimm = encode_thumb32_immediate (v);
		  if (newimm == (unsigned int) FAIL)
		    {
		      /* Try the bitwise complement: usable via MVN.  */
		      newimm = encode_thumb32_immediate (~v);
		      isNegated = TRUE;
		    }

		  /* The number can be loaded with a mov.w or mvn
		     instruction.  */
		  if (newimm != (unsigned int) FAIL
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      inst.instruction = (0xf04f0000  /*  MOV.W.  */
					  | (inst.operands[i].reg << 8));
		      /* Change to MOVN.  */
		      inst.instruction |= (isNegated ? 0x200000 : 0);
		      inst.instruction |= (newimm & 0x800) << 15;
		      inst.instruction |= (newimm & 0x700) << 4;
		      inst.instruction |= (newimm & 0x0ff);
		      return TRUE;
		    }
		  /* The number can be loaded with a movw instruction.  */
		  else if ((v & ~0xFFFF) == 0
			   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m))
		    {
		      int imm = v & 0xFFFF;

		      inst.instruction = 0xf2400000;  /* MOVW.  */
		      inst.instruction |= (inst.operands[i].reg << 8);
		      inst.instruction |= (imm & 0xf000) << 4;
		      inst.instruction |= (imm & 0x0800) << 15;
		      inst.instruction |= (imm & 0x0700) << 4;
		      inst.instruction |= (imm & 0x00ff);
		      /*  In case this replacement is being done on Armv8-M
			  Baseline we need to make sure to disable the
			  instruction size check, as otherwise GAS will reject
			  the use of this T32 instruction.  */
		      inst.size_req = 0;
		      return TRUE;
		    }
		}
	    }
	  else if (arm_p)
	    {
	      int value = encode_arm_immediate (v);

	      if (value != FAIL)
		{
		  /* This can be done with a mov instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MOV << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}

	      value = encode_arm_immediate (~ v);
	      if (value != FAIL)
		{
		  /* This can be done with a mvn instruction.  */
		  inst.instruction &= LITERAL_MASK;
		  inst.instruction |= INST_IMMEDIATE | (OPCODE_MVN << DATA_OP_SHIFT);
		  inst.instruction |= value & 0xfff;
		  return TRUE;
		}
	    }
	  else if (t == CONST_VEC && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	    {
	      /* Vector load: try to express the constant as an AdvSIMD
		 modified immediate (VMOV/VMVN), first as given, then
		 inverted.  */
	      int op = 0;
	      unsigned immbits = 0;
	      unsigned immlo = inst.operands[1].imm;
	      unsigned immhi = inst.operands[1].regisimm
		? inst.operands[1].reg
		: inst.relocs[0].exp.X_unsigned
		? 0
		: ((bfd_int64_t)((int) immlo)) >> 32;
	      int cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);

	      if (cmode == FAIL)
		{
		  neon_invert_size (&immlo, &immhi, 64);
		  op = !op;
		  cmode = neon_cmode_for_move_imm (immlo, immhi, FALSE, &immbits,
						   &op, 64, NT_invtype);
		}

	      if (cmode != FAIL)
		{
		  inst.instruction = (inst.instruction & VLDR_VMOV_SAME)
		    | (1 << 23)
		    | (cmode << 8)
		    | (op << 5)
		    | (1 << 4);

		  /* Fill other bits in vmov encoding for both thumb and arm.  */
		  if (thumb_mode)
		    inst.instruction |= (0x7U << 29) | (0xF << 24);
		  else
		    inst.instruction |= (0xFU << 28) | (0x1 << 25);
		  neon_write_immbits (immbits);
		  return TRUE;
		}
	    }
	}

      if (t == CONST_VEC)
	{
	  /* Check if vldr Rx, =constant could be optimized to vmov Rx, #constant.  */
	  if (inst.operands[i].issingle
	      && is_quarter_float (inst.operands[1].imm)
	      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3xd))
	    {
	      inst.operands[1].imm =
		neon_qfloat_bits (v);
	      do_vfp_nsyn_opcode ("fconsts");
	      return TRUE;
	    }

	  /* If our host does not support a 64-bit type then we cannot perform
	     the following optimization.  This mean that there will be a
	     discrepancy between the output produced by an assembler built for
	     a 32-bit-only host and the output produced from a 64-bit host, but
	     this cannot be helped.  */
#if defined BFD_HOST_64_BIT
	  else if (!inst.operands[1].issingle
		   && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v3))
	    {
	      if (is_double_a_single (v)
		  && is_quarter_float (double_to_single (v)))
		{
		  inst.operands[1].imm =
		    neon_qfloat_bits (double_to_single (v));
		  do_vfp_nsyn_opcode ("fconstd");
		  return TRUE;
		}
	    }
#endif
	}
    }

  /* No move was possible: fall back to a PC-relative literal-pool
     load and let the operand describe [pc, #offset].  */
  if (add_to_lit_pool ((!inst.operands[i].isvec
			|| inst.operands[i].issingle) ? 4 : 8) == FAIL)
    return TRUE;

  inst.operands[1].reg = REG_PC;
  inst.operands[1].isreg = 1;
  inst.operands[1].preind = 1;
  inst.relocs[0].pc_rel = 1;
  inst.relocs[0].type = (thumb_p
		     ? BFD_RELOC_ARM_THUMB_OFFSET
		     : (mode_3
			? BFD_RELOC_ARM_HWLITERAL
			: BFD_RELOC_ARM_LITERAL));
  return FALSE;
}
9057
9058/* inst.operands[i] was set up by parse_address.  Encode it into an
9059   ARM-format instruction.  Reject all forms which cannot be encoded
9060   into a coprocessor load/store instruction.  If wb_ok is false,
9061   reject use of writeback; if unind_ok is false, reject use of
9062   unindexed addressing.  If reloc_override is not 0, use it instead
9063   of BFD_ARM_CP_OFF_IMM, unless the initial relocation is a group one
9064   (in which case it is preserved).  */
9065
static int
encode_arm_cp_address (int i, int wb_ok, int unind_ok, int reloc_override)
{
  if (!inst.operands[i].isreg)
    {
      /* PR 18256 */
      if (! inst.operands[0].isvec)
	{
	  inst.error = _("invalid co-processor operand");
	  return FAIL;
	}
      /* A "=constant" operand on a vector load: try to turn it into a
	 move; on failure it becomes a literal-pool load and we fall
	 through to encode that address.  */
      if (move_or_literal_pool (0, CONST_VEC, /*mode_3=*/FALSE))
	return SUCCESS;
    }

  inst.instruction |= inst.operands[i].reg << 16;  /* Rn.  */

  gas_assert (!(inst.operands[i].preind && inst.operands[i].postind));

  if (!inst.operands[i].preind && !inst.operands[i].postind) /* unindexed */
    {
      gas_assert (!inst.operands[i].writeback);
      if (!unind_ok)
	{
	  inst.error = _("instruction does not support unindexed addressing");
	  return FAIL;
	}
      /* Unindexed form carries an 8-bit option value, always "up".  */
      inst.instruction |= inst.operands[i].imm;
      inst.instruction |= INDEX_UP;
      return SUCCESS;
    }

  if (inst.operands[i].preind)
    inst.instruction |= PRE_INDEX;

  if (inst.operands[i].writeback)
    {
      if (inst.operands[i].reg == REG_PC)
	{
	  inst.error = _("pc may not be used with write-back");
	  return FAIL;
	}
      if (!wb_ok)
	{
	  inst.error = _("instruction does not support writeback");
	  return FAIL;
	}
      inst.instruction |= WRITE_BACK;
    }

  /* Choose the offset relocation: the override wins, except that
     group relocations and LDR_PC_G0 set by the parser are kept.  */
  if (reloc_override)
    inst.relocs[0].type = (bfd_reloc_code_real_type) reloc_override;
  else if ((inst.relocs[0].type < BFD_RELOC_ARM_ALU_PC_G0_NC
	    || inst.relocs[0].type > BFD_RELOC_ARM_LDC_SB_G2)
	   && inst.relocs[0].type != BFD_RELOC_ARM_LDR_PC_G0)
    {
      if (thumb_mode)
	inst.relocs[0].type = BFD_RELOC_ARM_T32_CP_OFF_IMM;
      else
	inst.relocs[0].type = BFD_RELOC_ARM_CP_OFF_IMM;
    }

  /* Prefer + for zero encoded value.  */
  if (!inst.operands[i].negative)
    inst.instruction |= INDEX_UP;

  return SUCCESS;
}
9134
9135/* Functions for instruction encoding, sorted by sub-architecture.
9136   First some generics; their names are taken from the conventional
9137   bit positions for register arguments in ARM format instructions.  */
9138
static void
do_noargs (void)
{
  /* Nothing to encode: the opcode value from the table is complete.  */
}
9143
static void
do_rd (void)
{
  /* Rd in bits 15:12.  */
  inst.instruction |= inst.operands[0].reg << 12;
}
9149
static void
do_rn (void)
{
  /* Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 16;
}
9155
static void
do_rd_rm (void)
{
  /* Rd in bits 15:12, Rm in bits 3:0.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
}
9162
static void
do_rm_rn (void)
{
  /* Rm in bits 3:0, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 16;
}
9169
static void
do_rd_rn (void)
{
  /* Rd in bits 15:12, Rn in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
9176
static void
do_rn_rd (void)
{
  /* First operand in the Rn field (19:16), second in Rd (15:12).  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
}
9183
static void
do_tt (void)
{
  /* First register in bits 11:8, second in bits 19:16.  */
  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].reg << 16;
}
9190
9191static bfd_boolean
9192check_obsolete (const arm_feature_set *feature, const char *msg)
9193{
9194  if (ARM_CPU_IS_ANY (cpu_variant))
9195    {
9196      as_tsktsk ("%s", msg);
9197      return TRUE;
9198    }
9199  else if (ARM_CPU_HAS_FEATURE (cpu_variant, *feature))
9200    {
9201      as_bad ("%s", msg);
9202      return TRUE;
9203    }
9204
9205  return FALSE;
9206}
9207
/* Encode Rd (15:12), Rm (3:0) and Rn (19:16), with extra checks for
   the SWP/SWPB encodings.  */
static void
do_rd_rm_rn (void)
{
  unsigned Rn = inst.operands[2].reg;
  /* Enforce restrictions on SWP instruction.  */
  if ((inst.instruction & 0x0fbfffff) == 0x01000090)
    {
      constraint (Rn == inst.operands[0].reg || Rn == inst.operands[1].reg,
		  _("Rn must not overlap other operands"));

      /* SWP{b} is obsolete for ARMv8-A, and deprecated for ARMv6* and ARMv7.
       */
      if (!check_obsolete (&arm_ext_v8,
			   _("swp{b} use is obsoleted for ARMv8 and later"))
	  && warn_on_deprecated
	  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6))
	as_tsktsk (_("swp{b} use is deprecated for ARMv6 and ARMv7"));
    }

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= Rn << 16;
}
9231
9232static void
9233do_rd_rn_rm (void)
9234{
9235  inst.instruction |= inst.operands[0].reg << 12;
9236  inst.instruction |= inst.operands[1].reg << 16;
9237  inst.instruction |= inst.operands[2].reg;
9238}
9239
/* Encode Rm (3:0), Rd (15:12) and Rn (19:16).  The address operand
   must be a plain [Rn] with no offset, and Rn may not be PC.  */
static void
do_rm_rd_rn (void)
{
  constraint ((inst.operands[2].reg == REG_PC), BAD_PC);
  /* Reject any non-zero offset expression left by the address parser.  */
  constraint (((inst.relocs[0].exp.X_op != O_constant
		&& inst.relocs[0].exp.X_op != O_illegal)
	       || inst.relocs[0].exp.X_add_number != 0),
	      BAD_ADDR_MODE);
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9252
static void
do_imm0 (void)
{
  /* Immediate operand ORed directly into the low bits.  */
  inst.instruction |= inst.operands[0].imm;
}
9258
static void
do_rd_cpaddr (void)
{
  /* Rd in bits 15:12, operand 1 as a coprocessor address (writeback
     and unindexed forms both allowed).  */
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
9265
9266/* ARM instructions, in alphabetical order by function name (except
9267   that wrapper functions appear immediately after the function they
9268   wrap).  */
9269
9270/* This is a pseudo-op of the form "adr rd, label" to be converted
9271   into a relative address of the form "add rd, pc, #label-.-8".  */
9272
static void
do_adr (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
  inst.relocs[0].pc_rel = 1;
  /* Compensate for the ARM-state PC reading as the instruction
     address plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* If the target is a defined Thumb function, set bit 0 of the
     address so that interworking transfers stay in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9291
9292/* This is a pseudo-op of the form "adrl rd, label" to be converted
9293   into a relative address of the form:
9294   add rd, pc, #low(label-.-8)"
9295   add rd, rd, #high(label-.-8)"  */
9296
static void
do_adrl (void)
{
  inst.instruction |= (inst.operands[0].reg << 12);  /* Rd */

  /* Frag hacking will turn this into a sub instruction if the offset turns
     out to be negative.  */
  inst.relocs[0].type	       = BFD_RELOC_ARM_ADRL_IMMEDIATE;
  inst.relocs[0].pc_rel	       = 1;
  /* ADRL always expands to two instructions.  */
  inst.size		       = INSN_SIZE * 2;
  /* Compensate for the ARM-state PC reading as the instruction
     address plus 8.  */
  inst.relocs[0].exp.X_add_number -= 8;

  /* If the target is a defined Thumb function, set bit 0 of the
     address so that interworking transfers stay in Thumb state.  */
  if (support_interwork
      && inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_number |= 1;
}
9316
/* Data-processing arithmetic: Rd (15:12), Rn (19:16) and a shifter
   operand.  The two-operand form "op Rd, <shifter>" uses Rd as Rn.  */
static void
do_arit (void)
{
  /* Thumb-only ALU relocations cannot be used on an ARM encoding.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_shifter_operand (2);
}
9329
9330static void
9331do_barrier (void)
9332{
9333  if (inst.operands[0].present)
9334    inst.instruction |= inst.operands[0].imm;
9335  else
9336    inst.instruction |= 0xf;
9337}
9338
/* BFC Rd, #lsb, #width.  */
static void
do_bfc (void)
{
  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9350
/* BFI Rd, Rm, #lsb, #width.  */
static void
do_bfi (void)
{
  unsigned int msb;

  /* #0 in second position is alternative syntax for bfc, which is
     the same instruction but with REG_PC in the Rm field.  */
  if (!inst.operands[1].isreg)
    inst.operands[1].reg = REG_PC;

  msb = inst.operands[2].imm + inst.operands[3].imm;
  constraint (msb > 32, _("bit-field extends past end of register"));
  /* The instruction encoding stores the LSB and MSB,
     not the LSB and width.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (msb - 1) << 16;
}
9370
/* SBFX/UBFX Rd, Rm, #lsb, #width.  Here the encoding stores the LSB
   and (width - 1).  */
static void
do_bfx (void)
{
  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
	      _("bit-field extends past end of register"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 7;
  inst.instruction |= (inst.operands[3].imm - 1) << 16;
}
9381
9382/* ARM V5 breakpoint instruction (argument parse)
9383     BKPT <16 bit unsigned immediate>
9384     Instruction is not conditional.
9385	The bit pattern given in insns[] has the COND_ALWAYS condition,
9386	and it is an error if the caller tried to override that.  */
9387
9388static void
9389do_bkpt (void)
9390{
9391  /* Top 12 of 16 bits to bits 19:8.  */
9392  inst.instruction |= (inst.operands[0].imm & 0xfff0) << 4;
9393
9394  /* Bottom 4 of 16 bits to bits 3:0.  */
9395  inst.instruction |= inst.operands[0].imm & 0xf;
9396}
9397
/* Set up the (always pc-relative) relocation for a branch: honour a
   '(plt)' or '(tlscall)' suffix when present, otherwise use
   DEFAULT_RELOC.  */
static void
encode_branch (int default_reloc)
{
  if (inst.operands[0].hasreloc)
    {
      constraint (inst.operands[0].imm != BFD_RELOC_ARM_PLT32
		  && inst.operands[0].imm != BFD_RELOC_ARM_TLS_CALL,
		  _("the only valid suffixes here are '(plt)' and '(tlscall)'"));
      inst.relocs[0].type = inst.operands[0].imm == BFD_RELOC_ARM_PLT32
	? BFD_RELOC_ARM_PLT32
	: thumb_mode ? BFD_RELOC_ARM_THM_TLS_CALL : BFD_RELOC_ARM_TLS_CALL;
    }
  else
    inst.relocs[0].type = (bfd_reloc_code_real_type) default_reloc;
  inst.relocs[0].pc_rel = 1;
}
9414
static void
do_branch (void)
{
#ifdef OBJ_ELF
  /* EABI v4 and later uses the JUMP reloc so the linker can insert
     interworking stubs.  */
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9425
static void
do_bl (void)
{
#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
    {
      /* Unconditional BL gets the CALL reloc (it may become BLX at
	 link time); conditional BL uses the JUMP reloc.  */
      if (inst.cond == COND_ALWAYS)
	encode_branch (BFD_RELOC_ARM_PCREL_CALL);
      else
	encode_branch (BFD_RELOC_ARM_PCREL_JUMP);
    }
  else
#endif
    encode_branch (BFD_RELOC_ARM_PCREL_BRANCH);
}
9441
9442/* ARM V5 branch-link-exchange instruction (argument parse)
9443     BLX <target_addr>		ie BLX(1)
9444     BLX{<condition>} <Rm>	ie BLX(2)
9445   Unfortunately, there are two different opcodes for this mnemonic.
9446   So, the insns[].value is not used, and the code here zaps values
9447	into inst.instruction.
9448   Also, the <target_addr> can be 25 bits, hence has its own reloc.  */
9449
static void
do_blx (void)
{
  if (inst.operands[0].isreg)
    {
      /* Arg is a register; the opcode provided by insns[] is correct.
	 It is not illegal to do "blx pc", just useless.  */
      if (inst.operands[0].reg == REG_PC)
	as_tsktsk (_("use of r15 in blx in ARM mode is not really useful"));

      inst.instruction |= inst.operands[0].reg;  /* Rm in bits 3:0.  */
    }
  else
    {
      /* Arg is an address; this instruction cannot be executed
	 conditionally, and the opcode must be adjusted.
	 We retain the BFD_RELOC_ARM_PCREL_BLX till the very end
	 where we generate out a BFD_RELOC_ARM_PCREL_CALL instead.  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction = 0xfa000000;  /* BLX(1) immediate form.  */
      encode_branch (BFD_RELOC_ARM_PCREL_BLX);
    }
}
9473
static void
do_bx (void)
{
  bfd_boolean want_reloc;

  if (inst.operands[0].reg == REG_PC)
    as_tsktsk (_("use of r15 in bx in ARM mode is not really useful"));

  inst.instruction |= inst.operands[0].reg;  /* Rm in bits 3:0.  */
  /* Output R_ARM_V4BX relocations if is an EABI object that looks like
     it is for ARMv4t or earlier.  */
  want_reloc = !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5);
  if (!ARM_FEATURE_ZERO (selected_object_arch)
      && !ARM_CPU_HAS_FEATURE (selected_object_arch, arm_ext_v5))
      want_reloc = TRUE;

#ifdef OBJ_ELF
  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
#endif
    want_reloc = FALSE;

  if (want_reloc)
    inst.relocs[0].type = BFD_RELOC_ARM_V4BX;
}
9498
9499
9500/* ARM v5TEJ.  Jump to Jazelle code.  */
9501
9502static void
9503do_bxj (void)
9504{
9505  if (inst.operands[0].reg == REG_PC)
9506    as_tsktsk (_("use of r15 in bxj is not really useful"));
9507
9508  inst.instruction |= inst.operands[0].reg;
9509}
9510
9511/* Co-processor data operation:
9512      CDP{cond} <coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}
9513      CDP2	<coproc>, <opcode_1>, <CRd>, <CRn>, <CRm>{, <opcode_2>}	 */
9514static void
9515do_cdp (void)
9516{
9517  inst.instruction |= inst.operands[0].reg << 8;
9518  inst.instruction |= inst.operands[1].imm << 20;
9519  inst.instruction |= inst.operands[2].reg << 12;
9520  inst.instruction |= inst.operands[3].reg << 16;
9521  inst.instruction |= inst.operands[4].reg;
9522  inst.instruction |= inst.operands[5].imm << 5;
9523}
9524
9525static void
9526do_cmp (void)
9527{
9528  inst.instruction |= inst.operands[0].reg << 16;
9529  encode_arm_shifter_operand (1);
9530}
9531
9532/* Transfer between coprocessor and ARM registers.
9533   MRC{cond} <coproc>, <opcode_1>, <Rd>, <CRn>, <CRm>{, <opcode_2>}
9534   MRC2
9535   MCR{cond}
9536   MCR2
9537
9538   No special properties.  */
9539
/* Description of a coprocessor register whose access is deprecated or
   obsoleted from some architecture version onwards.  */
struct deprecated_coproc_regs_s
{
  unsigned cp;			/* Coprocessor number.  */
  int opc1;			/* opc1 field of the access instruction.  */
  unsigned crn;			/* CRn field.  */
  unsigned crm;			/* CRm field.  */
  int opc2;			/* opc2 field.  */
  arm_feature_set deprecated;	/* Feature set where access is deprecated.  */
  arm_feature_set obsoleted;	/* Feature set where access is obsoleted.  */
  const char *dep_msg;		/* Diagnostic for a deprecated access.  */
  const char *obs_msg;		/* Diagnostic for an obsoleted access.  */
};
9552
9553#define DEPR_ACCESS_V8 \
9554  N_("This coprocessor register access is deprecated in ARMv8")
9555
/* Table of all deprecated coprocessor registers.  */
/* Entry layout: {cp, opc1, crn, crm, opc2,
		  deprecated-from, obsoleted-from, dep_msg, obs_msg}.  */
static struct deprecated_coproc_regs_s deprecated_coproc_regs[] =
{
    {15, 0, 7, 10, 5,					/* CP15DMB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7, 10, 4,					/* CP15DSB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {15, 0, 7,  5, 4,					/* CP15ISB.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 1,  0, 0,					/* TEEHBR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
    {14, 6, 0,  0, 0,					/* TEECR.  */
     ARM_FEATURE_CORE_LOW (ARM_EXT_V8), ARM_ARCH_NONE,
     DEPR_ACCESS_V8, NULL},
};
9575
9576#undef DEPR_ACCESS_V8
9577
/* Number of entries in the deprecated_coproc_regs table above.  */
static const size_t deprecated_coproc_reg_count =
  sizeof (deprecated_coproc_regs) / sizeof (deprecated_coproc_regs[0]);
9580
static void
do_co_reg (void)
{
  unsigned Rd;
  size_t i;

  /* MRC/MCR: operands are coproc, opc1, Rt, CRn, CRm {, opc2}.  */
  Rd = inst.operands[2].reg;
  if (thumb_mode)
    {
      if (inst.instruction == 0xee000010
	  || inst.instruction == 0xfe000010)
	/* MCR, MCR2  */
	reject_bad_reg (Rd);
      else if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	/* MRC, MRC2  */
	constraint (Rd == REG_SP, BAD_SP);
    }
  else
    {
      /* MCR */
      if (inst.instruction == 0xe000010)
	constraint (Rd == REG_PC, BAD_PC);
    }

    /* Warn when the register being accessed is deprecated on the
       selected architecture (unless assembling for "any" CPU).  */
    for (i = 0; i < deprecated_coproc_reg_count; ++i)
      {
	const struct deprecated_coproc_regs_s *r =
	  deprecated_coproc_regs + i;

	if (inst.operands[0].reg == r->cp
	    && inst.operands[1].imm == r->opc1
	    && inst.operands[3].reg == r->crn
	    && inst.operands[4].reg == r->crm
	    && inst.operands[5].imm == r->opc2)
	  {
	    if (! ARM_CPU_IS_ANY (cpu_variant)
		&& warn_on_deprecated
		&& ARM_CPU_HAS_FEATURE (cpu_variant, r->deprecated))
	      as_tsktsk ("%s", r->dep_msg);
	  }
      }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 21;
  inst.instruction |= Rd << 12;
  inst.instruction |= inst.operands[3].reg << 16;
  inst.instruction |= inst.operands[4].reg;
  inst.instruction |= inst.operands[5].imm << 5;
}
9630
/* Transfer between coprocessor register and pair of ARM registers.
   MCRR{cond} <coproc>, <opcode>, <Rd>, <Rn>, <CRm>.
   MCRR2
   MRRC{cond}
   MRRC2

   Two XScale instructions are special cases of these:

     MAR{cond} acc0, <RdLo>, <RdHi> == MCRR{cond} p0, #0, <RdLo>, <RdHi>, c0
     MRA{cond} acc0, <RdLo>, <RdHi> == MRRC{cond} p0, #0, <RdLo>, <RdHi>, c0

   Result unpredictable if Rd or Rn is R15.  */

static void
do_co_reg2c (void)
{
  unsigned Rd, Rn;

  Rd = inst.operands[2].reg;
  Rn = inst.operands[3].reg;

  if (thumb_mode)
    {
      /* Thumb disallows both SP and PC for Rd/Rn.  */
      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
    }
  else
    {
      constraint (Rd == REG_PC, BAD_PC);
      constraint (Rn == REG_PC, BAD_PC);
    }

  /* Only check the MRRC{2} variants.  */
  if ((inst.instruction & 0x0FF00000) == 0x0C500000)
    {
       /* If Rd == Rn, error that the operation is
	  unpredictable (example MRRC p3,#1,r1,r1,c4).  */
       constraint (Rd == Rn, BAD_OVERLAP);
    }

  inst.instruction |= inst.operands[0].reg << 8;
  inst.instruction |= inst.operands[1].imm << 4;
  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;
  inst.instruction |= inst.operands[4].reg;
}
9677
9678static void
9679do_cpsi (void)
9680{
9681  inst.instruction |= inst.operands[0].imm << 6;
9682  if (inst.operands[1].present)
9683    {
9684      inst.instruction |= CPSI_MMOD;
9685      inst.instruction |= inst.operands[1].imm;
9686    }
9687}
9688
9689static void
9690do_dbg (void)
9691{
9692  inst.instruction |= inst.operands[0].imm;
9693}
9694
9695static void
9696do_div (void)
9697{
9698  unsigned Rd, Rn, Rm;
9699
9700  Rd = inst.operands[0].reg;
9701  Rn = (inst.operands[1].present
9702	? inst.operands[1].reg : Rd);
9703  Rm = inst.operands[2].reg;
9704
9705  constraint ((Rd == REG_PC), BAD_PC);
9706  constraint ((Rn == REG_PC), BAD_PC);
9707  constraint ((Rm == REG_PC), BAD_PC);
9708
9709  inst.instruction |= Rd << 16;
9710  inst.instruction |= Rn << 0;
9711  inst.instruction |= Rm << 8;
9712}
9713
static void
do_it (void)
{
  /* There is no IT instruction in ARM mode.  We
     process it to do the validation as if in
     thumb mode, just in case the code gets
     assembled for thumb using the unified syntax.  */

  /* Emit no bytes for it.  */
  inst.size = 0;
  if (unified_syntax)
    {
      set_pred_insn_type (IT_INSN);
      /* Record the IT mask and base condition so subsequent
	 instructions can be validated against the IT block.  */
      now_pred.mask = (inst.instruction & 0xf) | 0x10;
      now_pred.cc = inst.operands[0].imm;
    }
}
9730
/* If there is only one register in the register list,
   then return its register number.  Otherwise return -1.  */
static int
only_one_reg_in_list (int range)
{
  /* ffs returns 0 for an empty mask, making I equal to -1; bail out
     before evaluating 1 << -1, which is undefined behaviour.  */
  int i = ffs (range) - 1;

  if (i < 0 || i > 15)
    return -1;
  return range == (1 << i) ? i : -1;
}
9739
/* Common encoder for LDM/STM, also reached from do_push_pop with the
   base rewritten to SP-with-writeback.  FROM_PUSH_POP_MNEM is non-zero
   only for the PUSH/POP mnemonics, enabling the single-register A2
   encodings.  Operand 0 is the base register, operand 1 the list.  */
static void
encode_ldmstm(int from_push_pop_mnem)
{
  int base_reg = inst.operands[0].reg;
  int range = inst.operands[1].imm;
  int one_reg;

  inst.instruction |= base_reg << 16;
  inst.instruction |= range;

  /* Writeback on the register list marks the '^' forms (LDM type 2/3 —
     user-bank / exception-return variants).  */
  if (inst.operands[1].writeback)
    inst.instruction |= LDM_TYPE_2_OR_3;

  if (inst.operands[0].writeback)
    {
      inst.instruction |= WRITE_BACK;
      /* Check for unpredictable uses of writeback.  */
      if (inst.instruction & LOAD_BIT)
	{
	  /* Not allowed in LDM type 2.	 */
	  if ((inst.instruction & LDM_TYPE_2_OR_3)
	      && ((range & (1 << REG_PC)) == 0))
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list for other types.  */
	  else if (range & (1 << base_reg))
	    as_warn (_("writeback of base register when in register list is UNPREDICTABLE"));
	}
      else /* STM.  */
	{
	  /* Not allowed for type 2.  */
	  if (inst.instruction & LDM_TYPE_2_OR_3)
	    as_warn (_("writeback of base register is UNPREDICTABLE"));
	  /* Only allowed if base reg not in list, or first in list.  */
	  else if ((range & (1 << base_reg))
		   && (range & ((1 << base_reg) - 1)))
	    as_warn (_("if writeback register is in list, it must be the lowest reg in the list"));
	}
    }

  /* If PUSH/POP has only one register, then use the A2 encoding.  */
  one_reg = only_one_reg_in_list (range);
  if (from_push_pop_mnem && one_reg >= 0)
    {
      int is_push = (inst.instruction & A_PUSH_POP_OP_MASK) == A1_OPCODE_PUSH;

      if (is_push && one_reg == 13 /* SP */)
	/* PR 22483: The A2 encoding cannot be used when
	   pushing the stack pointer as this is UNPREDICTABLE.  */
	return;

      inst.instruction &= A_COND_MASK;
      inst.instruction |= is_push ? A2_OPCODE_PUSH : A2_OPCODE_POP;
      inst.instruction |= one_reg << 12;
    }
}
9795
9796static void
9797do_ldmstm (void)
9798{
9799  encode_ldmstm (/*from_push_pop_mnem=*/FALSE);
9800}
9801
/* ARMv5TE load-consecutive (argument parse)
   Mode is like LDRH.

     LDRccD R, mode
     STRccD R, mode.  */

static void
do_ldrd (void)
{
  /* Operand 0 is Rt (must be even), operand 1 the optional Rt2 (must
     be Rt+1 when given), operand 2 the address.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("first transfer register must be even"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only transfer two consecutive registers"));
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));
  constraint (!inst.operands[2].isreg, _("'[' expected"));

  /* Rt2 is implied by Rt when omitted.  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg + 1;

  /* encode_arm_addr_mode_3 will diagnose overlap between the base
     register and the first register written; we have to diagnose
     overlap between the base and the second register written here.  */

  if (inst.operands[2].reg == inst.operands[1].reg
      && (inst.operands[2].writeback || inst.operands[2].postind))
    as_warn (_("base register written back, and overlaps "
	       "second transfer register"));

  if (!(inst.instruction & V4_STR_BIT))
    {
      /* For an index-register load, the index register must not overlap the
	destination (even if not write-back).  */
      if (inst.operands[2].immisreg
	      && ((unsigned) inst.operands[2].imm == inst.operands[0].reg
	      || (unsigned) inst.operands[2].imm == inst.operands[1].reg))
	as_warn (_("index register overlaps transfer register"));
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (2, /*is_t=*/FALSE);
}
9843
static void
do_ldrex (void)
{
  /* LDREX Rt, [Rn]: only a bare base-register address (no offset, no
     writeback, no shift) is acceptable.  */
  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
	      || inst.operands[1].postind || inst.operands[1].writeback
	      || inst.operands[1].immisreg || inst.operands[1].shifted
	      || inst.operands[1].negative
	      /* This can arise if the programmer has written
		   strex rN, rM, foo
		 or if they have mistakenly used a register name as the last
		 operand,  eg:
		   strex rN, rM, rX
		 It is very difficult to distinguish between these two cases
		 because "rX" might actually be a label. ie the register
		 name has been occluded by a symbol of the same name. So we
		 just generate a general 'bad addressing mode' type error
		 message and leave it up to the programmer to discover the
		 true cause and fix their mistake.  */
	      || (inst.operands[1].reg == REG_PC),
	      BAD_ADDR_MODE);

  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  /* The offset was fully validated above; no fixup is needed.  */
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
9875
static void
do_ldrexd (void)
{
  /* LDREXD Rt, {Rt2,} [Rn]: Rt must be even and Rt2 (when given) must
     be Rt+1.  Rt2 is implicit in the encoding, so only Rt and the base
     register are inserted below.  */
  constraint (inst.operands[0].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[1].present
	      && inst.operands[1].reg != inst.operands[0].reg + 1,
	      _("can only load two consecutive registers"));
  /* If op 1 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[0].reg == REG_LR, _("r14 not allowed here"));

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
9891
9892/* In both ARM and thumb state 'ldr pc, #imm'  with an immediate
9893   which is not a multiple of four is UNPREDICTABLE.  */
9894static void
9895check_ldr_r15_aligned (void)
9896{
9897  constraint (!(inst.operands[1].immisreg)
9898	      && (inst.operands[0].reg == REG_PC
9899	      && inst.operands[1].reg == REG_PC
9900	      && (inst.relocs[0].exp.X_add_number & 0x3)),
9901	      _("ldr to register 15 must be 4-byte aligned"));
9902}
9903
static void
do_ldst (void)
{
  /* LDR/STR (word/byte forms, addressing mode 2).  Rt in bits 12-15.  */
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* Bare expression operand ("ldr Rt, =imm" style): try a MOV or a
       literal-pool load; if that handled it, we are done.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/FALSE))
      return;
  encode_arm_addr_mode_2 (1, /*is_t=*/FALSE);
  check_ldr_r15_aligned ();
}
9914
static void
do_ldstt (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero-offset pre-indexed form can be reinterpreted as
	 post-indexed with writeback.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_2 (1, /*is_t=*/TRUE);
}
9933
/* Halfword and signed-byte load/store operations.  */

static void
do_ldstv4 (void)
{
  /* Addressing mode 3 variants (LDRH/LDRSB/etc).  Rt in bits 12-15;
     Rt may not be the PC.  */
  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;
  if (!inst.operands[1].isreg)
    /* Bare expression operand: try a MOV or a literal-pool load.  */
    if (move_or_literal_pool (0, CONST_ARM, /*mode_3=*/TRUE))
      return;
  encode_arm_addr_mode_3 (1, /*is_t=*/FALSE);
}
9946
static void
do_ldsttv4 (void)
{
  /* ldrt/strt always use post-indexed addressing.  Turn [Rn] into [Rn]! and
     reject [Rn,...].  */
  if (inst.operands[1].preind)
    {
      /* Only a zero-offset pre-indexed form can be reinterpreted as
	 post-indexed with writeback.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction requires a post-indexed address"));

      inst.operands[1].preind = 0;
      inst.operands[1].postind = 1;
      inst.operands[1].writeback = 1;
    }
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_addr_mode_3 (1, /*is_t=*/TRUE);
}
9965
9966/* Co-processor register load/store.
9967   Format: <LDC|STC>{cond}[L] CP#,CRd,<address>	 */
9968static void
9969do_lstc (void)
9970{
9971  inst.instruction |= inst.operands[0].reg << 8;
9972  inst.instruction |= inst.operands[1].reg << 12;
9973  encode_arm_cp_address (2, TRUE, TRUE, 0);
9974}
9975
static void
do_mlas (void)
{
  /* MLA{S}/MLS Rd, Rm, Rs, Rn.  */
  /* This restriction does not apply to mls (nor to mla in v6 or later).  */
  /* NOTE(review): bit 22 appears to distinguish MLS here — confirm
     against the opcode table.  */
  if (inst.operands[0].reg == inst.operands[1].reg
      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6)
      && !(inst.instruction & 0x00400000))
    as_tsktsk (_("Rd and Rm should be different in mla"));

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
9990
static void
do_mov (void)
{
  /* MOV{S} Rd, <shifter_operand>.  The Thumb-1 group-relocation
     operators are not valid on the ARM encoding.  */
  constraint (inst.relocs[0].type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
	      && inst.relocs[0].type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC ,
	      THUMB1_RELOC_ONLY);
  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_shifter_operand (1);
}
10000
/* ARM V6T2 16-bit immediate register load: MOV[WT]{cond} Rd, #<imm16>.	 */
static void
do_mov16 (void)
{
  bfd_vma imm;
  bfd_boolean top;

  /* TOP is set for the MOVT ("upper16") variant.  */
  top = (inst.instruction & 0x00400000) != 0;
  constraint (top && inst.relocs[0].type == BFD_RELOC_ARM_MOVW,
	      _(":lower16: not allowed in this instruction"));
  constraint (!top && inst.relocs[0].type == BFD_RELOC_ARM_MOVT,
	      _(":upper16: not allowed in this instruction"));
  inst.instruction |= inst.operands[0].reg << 12;
  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
    {
      /* Plain constant with no relocation: encode it directly.  */
      imm = inst.relocs[0].exp.X_add_number;
      /* The value is in two pieces: 0:11, 16:19.  */
      inst.instruction |= (imm & 0x00000fff);
      inst.instruction |= (imm & 0x0000f000) << 4;
    }
}
10022
static int
do_vfp_nsyn_mrs (void)
{
  /* Handle the VFP interpretations of MRS.  Returns SUCCESS when the
     instruction was dispatched as a VFP transfer, FAIL when the caller
     should fall back to the core-register MRS handling.  */
  if (inst.operands[0].isvec)
    {
      if (inst.operands[1].reg != 1)
	first_error (_("operand 1 must be FPSCR"));
      /* fmstat takes no operands; clear them before re-dispatching.  */
      memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
      memset (&inst.operands[1], '\0', sizeof (inst.operands[1]));
      do_vfp_nsyn_opcode ("fmstat");
    }
  else if (inst.operands[1].isvec)
    do_vfp_nsyn_opcode ("fmrx");
  else
    return FAIL;

  return SUCCESS;
}
10041
10042static int
10043do_vfp_nsyn_msr (void)
10044{
10045  if (inst.operands[0].isvec)
10046    do_vfp_nsyn_opcode ("fmxr");
10047  else
10048    return FAIL;
10049
10050  return SUCCESS;
10051}
10052
10053static void
10054do_vmrs (void)
10055{
10056  unsigned Rt = inst.operands[0].reg;
10057
10058  if (thumb_mode && Rt == REG_SP)
10059    {
10060      inst.error = BAD_SP;
10061      return;
10062    }
10063
10064  switch (inst.operands[1].reg)
10065    {
10066    /* MVFR2 is only valid for Armv8-A.  */
10067    case 5:
10068      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
10069		  _(BAD_FPU));
10070      break;
10071
10072    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
10073    case 1: /* fpscr.  */
10074      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10075		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10076		  _(BAD_FPU));
10077      break;
10078
10079    case 14: /* fpcxt_ns.  */
10080    case 15: /* fpcxt_s.  */
10081      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
10082		  _("selected processor does not support instruction"));
10083      break;
10084
10085    case  2: /* fpscr_nzcvqc.  */
10086    case 12: /* vpr.  */
10087    case 13: /* p0.  */
10088      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
10089		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
10090		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
10091		  _("selected processor does not support instruction"));
10092      if (inst.operands[0].reg != 2
10093	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
10094	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
10095      break;
10096
10097    default:
10098      break;
10099    }
10100
10101  /* APSR_ sets isvec. All other refs to PC are illegal.  */
10102  if (!inst.operands[0].isvec && Rt == REG_PC)
10103    {
10104      inst.error = BAD_PC;
10105      return;
10106    }
10107
10108  /* If we get through parsing the register name, we just insert the number
10109     generated into the instruction without further validation.  */
10110  inst.instruction |= (inst.operands[1].reg << 16);
10111  inst.instruction |= (Rt << 12);
10112}
10113
static void
do_vmsr (void)
{
  /* VMSR <spec_reg>, Rt: move from an ARM core register to a VFP/MVE
     system register.  Operand 0 is the system register number as
     parsed; operand 1 is Rt.  */
  unsigned Rt = inst.operands[1].reg;

  if (thumb_mode)
    reject_bad_reg (Rt);
  else if (Rt == REG_PC)
    {
      inst.error = BAD_PC;
      return;
    }

  switch (inst.operands[0].reg)
    {
    /* MVFR2 is only valid for Armv8-A.  */
    case 5:
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));
      break;

    /* Check for new Armv8.1-M Mainline changes to <spec_reg>.  */
    case  1: /* fpcr.  */
      constraint (!(ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		    || ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _(BAD_FPU));
      break;

    case 14: /* fpcxt_ns.  */
    case 15: /* fpcxt_s.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main),
		  _("selected processor does not support instruction"));
      break;

    case  2: /* fpscr_nzcvqc.  */
    case 12: /* vpr.  */
    case 13: /* p0.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8_1m_main)
		  || (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
		      && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)),
		  _("selected processor does not support instruction"));
      /* The spec_reg is operand 0 here; warn for the MVE-only registers
	 (vpr, p0) when MVE is absent.  */
      if (inst.operands[0].reg != 2
	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	as_warn (_("accessing MVE system register without MVE is UNPREDICTABLE"));
      break;

    default:
      break;
    }

  /* If we get through parsing the register name, we just insert the number
     generated into the instruction without further validation.  */
  inst.instruction |= (inst.operands[0].reg << 16);
  inst.instruction |= (Rt << 12);
}
10169
static void
do_mrs (void)
{
  unsigned br;

  /* MRS Rd, <psr|banked_reg>.  Try the VFP interpretation first.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  constraint (inst.operands[0].reg == REG_PC, BAD_PC);
  inst.instruction |= inst.operands[0].reg << 12;

  if (inst.operands[1].isreg)
    {
      /* Banked-register form: the parser-produced value carries the
	 encoding fields directly; sanity-check the marker bits.  */
      br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf0000) != 0xf0000))
	as_bad (_("bad register for mrs"));
    }
  else
    {
      /* mrs only accepts CPSR/SPSR/CPSR_all/SPSR_all.  */
      constraint ((inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f))
		  != (PSR_c|PSR_f),
		  _("'APSR', 'CPSR' or 'SPSR' expected"));
      br = (15<<16) | (inst.operands[1].imm & SPSR_BIT);
    }

  inst.instruction |= br;
}
10198
/* Two possible forms:
      "{C|S}PSR_<field>, Rm",
      "{C|S}PSR_f, #expression".  */

static void
do_msr (void)
{
  /* Try the VFP interpretation (FMXR) first.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  /* Operand 0's imm holds the PSR field mask / SPSR selection.  */
  inst.instruction |= inst.operands[0].imm;
  if (inst.operands[1].isreg)
    inst.instruction |= inst.operands[1].reg;
  else
    {
      /* Immediate form: defer the value to the immediate fixup.  */
      inst.instruction |= INST_IMMEDIATE;
      inst.relocs[0].type = BFD_RELOC_ARM_IMMEDIATE;
      inst.relocs[0].pc_rel = 0;
    }
}
10219
10220static void
10221do_mul (void)
10222{
10223  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
10224
10225  if (!inst.operands[2].present)
10226    inst.operands[2].reg = inst.operands[0].reg;
10227  inst.instruction |= inst.operands[0].reg << 16;
10228  inst.instruction |= inst.operands[1].reg;
10229  inst.instruction |= inst.operands[2].reg << 8;
10230
10231  if (inst.operands[0].reg == inst.operands[1].reg
10232      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
10233    as_tsktsk (_("Rd and Rm should be different in mul"));
10234}
10235
10236/* Long Multiply Parser
10237   UMULL RdLo, RdHi, Rm, Rs
10238   SMULL RdLo, RdHi, Rm, Rs
10239   UMLAL RdLo, RdHi, Rm, Rs
10240   SMLAL RdLo, RdHi, Rm, Rs.  */
10241
10242static void
10243do_mull (void)
10244{
10245  inst.instruction |= inst.operands[0].reg << 12;
10246  inst.instruction |= inst.operands[1].reg << 16;
10247  inst.instruction |= inst.operands[2].reg;
10248  inst.instruction |= inst.operands[3].reg << 8;
10249
10250  /* rdhi and rdlo must be different.  */
10251  if (inst.operands[0].reg == inst.operands[1].reg)
10252    as_tsktsk (_("rdhi and rdlo must be different"));
10253
10254  /* rdhi, rdlo and rm must all be different before armv6.  */
10255  if ((inst.operands[0].reg == inst.operands[2].reg
10256      || inst.operands[1].reg == inst.operands[2].reg)
10257      && !ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6))
10258    as_tsktsk (_("rdhi, rdlo and rm must all be different"));
10259}
10260
10261static void
10262do_nop (void)
10263{
10264  if (inst.operands[0].present
10265      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6k))
10266    {
10267      /* Architectural NOP hints are CPSR sets with no bits selected.  */
10268      inst.instruction &= 0xf0000000;
10269      inst.instruction |= 0x0320f000;
10270      if (inst.operands[0].present)
10271	inst.instruction |= inst.operands[0].imm;
10272    }
10273}
10274
10275/* ARM V6 Pack Halfword Bottom Top instruction (argument parse).
10276   PKHBT {<cond>} <Rd>, <Rn>, <Rm> {, LSL #<shift_imm>}
10277   Condition defaults to COND_ALWAYS.
10278   Error if Rd, Rn or Rm are R15.  */
10279
10280static void
10281do_pkhbt (void)
10282{
10283  inst.instruction |= inst.operands[0].reg << 12;
10284  inst.instruction |= inst.operands[1].reg << 16;
10285  inst.instruction |= inst.operands[2].reg;
10286  if (inst.operands[3].present)
10287    encode_arm_shift (3);
10288}
10289
/* ARM V6 PKHTB (Argument Parse).  */

static void
do_pkhtb (void)
{
  if (!inst.operands[3].present)
    {
      /* If the shift specifier is omitted, turn the instruction
	 into pkhbt rd, rm, rn. */
      inst.instruction &= 0xfff00010;
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg;
      inst.instruction |= inst.operands[2].reg << 16;
    }
  else
    {
      inst.instruction |= inst.operands[0].reg << 12;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.operands[2].reg;
      /* Encode the parsed shift on Rm.  */
      encode_arm_shift (3);
    }
}
10312
/* ARMv5TE: Preload-Cache
   MP Extensions: Preload for write

    PLD(W) <addr_mode>

  Syntactically, like LDR with B=1, W=0, L=1.  */

static void
do_pld (void)
{
  /* Only an offset-addressed form (pre-indexed, no writeback) is
     acceptable; the address is encoded by the common helper.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLD mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
}
10333
/* ARMv7: PLI <addr_mode>  */
static void
do_pli (void)
{
  /* Same addressing restrictions as PLD.  */
  constraint (!inst.operands[0].isreg,
	      _("'[' expected after PLI mnemonic"));
  constraint (inst.operands[0].postind,
	      _("post-indexed expression used in preload instruction"));
  constraint (inst.operands[0].writeback,
	      _("writeback used in preload instruction"));
  constraint (!inst.operands[0].preind,
	      _("unindexed addressing used in preload instruction"));
  encode_arm_addr_mode_2 (0, /*is_t=*/FALSE);
  /* Clear the P bit that the common encoder set; the PLI encoding
     does not use it.  */
  inst.instruction &= ~PRE_INDEX;
}
10349
static void
do_push_pop (void)
{
  /* PUSH/POP <reglist>: rewritten as LDM/STM with SP! as the base, then
     handed to encode_ldmstm, which may pick the single-register A2
     encoding.  */
  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  /* Move the register list to operand 1 and synthesize SP-with-writeback
     as operand 0.  */
  inst.operands[1] = inst.operands[0];
  memset (&inst.operands[0], 0, sizeof inst.operands[0]);
  inst.operands[0].isreg = 1;
  inst.operands[0].writeback = 1;
  inst.operands[0].reg = REG_SP;
  encode_ldmstm (/*from_push_pop_mnem=*/TRUE);
}
10362
10363/* ARM V6 RFE (Return from Exception) loads the PC and CPSR from the
10364   word at the specified address and the following word
10365   respectively.
10366   Unconditionally executed.
10367   Error if Rn is R15.	*/
10368
10369static void
10370do_rfe (void)
10371{
10372  inst.instruction |= inst.operands[0].reg << 16;
10373  if (inst.operands[0].writeback)
10374    inst.instruction |= WRITE_BACK;
10375}
10376
10377/* ARM V6 ssat (argument parse).  */
10378
10379static void
10380do_ssat (void)
10381{
10382  inst.instruction |= inst.operands[0].reg << 12;
10383  inst.instruction |= (inst.operands[1].imm - 1) << 16;
10384  inst.instruction |= inst.operands[2].reg;
10385
10386  if (inst.operands[3].present)
10387    encode_arm_shift (3);
10388}
10389
10390/* ARM V6 usat (argument parse).  */
10391
10392static void
10393do_usat (void)
10394{
10395  inst.instruction |= inst.operands[0].reg << 12;
10396  inst.instruction |= inst.operands[1].imm << 16;
10397  inst.instruction |= inst.operands[2].reg;
10398
10399  if (inst.operands[3].present)
10400    encode_arm_shift (3);
10401}
10402
10403/* ARM V6 ssat16 (argument parse).  */
10404
10405static void
10406do_ssat16 (void)
10407{
10408  inst.instruction |= inst.operands[0].reg << 12;
10409  inst.instruction |= ((inst.operands[1].imm - 1) << 16);
10410  inst.instruction |= inst.operands[2].reg;
10411}
10412
10413static void
10414do_usat16 (void)
10415{
10416  inst.instruction |= inst.operands[0].reg << 12;
10417  inst.instruction |= inst.operands[1].imm << 16;
10418  inst.instruction |= inst.operands[2].reg;
10419}
10420
10421/* ARM V6 SETEND (argument parse).  Sets the E bit in the CPSR while
10422   preserving the other bits.
10423
10424   setend <endian_specifier>, where <endian_specifier> is either
10425   BE or LE.  */
10426
10427static void
10428do_setend (void)
10429{
10430  if (warn_on_deprecated
10431      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
10432      as_tsktsk (_("setend use is deprecated for ARMv8"));
10433
10434  if (inst.operands[0].imm)
10435    inst.instruction |= 0x200;
10436}
10437
static void
do_shift (void)
{
  /* Shift mnemonics (<shift> Rd, {Rm,} Rs|#imm): Rm defaults to Rd
     when omitted.  */
  unsigned int Rm = (inst.operands[1].present
		     ? inst.operands[1].reg
		     : inst.operands[0].reg);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= Rm;
  if (inst.operands[2].isreg)  /* Rd, {Rm,} Rs */
    {
      inst.instruction |= inst.operands[2].reg << 8;
      inst.instruction |= SHIFT_BY_REG;
      /* PR 12854: Error on extraneous shifts.  */
      constraint (inst.operands[2].shifted,
		  _("extraneous shift as part of operand to shift insn"));
    }
  else
    /* Immediate shift amount: left to the shift-immediate fixup.  */
    inst.relocs[0].type = BFD_RELOC_ARM_SHIFT_IMM;
}
10458
10459static void
10460do_smc (void)
10461{
10462  unsigned int value = inst.relocs[0].exp.X_add_number;
10463  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));
10464
10465  inst.relocs[0].type = BFD_RELOC_ARM_SMC;
10466  inst.relocs[0].pc_rel = 0;
10467}
10468
10469static void
10470do_hvc (void)
10471{
10472  inst.relocs[0].type = BFD_RELOC_ARM_HVC;
10473  inst.relocs[0].pc_rel = 0;
10474}
10475
10476static void
10477do_swi (void)
10478{
10479  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
10480  inst.relocs[0].pc_rel = 0;
10481}
10482
static void
do_setpan (void)
{
  /* ARM encoding of SETPAN: the single immediate bit goes in bit 9.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= ((inst.operands[0].imm & 1) << 9);
}
10491
static void
do_t_setpan (void)
{
  /* Thumb encoding of SETPAN: the immediate bit goes in bit 3.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_pan),
	      _("selected processor does not support SETPAN instruction"));

  inst.instruction |= (inst.operands[0].imm << 3);
}
10500
10501/* ARM V5E (El Segundo) signed-multiply-accumulate (argument parse)
10502   SMLAxy{cond} Rd,Rm,Rs,Rn
10503   SMLAWy{cond} Rd,Rm,Rs,Rn
10504   Error if any register is R15.  */
10505
static void
do_smla (void)
{
  /* Rd in bits 16-19, Rm in 0-3, Rs in 8-11, Rn in 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
  inst.instruction |= inst.operands[3].reg << 12;
}
10514
10515/* ARM V5E (El Segundo) signed-multiply-accumulate-long (argument parse)
10516   SMLALxy{cond} Rdlo,Rdhi,Rm,Rs
10517   Error if any register is R15.
10518   Warning if Rdlo == Rdhi.  */
10519
static void
do_smlal (void)
{
  /* RdLo in bits 12-15, RdHi in 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].reg << 8;

  /* Equal accumulator halves give UNPREDICTABLE results: warn only.  */
  if (inst.operands[0].reg == inst.operands[1].reg)
    as_tsktsk (_("rdhi and rdlo must be different"));
}
10531
10532/* ARM V5E (El Segundo) signed-multiply (argument parse)
10533   SMULxy{cond} Rd,Rm,Rs
10534   Error if any register is R15.  */
10535
static void
do_smul (void)
{
  /* Rd in bits 16-19, Rm in 0-3, Rs in 8-11.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 8;
}
10543
10544/* ARM V6 srs (argument parse).  The variable fields in the encoding are
10545   the same for both ARM and Thumb-2.  */
10546
10547static void
10548do_srs (void)
10549{
10550  int reg;
10551
10552  if (inst.operands[0].present)
10553    {
10554      reg = inst.operands[0].reg;
10555      constraint (reg != REG_SP, _("SRS base register must be r13"));
10556    }
10557  else
10558    reg = REG_SP;
10559
10560  inst.instruction |= reg << 16;
10561  inst.instruction |= inst.operands[1].imm;
10562  if (inst.operands[0].writeback || inst.operands[1].writeback)
10563    inst.instruction |= WRITE_BACK;
10564}
10565
10566/* ARM V6 strex (argument parse).  */
10567
static void
do_strex (void)
{
  /* Address operand must be a plain pre-indexed register, no offset
     register, shift, negation, writeback or post-indexing.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative
	      /* See comment in do_ldrex().  */
	      || (inst.operands[2].reg == REG_PC),
	      BAD_ADDR_MODE);

  /* The status register must differ from both the value and the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  /* The ARM encoding has no offset field.  */
  constraint (inst.relocs[0].exp.X_op != O_constant
	      || inst.relocs[0].exp.X_add_number != 0,
	      _("offset must be zero in ARM encoding"));

  /* Rd (status) in bits 12-15, Rt (value) in 0-3, Rn (base) in 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.relocs[0].type = BFD_RELOC_UNUSED;
}
10591
static void
do_t_strexbh (void)
{
  /* Thumb STREXB/STREXH: plain register address only.  */
  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
	      || inst.operands[2].postind || inst.operands[2].writeback
	      || inst.operands[2].immisreg || inst.operands[2].shifted
	      || inst.operands[2].negative,
	      BAD_ADDR_MODE);

  /* Status register must not overlap the value or base registers.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10606
static void
do_strexd (void)
{
  /* STREXD stores an even/odd register pair; the first must be even
     and the second, if written, must be the next register up.  */
  constraint (inst.operands[1].reg % 2 != 0,
	      _("even register required"));
  constraint (inst.operands[2].present
	      && inst.operands[2].reg != inst.operands[1].reg + 1,
	      _("can only store two consecutive registers"));
  /* If op 2 were present and equal to PC, this function wouldn't
     have been called in the first place.  */
  constraint (inst.operands[1].reg == REG_LR, _("r14 not allowed here"));

  /* Status register must not overlap either stored register or the base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[1].reg + 1
	      || inst.operands[0].reg == inst.operands[3].reg,
	      BAD_OVERLAP);

  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[3].reg << 16;
}
10628
10629/* ARM V8 STRL.  */
static void
do_stlex (void)
{
  /* ARM STLEX*: status register must not overlap value or base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rd_rm_rn ();
}
10638
static void
do_t_stlex (void)
{
  /* Thumb STLEX*: status register must not overlap value or base.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg, BAD_OVERLAP);

  do_rm_rd_rn ();
}
10647
10648/* ARM V6 SXTAH extracts a 16-bit value from a register, sign
10649   extends it to 32-bits, and adds the result to a value in another
10650   register.  You can specify a rotation by 0, 8, 16, or 24 bits
10651   before extracting the 16-bit value.
10652   SXTAH{<cond>} <Rd>, <Rn>, <Rm>{, <rotation>}
10653   Condition defaults to COND_ALWAYS.
10654   Error if any register uses R15.  */
10655
static void
do_sxtah (void)
{
  /* Rd in bits 12-15, Rn in 16-19, Rm in 0-3, rotation in bits 10-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 10;
}
10664
10665/* ARM V6 SXTH.
10666
10667   SXTH {<cond>} <Rd>, <Rm>{, <rotation>}
10668   Condition defaults to COND_ALWAYS.
10669   Error if any register uses R15.  */
10670
static void
do_sxth (void)
{
  /* Rd in bits 12-15, Rm in 0-3, rotation in bits 10-11.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].imm << 10;
}
10678
10679/* VFP instructions.  In a logical order: SP variant first, monad
10680   before dyad, arithmetic then move then load/store.  */
10681
static void
do_vfp_sp_monadic (void)
{
  /* Single-precision two-operand op (Sd, Sm); needs VFPv1xD or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10692
static void
do_vfp_sp_dyadic (void)
{
  /* Single-precision three-operand op: Sd, Sn, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10700
static void
do_vfp_sp_compare_z (void)
{
  /* Single-precision compare against zero: only Sd is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
}
10706
static void
do_vfp_dp_sp_cvt (void)
{
  /* Conversion with double-precision destination, SP source: Dd, Sm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sm);
}
10713
static void
do_vfp_sp_dp_cvt (void)
{
  /* Conversion with single-precision destination, DP source: Sd, Dm.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10720
static void
do_vfp_reg_from_sp (void)
{
  /* Move SP register to core register (VMOV Rt, Sn); Rt in bits 12-15.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	     _(BAD_FPU));

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sn);
}
10731
static void
do_vfp_reg2_from_sp2 (void)
{
  /* Move an SP register pair to two core registers; the register list
     parsed into operand 2 must contain exactly two registers.  */
  constraint (inst.operands[2].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Sm);
}
10741
static void
do_vfp_sp_from_reg (void)
{
  /* Move core register to SP register (VMOV Sn, Rt); Rt in bits 12-15.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
	     && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	     _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sn);
  inst.instruction |= inst.operands[1].reg << 12;
}
10752
static void
do_vfp_sp2_from_reg2 (void)
{
  /* Move two core registers to an SP register pair; operand 0 must be
     a list of exactly two consecutive registers.  */
  constraint (inst.operands[0].imm != 2,
	      _("only two consecutive VFP SP registers allowed here"));
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sm);
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
10762
static void
do_vfp_sp_ldst (void)
{
  /* Single-precision load/store: Sd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10769
static void
do_vfp_dp_ldst (void)
{
  /* Double-precision load/store: Dd plus a coprocessor address.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_cp_address (1, FALSE, TRUE, 0);
}
10776
10777
/* Common worker for the single-precision VFP load/store-multiple forms.  */
static void
vfp_sp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA form may omit base writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA,
		_("this addressing mode requires base-register writeback"));
  /* Base register in bits 16-19, first Sd register, then the count.  */
  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Sd);
  inst.instruction |= inst.operands[1].imm;
}
10790
/* Common worker for the double-precision VFP load/store-multiple forms.  */
static void
vfp_dp_ldstm (enum vfp_ldstm_type ldstm_type)
{
  int count;

  if (inst.operands[0].writeback)
    inst.instruction |= WRITE_BACK;
  else
    /* Only the IA and IAX forms may omit base writeback.  */
    constraint (ldstm_type != VFP_LDSTMIA && ldstm_type != VFP_LDSTMIAX,
		_("this addressing mode requires base-register writeback"));

  inst.instruction |= inst.operands[0].reg << 16;
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);

  /* Each D register transfers two words; the X (FLDMX/FSTMX) forms
     are marked by an odd word count.  */
  count = inst.operands[1].imm << 1;
  if (ldstm_type == VFP_LDSTMIAX || ldstm_type == VFP_LDSTMDBX)
    count += 1;

  inst.instruction |= count;
}
10811
static void
do_vfp_sp_ldstmia (void)
{
  /* FLDMIAS/FSTMIAS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMIA);
}
10817
static void
do_vfp_sp_ldstmdb (void)
{
  /* FLDMDBS/FSTMDBS wrapper.  */
  vfp_sp_ldstm (VFP_LDSTMDB);
}
10823
static void
do_vfp_dp_ldstmia (void)
{
  /* FLDMIAD/FSTMIAD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMIA);
}
10829
static void
do_vfp_dp_ldstmdb (void)
{
  /* FLDMDBD/FSTMDBD wrapper.  */
  vfp_dp_ldstm (VFP_LDSTMDB);
}
10835
static void
do_vfp_xp_ldstmia (void)
{
  /* FLDMIAX/FSTMIAX wrapper (unknown-precision form).  */
  vfp_dp_ldstm (VFP_LDSTMIAX);
}
10841
static void
do_vfp_xp_ldstmdb (void)
{
  /* FLDMDBX/FSTMDBX wrapper (unknown-precision form).  */
  vfp_dp_ldstm (VFP_LDSTMDBX);
}
10847
static void
do_vfp_dp_rd_rm (void)
{
  /* Double-precision two-operand op (Dd, Dm); needs VFPv1 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dm);
}
10858
static void
do_vfp_dp_rn_rd (void)
{
  /* Operand 0 goes in the Dn field, operand 1 in Dd.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
}
10865
static void
do_vfp_dp_rd_rn (void)
{
  /* Operand 0 goes in the Dd field, operand 1 in Dn.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
}
10872
static void
do_vfp_dp_rd_rn_rm (void)
{
  /* Double-precision three-operand op (Dd, Dn, Dm); needs VFPv2 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dn);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dm);
}
10884
static void
do_vfp_dp_rd (void)
{
  /* Single-register form: only the Dd field is encoded.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
}
10890
static void
do_vfp_dp_rm_rd_rn (void)
{
  /* Operands map to Dm, Dd, Dn in that order; needs VFPv2 or MVE.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
	      _(BAD_FPU));

  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dm);
  encode_arm_vfp_reg (inst.operands[1].reg, VFP_REG_Dd);
  encode_arm_vfp_reg (inst.operands[2].reg, VFP_REG_Dn);
}
10902
10903/* VFPv3 instructions.  */
static void
do_vfp_sp_const (void)
{
  /* VMOV.F32 Sd, #imm: split the 8-bit immediate into the high nibble
     (bits 16-19) and the low nibble (bits 0-3).  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10911
static void
do_vfp_dp_const (void)
{
  /* VMOV.F64 Dd, #imm: same immediate split as the SP form.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  inst.instruction |= (inst.operands[1].imm & 0xf0) << 12;
  inst.instruction |= (inst.operands[1].imm & 0x0f);
}
10919
/* Encode the fraction-bits field for the VFPv3 fixed-point conversion
   instructions.  SRCSIZE is 16 or 32; the field stored is
   SRCSIZE - fbits, split across bit 5 (LSB) and bits 0-3.  */
static void
vfp_conv (int srcsize)
{
  int immbits = srcsize - inst.operands[1].imm;

  if (srcsize == 16 && !(immbits >= 0 && immbits <= srcsize))
    {
      /* If srcsize is 16, inst.operands[1].imm must be in the range 0-16.
	 i.e. immbits must be in range 0 - 16.  */
      inst.error = _("immediate value out of range, expected range [0, 16]");
      return;
    }
  else if (srcsize == 32 && !(immbits >= 0 && immbits < srcsize))
    {
      /* If srcsize is 32, inst.operands[1].imm must be in the range 1-32.
	 i.e. immbits must be in range 0 - 31.  */
      inst.error = _("immediate value out of range, expected range [1, 32]");
      return;
    }

  inst.instruction |= (immbits & 1) << 5;
  inst.instruction |= (immbits >> 1);
}
10943
static void
do_vfp_sp_conv_16 (void)
{
  /* SP fixed-point conversion, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (16);
}
10950
static void
do_vfp_dp_conv_16 (void)
{
  /* DP fixed-point conversion, 16-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (16);
}
10957
static void
do_vfp_sp_conv_32 (void)
{
  /* SP fixed-point conversion, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
  vfp_conv (32);
}
10964
static void
do_vfp_dp_conv_32 (void)
{
  /* DP fixed-point conversion, 32-bit source size.  */
  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Dd);
  vfp_conv (32);
}
10971
10972/* FPA instructions.  Also in a logical order.	*/
10973
static void
do_fpa_cmp (void)
{
  /* FPA compare: first operand in bits 16-19, second in 0-3.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
10980
static void
do_fpa_ldmstm (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* Encode the register count (1-4) in the CP_T_X/CP_T_Y bits.  */
  switch (inst.operands[1].imm)
    {
    case 1: inst.instruction |= CP_T_X;		 break;
    case 2: inst.instruction |= CP_T_Y;		 break;
    case 3: inst.instruction |= CP_T_Y | CP_T_X; break;
    case 4:					 break;
    default: abort ();
    }

  if (inst.instruction & (PRE_INDEX | INDEX_UP))
    {
      /* The instruction specified "ea" or "fd", so we can only accept
	 [Rn]{!}.  The instruction does not really support stacking or
	 unstacking, so we have to emulate these by setting appropriate
	 bits and offsets.  */
      constraint (inst.relocs[0].exp.X_op != O_constant
		  || inst.relocs[0].exp.X_add_number != 0,
		  _("this instruction does not support indexing"));

      /* 12 bytes per register transferred.  */
      if ((inst.instruction & PRE_INDEX) || inst.operands[2].writeback)
	inst.relocs[0].exp.X_add_number = 12 * inst.operands[1].imm;

      if (!(inst.instruction & INDEX_UP))
	inst.relocs[0].exp.X_add_number = -inst.relocs[0].exp.X_add_number;

      /* Descending stack with writeback is emulated via post-indexing.  */
      if (!(inst.instruction & PRE_INDEX) && inst.operands[2].writeback)
	{
	  inst.operands[2].preind = 0;
	  inst.operands[2].postind = 1;
	}
    }

  encode_arm_cp_address (2, TRUE, TRUE, 0);
}
11019
11020/* iWMMXt instructions: strictly in alphabetical order.	 */
11021
static void
do_iwmmxt_tandorc (void)
{
  /* TANDC/TORC only write the flags: destination must be r15.  */
  constraint (inst.operands[0].reg != REG_PC, _("only r15 allowed here"));
}
11027
static void
do_iwmmxt_textrc (void)
{
  /* Rd in bits 12-15, lane immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].imm;
}
11034
static void
do_iwmmxt_textrm (void)
{
  /* Rd in bits 12-15, wRn in 16-19, lane immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].imm;
}
11042
static void
do_iwmmxt_tinsr (void)
{
  /* wRd in bits 16-19, Rn in 12-15, lane immediate in the low bits.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].imm;
}
11050
static void
do_iwmmxt_tmia (void)
{
  /* Accumulator in bits 5-8, Rm in 0-3, Rs in 12-15.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
11058
static void
do_iwmmxt_waligni (void)
{
  /* wRd in 12-15, wRn in 16-19, wRm in 0-3, alignment imm in 20-22.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 20;
}
11067
static void
do_iwmmxt_wmerge (void)
{
  /* wRd in 12-15, wRn in 16-19, wRm in 0-3, merge imm at bit 21.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[2].reg;
  inst.instruction |= inst.operands[3].imm << 21;
}
11076
static void
do_iwmmxt_wmov (void)
{
  /* WMOV rD, rN is an alias for WOR rD, rN, rN: the single source
     register fills both source fields.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= inst.operands[1].reg;
}
11085
11086static void
11087do_iwmmxt_wldstbh (void)
11088{
11089  int reloc;
11090  inst.instruction |= inst.operands[0].reg << 12;
11091  if (thumb_mode)
11092    reloc = BFD_RELOC_ARM_T32_CP_OFF_IMM_S2;
11093  else
11094    reloc = BFD_RELOC_ARM_CP_OFF_IMM_S2;
11095  encode_arm_cp_address (1, TRUE, FALSE, reloc);
11096}
11097
static void
do_iwmmxt_wldstw (void)
{
  /* RIWR_RIWC clears .isreg for a control register.  */
  if (!inst.operands[0].isreg)
    {
      /* Control-register form is unconditional (0xF condition field).  */
      constraint (inst.cond != COND_ALWAYS, BAD_COND);
      inst.instruction |= 0xf0000000;
    }

  inst.instruction |= inst.operands[0].reg << 12;
  encode_arm_cp_address (1, TRUE, TRUE, 0);
}
11111
static void
do_iwmmxt_wldstd (void)
{
  inst.instruction |= inst.operands[0].reg << 12;
  /* iWMMXt2 adds a register-offset addressing form; re-encode the
     instruction by hand for that case.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2)
      && inst.operands[1].immisreg)
    {
      inst.instruction &= ~0x1a000ff;
      inst.instruction |= (0xfU << 28);
      if (inst.operands[1].preind)
	inst.instruction |= PRE_INDEX;
      if (!inst.operands[1].negative)
	inst.instruction |= INDEX_UP;
      if (inst.operands[1].writeback)
	inst.instruction |= WRITE_BACK;
      inst.instruction |= inst.operands[1].reg << 16;
      inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
      inst.instruction |= inst.operands[1].imm;
    }
  else
    encode_arm_cp_address (1, TRUE, FALSE, 0);
}
11134
static void
do_iwmmxt_wshufh (void)
{
  /* wRd in 12-15, wRn in 16-19; the 8-bit shuffle immediate is split
     into a high nibble (bits 20-23) and a low nibble (bits 0-3).  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= ((inst.operands[2].imm & 0xf0) << 16);
  inst.instruction |= (inst.operands[2].imm & 0x0f);
}
11143
static void
do_iwmmxt_wzero (void)
{
  /* WZERO reg is an alias for WANDN reg, reg, reg: the one register
     fills all three register fields.  */
  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[0].reg << 16;
}
11152
/* iWMMXt shift instructions: the third operand is either a register
   (all iWMMXt) or a 5-bit immediate (iWMMXt2 only).  */
static void
do_iwmmxt_wrwrwr_or_imm5 (void)
{
  if (inst.operands[2].isreg)
    do_rd_rn_rm ();
  else {
    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2),
		_("immediate operand requires iWMMXt2"));
    do_rd_rn ();
    /* A zero shift amount is not directly encodable; rewrite it as an
       equivalent full-width rotate (or a plain move for the D size).  */
    if (inst.operands[2].imm == 0)
      {
	switch ((inst.instruction >> 20) & 0xf)
	  {
	  case 4:
	  case 5:
	  case 6:
	  case 7:
	    /* w...h wrd, wrn, #0 -> wrorh wrd, wrn, #16.  */
	    inst.operands[2].imm = 16;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0x7 << 20);
	    break;
	  case 8:
	  case 9:
	  case 10:
	  case 11:
	    /* w...w wrd, wrn, #0 -> wrorw wrd, wrn, #32.  */
	    inst.operands[2].imm = 32;
	    inst.instruction = (inst.instruction & 0xff0fffff) | (0xb << 20);
	    break;
	  case 12:
	  case 13:
	  case 14:
	  case 15:
	    {
	      /* w...d wrd, wrn, #0 -> wor wrd, wrn, wrn.  */
	      unsigned long wrn;
	      wrn = (inst.instruction >> 16) & 0xf;
	      inst.instruction &= 0xff0fff0f;
	      inst.instruction |= wrn;
	      /* Bail out here; the instruction is now assembled.  */
	      return;
	    }
	  }
      }
    /* Map 32 -> 0, etc.  */
    inst.operands[2].imm &= 0x1f;
    inst.instruction |= (0xfU << 28) | ((inst.operands[2].imm & 0x10) << 4) | (inst.operands[2].imm & 0xf);
  }
}
11202
11203/* Cirrus Maverick instructions.  Simple 2-, 3-, and 4-register
11204   operations first, then control, shift, and load/store.  */
11205
11206/* Insns like "foo X,Y,Z".  */
11207
static void
do_mav_triple (void)
{
  /* X in bits 16-19, Y in 0-3, Z in 12-15.  */
  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
11215
11216/* Insns like "foo W,X,Y,Z".
11217    where W=MVAX[0:3] and X,Y,Z=MVFX[0:15].  */
11218
static void
do_mav_quad (void)
{
  /* W in bits 5+, X in 12-15, Y in 16-19, Z in 0-3.  */
  inst.instruction |= inst.operands[0].reg << 5;
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
  inst.instruction |= inst.operands[3].reg;
}
11227
11228/* cfmvsc32<cond> DSPSC,MVDX[15:0].  */
static void
do_mav_dspsc (void)
{
  /* Only the MVDX source register is encoded; DSPSC is implicit.  */
  inst.instruction |= inst.operands[1].reg << 12;
}
11234
11235/* Maverick shift immediate instructions.
11236   cfsh32<cond> MVFX[15:0],MVFX[15:0],Shift[6:0].
11237   cfsh64<cond> MVDX[15:0],MVDX[15:0],Shift[6:0].  */
11238
static void
do_mav_shift (void)
{
  int imm = inst.operands[2].imm;

  /* Destination in bits 12-15, source in 16-19.  */
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;

  /* Bits 0-3 of the insn should have bits 0-3 of the immediate.
     Bits 5-7 of the insn should have bits 4-6 of the immediate.
     Bit 4 should be 0.	 */
  imm = (imm & 0xf) | ((imm & 0x70) << 1);

  inst.instruction |= imm;
}
11254
11255/* XScale instructions.	 Also sorted arithmetic before move.  */
11256
11257/* Xscale multiply-accumulate (argument parse)
11258     MIAcc   acc0,Rm,Rs
11259     MIAPHcc acc0,Rm,Rs
11260     MIAxycc acc0,Rm,Rs.  */
11261
static void
do_xsc_mia (void)
{
  /* acc0 is implicit; Rm in bits 0-3, Rs in 12-15.  */
  inst.instruction |= inst.operands[1].reg;
  inst.instruction |= inst.operands[2].reg << 12;
}
11268
11269/* Xscale move-accumulator-register (argument parse)
11270
11271     MARcc   acc0,RdLo,RdHi.  */
11272
static void
do_xsc_mar (void)
{
  /* acc0 is implicit; RdLo in bits 12-15, RdHi in 16-19.  */
  inst.instruction |= inst.operands[1].reg << 12;
  inst.instruction |= inst.operands[2].reg << 16;
}
11279
11280/* Xscale move-register-accumulator (argument parse)
11281
11282     MRAcc   RdLo,RdHi,acc0.  */
11283
static void
do_xsc_mra (void)
{
  /* RdLo and RdHi must differ; acc0 is implicit.  */
  constraint (inst.operands[0].reg == inst.operands[1].reg, BAD_OVERLAP);
  inst.instruction |= inst.operands[0].reg << 12;
  inst.instruction |= inst.operands[1].reg << 16;
}
11291
11292/* Encoding functions relevant only to Thumb.  */
11293
11294/* inst.operands[i] is a shifted-register operand; encode
11295   it into inst.instruction in the format used by Thumb32.  */
11296
static void
encode_thumb32_shifted_operand (int i)
{
  unsigned int value = inst.relocs[0].exp.X_add_number;
  unsigned int shift = inst.operands[i].shift_kind;

  constraint (inst.operands[i].immisreg,
	      _("shift by register not allowed in thumb mode"));
  inst.instruction |= inst.operands[i].reg;
  /* RRX is encoded as ROR with a zero shift amount.  */
  if (shift == SHIFT_RRX)
    inst.instruction |= SHIFT_ROR << 4;
  else
    {
      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      constraint (value > 32
		  || (value == 32 && (shift == SHIFT_LSL
				      || shift == SHIFT_ROR)),
		  _("shift expression is too large"));

      /* Amount 0 degenerates to LSL #0; LSR/ASR #32 are encoded as
	 amount 0 with their own shift type.  */
      if (value == 0)
	shift = SHIFT_LSL;
      else if (value == 32)
	value = 0;

      /* Shift type in bits 4-5; amount split into imm3 and imm2.  */
      inst.instruction |= shift << 4;
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
    }
}
11328
11329
11330/* inst.operands[i] was set up by parse_address.  Encode it into a
11331   Thumb32 format load or store instruction.  Reject forms that cannot
11332   be used with such instructions.  If is_t is true, reject forms that
11333   cannot be used with a T instruction; if is_d is true, reject forms
11334   that cannot be used with a D instruction.  If it is a store insn,
11335   reject PC in Rn.  */
11336
static void
encode_thumb32_addr_mode (int i, bfd_boolean is_t, bfd_boolean is_d)
{
  const bfd_boolean is_pc = (inst.operands[i].reg == REG_PC);

  constraint (!inst.operands[i].isreg,
	      _("Instruction does not support =N addresses"));

  /* Base register always goes in bits 16-19.  */
  inst.instruction |= inst.operands[i].reg << 16;
  if (inst.operands[i].immisreg)
    {
      /* [Rn, Rm {, LSL #shift}] — register offset form.  */
      constraint (is_pc, BAD_PC_ADDRESSING);
      constraint (is_t || is_d, _("cannot use register index with this instruction"));
      constraint (inst.operands[i].negative,
		  _("Thumb does not support negative register indexing"));
      constraint (inst.operands[i].postind,
		  _("Thumb does not support register post-indexing"));
      constraint (inst.operands[i].writeback,
		  _("Thumb does not support register indexing with writeback"));
      constraint (inst.operands[i].shifted && inst.operands[i].shift_kind != SHIFT_LSL,
		  _("Thumb supports only LSL in shifted register indexing"));

      inst.instruction |= inst.operands[i].imm;
      if (inst.operands[i].shifted)
	{
	  constraint (inst.relocs[0].exp.X_op != O_constant,
		      _("expression too complex"));
	  /* Only LSL #0-3 is representable (bits 4-5).  */
	  constraint (inst.relocs[0].exp.X_add_number < 0
		      || inst.relocs[0].exp.X_add_number > 3,
		      _("shift out of range"));
	  inst.instruction |= inst.relocs[0].exp.X_add_number << 4;
	}
      inst.relocs[0].type = BFD_RELOC_UNUSED;
    }
  else if (inst.operands[i].preind)
    {
      /* [Rn, #off]{!} — immediate offset, optionally with writeback.  */
      constraint (is_pc && inst.operands[i].writeback, BAD_PC_WRITEBACK);
      constraint (is_t && inst.operands[i].writeback,
		  _("cannot use writeback with this instruction"));
      constraint (is_pc && ((inst.instruction & THUMB2_LOAD_BIT) == 0),
		  BAD_PC_ADDRESSING);

      if (is_d)
	{
	  inst.instruction |= 0x01000000;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00200000;
	}
      else
	{
	  inst.instruction |= 0x00000c00;
	  if (inst.operands[i].writeback)
	    inst.instruction |= 0x00000100;
	}
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else if (inst.operands[i].postind)
    {
      /* [Rn], #off — post-indexed; writeback is implied by the syntax.  */
      gas_assert (inst.operands[i].writeback);
      constraint (is_pc, _("cannot use post-indexing with PC-relative addressing"));
      constraint (is_t, _("cannot use post-indexing with this instruction"));

      if (is_d)
	inst.instruction |= 0x00200000;
      else
	inst.instruction |= 0x00000900;
      inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_IMM;
    }
  else /* unindexed - only for coprocessor */
    inst.error = _("instruction does not accept unindexed addressing");
}
11408
11409/* Table of Thumb instructions which exist in 16- and/or 32-bit
11410   encodings (the latter only in post-V6T2 cores).  The index is the
11411   value used in the insns table below.  When there is more than one
11412   possible 16-bit encoding for the instruction, this table always
11413   holds variant (1).
11414   Also contains several pseudo-instructions used during relaxation.  */
/* Each entry is X(suffix, 16-bit opcode, 32-bit opcode); an all-ones
   32-bit opcode marks a mnemonic with no 32-bit encoding.  */
#define T16_32_TAB				\
  X(_adc,   4140, eb400000),			\
  X(_adcs,  4140, eb500000),			\
  X(_add,   1c00, eb000000),			\
  X(_adds,  1c00, eb100000),			\
  X(_addi,  0000, f1000000),			\
  X(_addis, 0000, f1100000),			\
  X(_add_pc,000f, f20f0000),			\
  X(_add_sp,000d, f10d0000),			\
  X(_adr,   000f, f20f0000),			\
  X(_and,   4000, ea000000),			\
  X(_ands,  4000, ea100000),			\
  X(_asr,   1000, fa40f000),			\
  X(_asrs,  1000, fa50f000),			\
  X(_b,     e000, f000b000),			\
  X(_bcond, d000, f0008000),			\
  X(_bf,    0000, f040e001),			\
  X(_bfcsel,0000, f000e001),			\
  X(_bfx,   0000, f060e001),			\
  X(_bfl,   0000, f000c001),			\
  X(_bflx,  0000, f070e001),			\
  X(_bic,   4380, ea200000),			\
  X(_bics,  4380, ea300000),			\
  X(_cinc,  0000, ea509000),			\
  X(_cinv,  0000, ea50a000),			\
  X(_cmn,   42c0, eb100f00),			\
  X(_cmp,   2800, ebb00f00),			\
  X(_cneg,  0000, ea50b000),			\
  X(_cpsie, b660, f3af8400),			\
  X(_cpsid, b670, f3af8600),			\
  X(_cpy,   4600, ea4f0000),			\
  X(_csel,  0000, ea508000),			\
  X(_cset,  0000, ea5f900f),			\
  X(_csetm, 0000, ea5fa00f),			\
  X(_csinc, 0000, ea509000),			\
  X(_csinv, 0000, ea50a000),			\
  X(_csneg, 0000, ea50b000),			\
  X(_dec_sp,80dd, f1ad0d00),			\
  X(_dls,   0000, f040e001),			\
  X(_dlstp, 0000, f000e001),			\
  X(_eor,   4040, ea800000),			\
  X(_eors,  4040, ea900000),			\
  X(_inc_sp,00dd, f10d0d00),			\
  X(_lctp,  0000, f00fe001),			\
  X(_ldmia, c800, e8900000),			\
  X(_ldr,   6800, f8500000),			\
  X(_ldrb,  7800, f8100000),			\
  X(_ldrh,  8800, f8300000),			\
  X(_ldrsb, 5600, f9100000),			\
  X(_ldrsh, 5e00, f9300000),			\
  X(_ldr_pc,4800, f85f0000),			\
  X(_ldr_pc2,4800, f85f0000),			\
  X(_ldr_sp,9800, f85d0000),			\
  X(_le,    0000, f00fc001),			\
  X(_letp,  0000, f01fc001),			\
  X(_lsl,   0000, fa00f000),			\
  X(_lsls,  0000, fa10f000),			\
  X(_lsr,   0800, fa20f000),			\
  X(_lsrs,  0800, fa30f000),			\
  X(_mov,   2000, ea4f0000),			\
  X(_movs,  2000, ea5f0000),			\
  X(_mul,   4340, fb00f000),                     \
  X(_muls,  4340, ffffffff), /* no 32b muls */	\
  X(_mvn,   43c0, ea6f0000),			\
  X(_mvns,  43c0, ea7f0000),			\
  X(_neg,   4240, f1c00000), /* rsb #0 */	\
  X(_negs,  4240, f1d00000), /* rsbs #0 */	\
  X(_orr,   4300, ea400000),			\
  X(_orrs,  4300, ea500000),			\
  X(_pop,   bc00, e8bd0000), /* ldmia sp!,... */	\
  X(_push,  b400, e92d0000), /* stmdb sp!,... */	\
  X(_rev,   ba00, fa90f080),			\
  X(_rev16, ba40, fa90f090),			\
  X(_revsh, bac0, fa90f0b0),			\
  X(_ror,   41c0, fa60f000),			\
  X(_rors,  41c0, fa70f000),			\
  X(_sbc,   4180, eb600000),			\
  X(_sbcs,  4180, eb700000),			\
  X(_stmia, c000, e8800000),			\
  X(_str,   6000, f8400000),			\
  X(_strb,  7000, f8000000),			\
  X(_strh,  8000, f8200000),			\
  X(_str_sp,9000, f84d0000),			\
  X(_sub,   1e00, eba00000),			\
  X(_subs,  1e00, ebb00000),			\
  X(_subi,  8000, f1a00000),			\
  X(_subis, 8000, f1b00000),			\
  X(_sxtb,  b240, fa4ff080),			\
  X(_sxth,  b200, fa0ff080),			\
  X(_tst,   4200, ea100f00),			\
  X(_uxtb,  b2c0, fa5ff080),			\
  X(_uxth,  b280, fa1ff080),			\
  X(_nop,   bf00, f3af8000),			\
  X(_yield, bf10, f3af8001),			\
  X(_wfe,   bf20, f3af8002),			\
  X(_wfi,   bf30, f3af8003),			\
  X(_wls,   0000, f040c001),			\
  X(_wlstp, 0000, f000c001),			\
  X(_sev,   bf40, f3af8004),                    \
  X(_sevl,  bf50, f3af8005),			\
  X(_udf,   de00, f7f0a000)
11516
/* To catch errors in encoding functions, the codes are all offset by
   0xF800, putting them in one of the 32-bit prefix ranges, ergo undefined
   as 16-bit instructions.  */
/* First expansion of the X-macro table: each entry contributes its
   T_MNEM_* enumerator, numbered consecutively upwards from
   T16_32_OFFSET + 1.  */
#define X(a,b,c) T_MNEM##a
enum t16_32_codes { T16_32_OFFSET = 0xF7FF, T16_32_TAB };
#undef X

/* Second expansion: the 16-bit opcode of each entry, indexed by its
   T_MNEM_* value through THUMB_OP16.  */
#define X(a,b,c) 0x##b
static const unsigned short thumb_op16[] = { T16_32_TAB };
#define THUMB_OP16(n) (thumb_op16[(n) - (T16_32_OFFSET + 1)])
#undef X

/* Third expansion: the matching 32-bit (Thumb-2) opcode.  In the wide
   encodings bit 20 (0x00100000) is the S (flag-setting) bit, which
   THUMB_SETS_FLAGS tests — compare e.g. _add eb000000 vs _adds
   eb100000 in the table above.  */
#define X(a,b,c) 0x##c
static const unsigned int thumb_op32[] = { T16_32_TAB };
#define THUMB_OP32(n)        (thumb_op32[(n) - (T16_32_OFFSET + 1)])
#define THUMB_SETS_FLAGS(n)  (THUMB_OP32 (n) & 0x00100000)
#undef X
#undef T16_32_TAB
11535
11536/* Thumb instruction encoders, in alphabetical order.  */
11537
11538/* ADDW or SUBW.  */
11539
11540static void
11541do_t_add_sub_w (void)
11542{
11543  int Rd, Rn;
11544
11545  Rd = inst.operands[0].reg;
11546  Rn = inst.operands[1].reg;
11547
11548  /* If Rn is REG_PC, this is ADR; if Rn is REG_SP, then this
11549     is the SP-{plus,minus}-immediate form of the instruction.  */
11550  if (Rn == REG_SP)
11551    constraint (Rd == REG_PC, BAD_PC);
11552  else
11553    reject_bad_reg (Rd);
11554
11555  inst.instruction |= (Rn << 16) | (Rd << 8);
11556  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
11557}
11558
/* Parse an add or subtract instruction.  We get here with inst.instruction
   equal to any of T_MNEM_add, T_MNEM_adds, T_MNEM_sub, or T_MNEM_subs.  */
11561
static void
do_t_add_sub (void)
{
  int Rd, Rs, Rn;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  /* Writing the PC must come last in an IT block.  */
  if (Rd == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      bfd_boolean flags;
      bfd_boolean narrow;
      int opcode;

      /* Flag-setting forms prefer the narrow encoding outside a
	 predication block, non-flag-setting forms inside one.  */
      flags = (inst.instruction == T_MNEM_adds
	       || inst.instruction == T_MNEM_subs);
      if (flags)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      if (!inst.operands[2].isreg)
	{
	  /* Immediate third operand.  */
	  int add;

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);

	  add = (inst.instruction == T_MNEM_add
		 || inst.instruction == T_MNEM_adds);
	  opcode = 0;
	  if (inst.size_req != 4)
	    {
	      /* Attempt to use a narrow opcode, with relaxation if
		 appropriate.  */
	      if (Rd == REG_SP && Rs == REG_SP && !flags)
		opcode = add ? T_MNEM_inc_sp : T_MNEM_dec_sp;
	      else if (Rd <= 7 && Rs == REG_SP && add && !flags)
		opcode = T_MNEM_add_sp;
	      else if (Rd <= 7 && Rs == REG_PC && add && !flags)
		opcode = T_MNEM_add_pc;
	      else if (Rd <= 7 && Rs <= 7 && narrow)
		{
		  if (flags)
		    opcode = add ? T_MNEM_addis : T_MNEM_subis;
		  else
		    opcode = add ? T_MNEM_addi : T_MNEM_subi;
		}
	      if (opcode)
		{
		  inst.instruction = THUMB_OP16(opcode);
		  inst.instruction |= (Rd << 4) | Rs;
		  /* The Thumb-1 group relocations require the 16-bit
		     form; any other relocation may either stay narrow
		     (explicit .n) or be left for relaxation.  */
		  if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		      || (inst.relocs[0].type
			  > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC))
		  {
		    if (inst.size_req == 2)
		      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
		    else
		      inst.relax = opcode;
		  }
		}
	      else
		constraint (inst.size_req == 2, BAD_HIREG);
	    }
	  if (inst.size_req == 4
	      || (inst.size_req != 2 && !opcode))
	    {
	      /* Wide (32-bit) encoding.  */
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);
	      if (Rd == REG_PC)
		{
		  /* The only wide form that may write the PC is
		     SUBS PC, LR, #const with a constant 0..255.  */
		  constraint (add, BAD_PC);
		  constraint (Rs != REG_LR || inst.instruction != T_MNEM_subs,
			     _("only SUBS PC, LR, #const allowed"));
		  constraint (inst.relocs[0].exp.X_op != O_constant,
			      _("expression too complex"));
		  constraint (inst.relocs[0].exp.X_add_number < 0
			      || inst.relocs[0].exp.X_add_number > 0xff,
			     _("immediate value out of range"));
		  inst.instruction = T2_SUBS_PC_LR
				     | inst.relocs[0].exp.X_add_number;
		  inst.relocs[0].type = BFD_RELOC_UNUSED;
		  return;
		}
	      else if (Rs == REG_PC)
		{
		  /* Always use addw/subw.  */
		  inst.instruction = add ? 0xf20f0000 : 0xf2af0000;
		  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMM12;
		}
	      else
		{
		  inst.instruction = THUMB_OP32 (inst.instruction);
		  inst.instruction = (inst.instruction & 0xe1ffffff)
				     | 0x10000000;
		  if (flags)
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
		  else
		    inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_IMM;
		}
	      inst.instruction |= Rd << 8;
	      inst.instruction |= Rs << 16;
	    }
	}
      else
	{
	  /* Register third operand, possibly shifted.  */
	  unsigned int value = inst.relocs[0].exp.X_add_number;
	  unsigned int shift = inst.operands[2].shift_kind;

	  Rn = inst.operands[2].reg;
	  /* See if we can do this with a 16-bit instruction.  */
	  if (!inst.operands[2].shifted && inst.size_req != 4)
	    {
	      if (Rd > 7 || Rs > 7 || Rn > 7)
		narrow = FALSE;

	      if (narrow)
		{
		  inst.instruction = ((inst.instruction == T_MNEM_adds
				       || inst.instruction == T_MNEM_add)
				      ? T_OPCODE_ADD_R3
				      : T_OPCODE_SUB_R3);
		  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
		  return;
		}

	      if (inst.instruction == T_MNEM_add && (Rd == Rs || Rd == Rn))
		{
		  /* Thumb-1 cores (except v6-M) require at least one high
		     register in a narrow non flag setting add.  */
		  if (Rd > 7 || Rn > 7
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2)
		      || ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_msr))
		    {
		      /* Swap so that the source encoded in the Rm field
			 is the one distinct from Rd.  */
		      if (Rd == Rn)
			{
			  Rn = Rs;
			  Rs = Rd;
			}
		      inst.instruction = T_OPCODE_ADD_HI;
		      inst.instruction |= (Rd & 8) << 4;
		      inst.instruction |= (Rd & 7);
		      inst.instruction |= Rn << 3;
		      return;
		    }
		}
	    }

	  constraint (Rd == REG_PC, BAD_PC);
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
	    constraint (Rd == REG_SP && Rs != REG_SP, BAD_SP);
	  constraint (Rs == REG_PC, BAD_PC);
	  reject_bad_reg (Rn);

	  /* If we get here, it can't be done in 16 bits.  */
	  constraint (inst.operands[2].shifted && inst.operands[2].immisreg,
		      _("shift must be constant"));
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= Rd << 8;
	  inst.instruction |= Rs << 16;
	  constraint (Rd == REG_SP && Rs == REG_SP && value > 3,
		      _("shift value over 3 not allowed in thumb mode"));
	  constraint (Rd == REG_SP && Rs == REG_SP && shift != SHIFT_LSL,
		      _("only LSL shift allowed in thumb mode"));
	  encode_thumb32_shifted_operand (2);
	}
    }
  else
    {
      /* Pre-UAL syntax: only the 16-bit encodings are available.  */
      constraint (inst.instruction == T_MNEM_adds
		  || inst.instruction == T_MNEM_subs,
		  BAD_THUMB32);

      if (!inst.operands[2].isreg) /* Rd, Rs, #imm */
	{
	  constraint ((Rd > 7 && (Rd != REG_SP || Rs != REG_SP))
		      || (Rs > 7 && Rs != REG_SP && Rs != REG_PC),
		      BAD_HIREG);

	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? 0x0000 : 0x8000);
	  inst.instruction |= (Rd << 4) | Rs;
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
	  return;
	}

      Rn = inst.operands[2].reg;
      constraint (inst.operands[2].shifted, _("unshifted register required"));

      /* We now have Rd, Rs, and Rn set to registers.  */
      if (Rd > 7 || Rs > 7 || Rn > 7)
	{
	  /* Can't do this for SUB.	 */
	  constraint (inst.instruction == T_MNEM_sub, BAD_HIREG);
	  inst.instruction = T_OPCODE_ADD_HI;
	  inst.instruction |= (Rd & 8) << 4;
	  inst.instruction |= (Rd & 7);
	  if (Rs == Rd)
	    inst.instruction |= Rn << 3;
	  else if (Rn == Rd)
	    inst.instruction |= Rs << 3;
	  else
	    constraint (1, _("dest must overlap one source register"));
	}
      else
	{
	  inst.instruction = (inst.instruction == T_MNEM_add
			      ? T_OPCODE_ADD_R3 : T_OPCODE_SUB_R3);
	  inst.instruction |= Rd | (Rs << 3) | (Rn << 6);
	}
    }
}
11782
11783static void
11784do_t_adr (void)
11785{
11786  unsigned Rd;
11787
11788  Rd = inst.operands[0].reg;
11789  reject_bad_reg (Rd);
11790
11791  if (unified_syntax && inst.size_req == 0 && Rd <= 7)
11792    {
11793      /* Defer to section relaxation.  */
11794      inst.relax = inst.instruction;
11795      inst.instruction = THUMB_OP16 (inst.instruction);
11796      inst.instruction |= Rd << 4;
11797    }
11798  else if (unified_syntax && inst.size_req != 2)
11799    {
11800      /* Generate a 32-bit opcode.  */
11801      inst.instruction = THUMB_OP32 (inst.instruction);
11802      inst.instruction |= Rd << 8;
11803      inst.relocs[0].type = BFD_RELOC_ARM_T32_ADD_PC12;
11804      inst.relocs[0].pc_rel = 1;
11805    }
11806  else
11807    {
11808      /* Generate a 16-bit opcode.  */
11809      inst.instruction = THUMB_OP16 (inst.instruction);
11810      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_ADD;
11811      inst.relocs[0].exp.X_add_number -= 4; /* PC relative adjust.  */
11812      inst.relocs[0].pc_rel = 1;
11813      inst.instruction |= Rd << 4;
11814    }
11815
11816  if (inst.relocs[0].exp.X_op == O_symbol
11817      && inst.relocs[0].exp.X_add_symbol != NULL
11818      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
11819      && THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
11820    inst.relocs[0].exp.X_add_number += 1;
11821}
11822
11823/* Arithmetic instructions for which there is just one 16-bit
11824   instruction encoding, and it allows only two low registers.
11825   For maximal compatibility with ARM syntax, we allow three register
11826   operands even when Thumb-32 instructions are not available, as long
11827   as the first two are identical.  For instance, both "sbc r0,r1" and
11828   "sbc r0,r0,r1" are allowed.  */
11829static void
11830do_t_arit3 (void)
11831{
11832  int Rd, Rs, Rn;
11833
11834  Rd = inst.operands[0].reg;
11835  Rs = (inst.operands[1].present
11836	? inst.operands[1].reg    /* Rd, Rs, foo */
11837	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11838  Rn = inst.operands[2].reg;
11839
11840  reject_bad_reg (Rd);
11841  reject_bad_reg (Rs);
11842  if (inst.operands[2].isreg)
11843    reject_bad_reg (Rn);
11844
11845  if (unified_syntax)
11846    {
11847      if (!inst.operands[2].isreg)
11848	{
11849	  /* For an immediate, we always generate a 32-bit opcode;
11850	     section relaxation will shrink it later if possible.  */
11851	  inst.instruction = THUMB_OP32 (inst.instruction);
11852	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11853	  inst.instruction |= Rd << 8;
11854	  inst.instruction |= Rs << 16;
11855	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
11856	}
11857      else
11858	{
11859	  bfd_boolean narrow;
11860
11861	  /* See if we can do this with a 16-bit instruction.  */
11862	  if (THUMB_SETS_FLAGS (inst.instruction))
11863	    narrow = !in_pred_block ();
11864	  else
11865	    narrow = in_pred_block ();
11866
11867	  if (Rd > 7 || Rn > 7 || Rs > 7)
11868	    narrow = FALSE;
11869	  if (inst.operands[2].shifted)
11870	    narrow = FALSE;
11871	  if (inst.size_req == 4)
11872	    narrow = FALSE;
11873
11874	  if (narrow
11875	      && Rd == Rs)
11876	    {
11877	      inst.instruction = THUMB_OP16 (inst.instruction);
11878	      inst.instruction |= Rd;
11879	      inst.instruction |= Rn << 3;
11880	      return;
11881	    }
11882
11883	  /* If we get here, it can't be done in 16 bits.  */
11884	  constraint (inst.operands[2].shifted
11885		      && inst.operands[2].immisreg,
11886		      _("shift must be constant"));
11887	  inst.instruction = THUMB_OP32 (inst.instruction);
11888	  inst.instruction |= Rd << 8;
11889	  inst.instruction |= Rs << 16;
11890	  encode_thumb32_shifted_operand (2);
11891	}
11892    }
11893  else
11894    {
11895      /* On its face this is a lie - the instruction does set the
11896	 flags.  However, the only supported mnemonic in this mode
11897	 says it doesn't.  */
11898      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11899
11900      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
11901		  _("unshifted register required"));
11902      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
11903      constraint (Rd != Rs,
11904		  _("dest and source1 must be the same register"));
11905
11906      inst.instruction = THUMB_OP16 (inst.instruction);
11907      inst.instruction |= Rd;
11908      inst.instruction |= Rn << 3;
11909    }
11910}
11911
11912/* Similarly, but for instructions where the arithmetic operation is
11913   commutative, so we can allow either of them to be different from
11914   the destination operand in a 16-bit instruction.  For instance, all
11915   three of "adc r0,r1", "adc r0,r0,r1", and "adc r0,r1,r0" are
11916   accepted.  */
11917static void
11918do_t_arit3c (void)
11919{
11920  int Rd, Rs, Rn;
11921
11922  Rd = inst.operands[0].reg;
11923  Rs = (inst.operands[1].present
11924	? inst.operands[1].reg    /* Rd, Rs, foo */
11925	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */
11926  Rn = inst.operands[2].reg;
11927
11928  reject_bad_reg (Rd);
11929  reject_bad_reg (Rs);
11930  if (inst.operands[2].isreg)
11931    reject_bad_reg (Rn);
11932
11933  if (unified_syntax)
11934    {
11935      if (!inst.operands[2].isreg)
11936	{
11937	  /* For an immediate, we always generate a 32-bit opcode;
11938	     section relaxation will shrink it later if possible.  */
11939	  inst.instruction = THUMB_OP32 (inst.instruction);
11940	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
11941	  inst.instruction |= Rd << 8;
11942	  inst.instruction |= Rs << 16;
11943	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
11944	}
11945      else
11946	{
11947	  bfd_boolean narrow;
11948
11949	  /* See if we can do this with a 16-bit instruction.  */
11950	  if (THUMB_SETS_FLAGS (inst.instruction))
11951	    narrow = !in_pred_block ();
11952	  else
11953	    narrow = in_pred_block ();
11954
11955	  if (Rd > 7 || Rn > 7 || Rs > 7)
11956	    narrow = FALSE;
11957	  if (inst.operands[2].shifted)
11958	    narrow = FALSE;
11959	  if (inst.size_req == 4)
11960	    narrow = FALSE;
11961
11962	  if (narrow)
11963	    {
11964	      if (Rd == Rs)
11965		{
11966		  inst.instruction = THUMB_OP16 (inst.instruction);
11967		  inst.instruction |= Rd;
11968		  inst.instruction |= Rn << 3;
11969		  return;
11970		}
11971	      if (Rd == Rn)
11972		{
11973		  inst.instruction = THUMB_OP16 (inst.instruction);
11974		  inst.instruction |= Rd;
11975		  inst.instruction |= Rs << 3;
11976		  return;
11977		}
11978	    }
11979
11980	  /* If we get here, it can't be done in 16 bits.  */
11981	  constraint (inst.operands[2].shifted
11982		      && inst.operands[2].immisreg,
11983		      _("shift must be constant"));
11984	  inst.instruction = THUMB_OP32 (inst.instruction);
11985	  inst.instruction |= Rd << 8;
11986	  inst.instruction |= Rs << 16;
11987	  encode_thumb32_shifted_operand (2);
11988	}
11989    }
11990  else
11991    {
11992      /* On its face this is a lie - the instruction does set the
11993	 flags.  However, the only supported mnemonic in this mode
11994	 says it doesn't.  */
11995      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
11996
11997      constraint (!inst.operands[2].isreg || inst.operands[2].shifted,
11998		  _("unshifted register required"));
11999      constraint (Rd > 7 || Rs > 7 || Rn > 7, BAD_HIREG);
12000
12001      inst.instruction = THUMB_OP16 (inst.instruction);
12002      inst.instruction |= Rd;
12003
12004      if (Rd == Rs)
12005	inst.instruction |= Rn << 3;
12006      else if (Rd == Rn)
12007	inst.instruction |= Rs << 3;
12008      else
12009	constraint (1, _("dest must overlap one source register"));
12010    }
12011}
12012
12013static void
12014do_t_bfc (void)
12015{
12016  unsigned Rd;
12017  unsigned int msb = inst.operands[1].imm + inst.operands[2].imm;
12018  constraint (msb > 32, _("bit-field extends past end of register"));
12019  /* The instruction encoding stores the LSB and MSB,
12020     not the LSB and width.  */
12021  Rd = inst.operands[0].reg;
12022  reject_bad_reg (Rd);
12023  inst.instruction |= Rd << 8;
12024  inst.instruction |= (inst.operands[1].imm & 0x1c) << 10;
12025  inst.instruction |= (inst.operands[1].imm & 0x03) << 6;
12026  inst.instruction |= msb - 1;
12027}
12028
12029static void
12030do_t_bfi (void)
12031{
12032  int Rd, Rn;
12033  unsigned int msb;
12034
12035  Rd = inst.operands[0].reg;
12036  reject_bad_reg (Rd);
12037
12038  /* #0 in second position is alternative syntax for bfc, which is
12039     the same instruction but with REG_PC in the Rm field.  */
12040  if (!inst.operands[1].isreg)
12041    Rn = REG_PC;
12042  else
12043    {
12044      Rn = inst.operands[1].reg;
12045      reject_bad_reg (Rn);
12046    }
12047
12048  msb = inst.operands[2].imm + inst.operands[3].imm;
12049  constraint (msb > 32, _("bit-field extends past end of register"));
12050  /* The instruction encoding stores the LSB and MSB,
12051     not the LSB and width.  */
12052  inst.instruction |= Rd << 8;
12053  inst.instruction |= Rn << 16;
12054  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12055  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12056  inst.instruction |= msb - 1;
12057}
12058
12059static void
12060do_t_bfx (void)
12061{
12062  unsigned Rd, Rn;
12063
12064  Rd = inst.operands[0].reg;
12065  Rn = inst.operands[1].reg;
12066
12067  reject_bad_reg (Rd);
12068  reject_bad_reg (Rn);
12069
12070  constraint (inst.operands[2].imm + inst.operands[3].imm > 32,
12071	      _("bit-field extends past end of register"));
12072  inst.instruction |= Rd << 8;
12073  inst.instruction |= Rn << 16;
12074  inst.instruction |= (inst.operands[2].imm & 0x1c) << 10;
12075  inst.instruction |= (inst.operands[2].imm & 0x03) << 6;
12076  inst.instruction |= inst.operands[3].imm - 1;
12077}
12078
12079/* ARM V5 Thumb BLX (argument parse)
12080	BLX <target_addr>	which is BLX(1)
12081	BLX <Rm>		which is BLX(2)
12082   Unfortunately, there are two different opcodes for this mnemonic.
12083   So, the insns[].value is not used, and the code here zaps values
12084	into inst.instruction.
12085
12086   ??? How to take advantage of the additional two bits of displacement
12087   available in Thumb32 mode?  Need new relocation?  */
12088
12089static void
12090do_t_blx (void)
12091{
12092  set_pred_insn_type_last ();
12093
12094  if (inst.operands[0].isreg)
12095    {
12096      constraint (inst.operands[0].reg == REG_PC, BAD_PC);
12097      /* We have a register, so this is BLX(2).  */
12098      inst.instruction |= inst.operands[0].reg << 3;
12099    }
12100  else
12101    {
12102      /* No register.  This must be BLX(1).  */
12103      inst.instruction = 0xf000e800;
12104      encode_branch (BFD_RELOC_THUMB_PCREL_BLX);
12105    }
12106}
12107
12108static void
12109do_t_branch (void)
12110{
12111  int opcode;
12112  int cond;
12113  bfd_reloc_code_real_type reloc;
12114
12115  cond = inst.cond;
12116  set_pred_insn_type (IF_INSIDE_IT_LAST_INSN);
12117
12118  if (in_pred_block ())
12119    {
12120      /* Conditional branches inside IT blocks are encoded as unconditional
12121	 branches.  */
12122      cond = COND_ALWAYS;
12123    }
12124  else
12125    cond = inst.cond;
12126
12127  if (cond != COND_ALWAYS)
12128    opcode = T_MNEM_bcond;
12129  else
12130    opcode = inst.instruction;
12131
12132  if (unified_syntax
12133      && (inst.size_req == 4
12134	  || (inst.size_req != 2
12135	      && (inst.operands[0].hasreloc
12136		  || inst.relocs[0].exp.X_op == O_constant))))
12137    {
12138      inst.instruction = THUMB_OP32(opcode);
12139      if (cond == COND_ALWAYS)
12140	reloc = BFD_RELOC_THUMB_PCREL_BRANCH25;
12141      else
12142	{
12143	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2),
12144		      _("selected architecture does not support "
12145			"wide conditional branch instruction"));
12146
12147	  gas_assert (cond != 0xF);
12148	  inst.instruction |= cond << 22;
12149	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH20;
12150	}
12151    }
12152  else
12153    {
12154      inst.instruction = THUMB_OP16(opcode);
12155      if (cond == COND_ALWAYS)
12156	reloc = BFD_RELOC_THUMB_PCREL_BRANCH12;
12157      else
12158	{
12159	  inst.instruction |= cond << 8;
12160	  reloc = BFD_RELOC_THUMB_PCREL_BRANCH9;
12161	}
12162      /* Allow section relaxation.  */
12163      if (unified_syntax && inst.size_req != 2)
12164	inst.relax = opcode;
12165    }
12166  inst.relocs[0].type = reloc;
12167  inst.relocs[0].pc_rel = 1;
12168}
12169
12170/* Actually do the work for Thumb state bkpt and hlt.  The only difference
12171   between the two is the maximum immediate allowed - which is passed in
12172   RANGE.  */
static void
do_t_bkpt_hlt1 (int range)
{
  /* RANGE is the largest immediate the particular mnemonic accepts
     (255 for BKPT, 63 for HLT).  */
  constraint (inst.cond != COND_ALWAYS,
	      _("instruction is always unconditional"));
  if (inst.operands[0].present)
    {
      constraint (inst.operands[0].imm > range,
		  _("immediate value out of range"));
      inst.instruction |= inst.operands[0].imm;
    }

  /* May appear inside an IT block, but executes unconditionally.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
12187
static void
do_t_hlt (void)
{
  /* Thumb HLT: 6-bit immediate, 0..63.  */
  do_t_bkpt_hlt1 (63);
}
12193
static void
do_t_bkpt (void)
{
  /* Thumb BKPT: 8-bit immediate, 0..255.  */
  do_t_bkpt_hlt1 (255);
}
12199
static void
do_t_branch23 (void)
{
  /* Thumb BL-style branch: encode with the 23-bit PC-relative
     relocation.  */
  set_pred_insn_type_last ();
  encode_branch (BFD_RELOC_THUMB_PCREL_BRANCH23);

  /* md_apply_fix blows up with 'bl foo(PLT)' where foo is defined in
     this file.  We used to simply ignore the PLT reloc type here --
     the branch encoding is now needed to deal with TLSCALL relocs.
     So if we see a PLT reloc now, put it back to how it used to be to
     keep the preexisting behaviour.  */
  if (inst.relocs[0].type == BFD_RELOC_ARM_PLT32)
    inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH23;

#if defined(OBJ_COFF)
  /* If the destination of the branch is a defined symbol which does not have
     the THUMB_FUNC attribute, then we must be calling a function which has
     the (interfacearm) attribute.  We look for the Thumb entry point to that
     function and change the branch to refer to that function instead.	*/
  if (	 inst.relocs[0].exp.X_op == O_symbol
      && inst.relocs[0].exp.X_add_symbol != NULL
      && S_IS_DEFINED (inst.relocs[0].exp.X_add_symbol)
      && ! THUMB_IS_FUNC (inst.relocs[0].exp.X_add_symbol))
    inst.relocs[0].exp.X_add_symbol
      = find_real_start (inst.relocs[0].exp.X_add_symbol);
#endif
}
12227
12228static void
12229do_t_bx (void)
12230{
12231  set_pred_insn_type_last ();
12232  inst.instruction |= inst.operands[0].reg << 3;
12233  /* ??? FIXME: Should add a hacky reloc here if reg is REG_PC.	 The reloc
12234     should cause the alignment to be checked once it is known.	 This is
12235     because BX PC only works if the instruction is word aligned.  */
12236}
12237
12238static void
12239do_t_bxj (void)
12240{
12241  int Rm;
12242
12243  set_pred_insn_type_last ();
12244  Rm = inst.operands[0].reg;
12245  reject_bad_reg (Rm);
12246  inst.instruction |= Rm << 16;
12247}
12248
12249static void
12250do_t_clz (void)
12251{
12252  unsigned Rd;
12253  unsigned Rm;
12254
12255  Rd = inst.operands[0].reg;
12256  Rm = inst.operands[1].reg;
12257
12258  reject_bad_reg (Rd);
12259  reject_bad_reg (Rm);
12260
12261  inst.instruction |= Rd << 8;
12262  inst.instruction |= Rm << 16;
12263  inst.instruction |= Rm;
12264}
12265
12266/* For the Armv8.1-M conditional instructions.  */
static void
do_t_cond (void)
{
  unsigned Rd, Rn, Rm;
  signed int cond;

  constraint (inst.cond != COND_ALWAYS, BAD_COND);

  Rd = inst.operands[0].reg;
  switch (inst.instruction)
    {
      /* Full four-operand forms: CS<op> Rd, Rn, Rm, cond.  */
      case T_MNEM_csinc:
      case T_MNEM_csinv:
      case T_MNEM_csneg:
      case T_MNEM_csel:
	Rn = inst.operands[1].reg;
	Rm = inst.operands[2].reg;
	cond = inst.operands[3].imm;
	constraint (Rn == REG_SP, BAD_SP);
	constraint (Rm == REG_SP, BAD_SP);
	break;

      /* Two-register aliases: encoded with Rm == Rn and the
	 condition inverted.  */
      case T_MNEM_cinc:
      case T_MNEM_cinv:
      case T_MNEM_cneg:
	Rn = inst.operands[1].reg;
	cond = inst.operands[2].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	constraint (Rn == REG_SP, BAD_SP);
	Rm = Rn;
	break;

      /* Destination-only aliases: encoded with PC in both source
	 fields and the condition inverted.  */
      case T_MNEM_csetm:
      case T_MNEM_cset:
	cond = inst.operands[1].imm;
	/* Invert the last bit to invert the cond.  */
	cond = TOGGLE_BIT (cond, 0);
	Rn = REG_PC;
	Rm = REG_PC;
	break;

      default: abort ();
    }

  /* All of these are wide-only encodings.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction = THUMB_OP32 (inst.instruction);
  inst.instruction |= Rd << 8;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm;
  inst.instruction |= cond << 4;
}
12319
static void
do_t_csdb (void)
{
  /* CSDB takes no operands; just record that it must sit outside any
     predication block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
}
12325
static void
do_t_cps (void)
{
  /* CPS: merge the parsed immediate operand into the opcode.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= inst.operands[0].imm;
}
12332
12333static void
12334do_t_cpsi (void)
12335{
12336  set_pred_insn_type (OUTSIDE_PRED_INSN);
12337  if (unified_syntax
12338      && (inst.operands[1].present || inst.size_req == 4)
12339      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6_notm))
12340    {
12341      unsigned int imod = (inst.instruction & 0x0030) >> 4;
12342      inst.instruction = 0xf3af8000;
12343      inst.instruction |= imod << 9;
12344      inst.instruction |= inst.operands[0].imm << 5;
12345      if (inst.operands[1].present)
12346	inst.instruction |= 0x100 | inst.operands[1].imm;
12347    }
12348  else
12349    {
12350      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1)
12351		  && (inst.operands[0].imm & 4),
12352		  _("selected processor does not support 'A' form "
12353		    "of this instruction"));
12354      constraint (inst.operands[1].present || inst.size_req == 4,
12355		  _("Thumb does not support the 2-argument "
12356		    "form of this instruction"));
12357      inst.instruction |= inst.operands[0].imm;
12358    }
12359}
12360
12361/* THUMB CPY instruction (argument parse).  */
12362
12363static void
12364do_t_cpy (void)
12365{
12366  if (inst.size_req == 4)
12367    {
12368      inst.instruction = THUMB_OP32 (T_MNEM_mov);
12369      inst.instruction |= inst.operands[0].reg << 8;
12370      inst.instruction |= inst.operands[1].reg;
12371    }
12372  else
12373    {
12374      inst.instruction |= (inst.operands[0].reg & 0x8) << 4;
12375      inst.instruction |= (inst.operands[0].reg & 0x7);
12376      inst.instruction |= inst.operands[1].reg << 3;
12377    }
12378}
12379
12380static void
12381do_t_cbz (void)
12382{
12383  set_pred_insn_type (OUTSIDE_PRED_INSN);
12384  constraint (inst.operands[0].reg > 7, BAD_HIREG);
12385  inst.instruction |= inst.operands[0].reg;
12386  inst.relocs[0].pc_rel = 1;
12387  inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH7;
12388}
12389
static void
do_t_dbg (void)
{
  /* DBG: merge the option immediate into the low bits of the opcode.  */
  inst.instruction |= inst.operands[0].imm;
}
12395
12396static void
12397do_t_div (void)
12398{
12399  unsigned Rd, Rn, Rm;
12400
12401  Rd = inst.operands[0].reg;
12402  Rn = (inst.operands[1].present
12403	? inst.operands[1].reg : Rd);
12404  Rm = inst.operands[2].reg;
12405
12406  reject_bad_reg (Rd);
12407  reject_bad_reg (Rn);
12408  reject_bad_reg (Rm);
12409
12410  inst.instruction |= Rd << 8;
12411  inst.instruction |= Rn << 16;
12412  inst.instruction |= Rm;
12413}
12414
12415static void
12416do_t_hint (void)
12417{
12418  if (unified_syntax && inst.size_req == 4)
12419    inst.instruction = THUMB_OP32 (inst.instruction);
12420  else
12421    inst.instruction = THUMB_OP16 (inst.instruction);
12422}
12423
static void
do_t_it (void)
{
  unsigned int cond = inst.operands[0].imm;

  /* Open a new scalar predication (IT) block with this base
     condition.  */
  set_pred_insn_type (IT_INSN);
  now_pred.mask = (inst.instruction & 0xf) | 0x10;
  now_pred.cc = cond;
  now_pred.warn_deprecated = FALSE;
  now_pred.type = SCALAR_PRED;

  /* If the condition is a negative condition, invert the mask.  */
  if ((cond & 0x1) == 0x0)
    {
      unsigned int mask = inst.instruction & 0x000f;

      /* The lowest set bit of the mask marks the block length; the
	 bits above it encode the then/else pattern, which must be
	 flipped when the base condition is inverted.  */
      if ((mask & 0x7) == 0)
	{
	  /* No conversion needed.  */
	  now_pred.block_length = 1;
	}
      else if ((mask & 0x3) == 0)
	{
	  mask ^= 0x8;
	  now_pred.block_length = 2;
	}
      else if ((mask & 0x1) == 0)
	{
	  mask ^= 0xC;
	  now_pred.block_length = 3;
	}
      else
	{
	  mask ^= 0xE;
	  now_pred.block_length = 4;
	}

      inst.instruction &= 0xfff0;
      inst.instruction |= mask;
    }

  inst.instruction |= cond << 4;
}
12467
12468/* Helper function used for both push/pop and ldm/stm.  */
static void
encode_thumb2_multi (bfd_boolean do_io, int base, unsigned mask,
		     bfd_boolean writeback)
{
  bfd_boolean load, store;

  /* DO_IO marks a genuine load/store multiple; BASE is the base
     register (must be valid whenever DO_IO is set) and MASK is the
     register-list bitmap.  */
  gas_assert (base != -1 || !do_io);
  load = do_io && ((inst.instruction & (1 << 20)) != 0);
  store = do_io && !load;

  if (mask & (1 << 13))
    inst.error =  _("SP not allowed in register list");

  if (do_io && (mask & (1 << base)) != 0
      && writeback)
    inst.error = _("having the base register in the register list when "
		   "using write back is UNPREDICTABLE");

  if (load)
    {
      if (mask & (1 << 15))
	{
	  if (mask & (1 << 14))
	    inst.error = _("LR and PC should not both be in register list");
	  else
	    /* Loading the PC branches, so the insn must be last in
	       any predication block.  */
	    set_pred_insn_type_last ();
	}
    }
  else if (store)
    {
      if (mask & (1 << 15))
	inst.error = _("PC not allowed in register list");
    }

  if (do_io && ((mask & (mask - 1)) == 0))
    {
      /* Single register transfers implemented as str/ldr.  */
      if (writeback)
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00000b04; /* ia! -> [base], #4 */
	  else
	    inst.instruction = 0x00000d04; /* db! -> [base, #-4]! */
	}
      else
	{
	  if (inst.instruction & (1 << 23))
	    inst.instruction = 0x00800000; /* ia -> [base] */
	  else
	    inst.instruction = 0x00000c04; /* db -> [base, #-4] */
	}

      inst.instruction |= 0xf8400000;
      if (load)
	inst.instruction |= 0x00100000;

      /* Move the single register's number into the Rt position
	 (bits 15:12) of the str/ldr encoding.  */
      mask = ffs (mask) - 1;
      mask <<= 12;
    }
  else if (writeback)
    inst.instruction |= WRITE_BACK;

  inst.instruction |= mask;
  if (do_io)
    inst.instruction |= base << 16;
}
12535
/* Encode Thumb ldmia/ldmdb/stmia/stmdb.  Under unified syntax a
   16-bit encoding is used where one exists (low registers with
   ldmia/stmia, a push/pop alias when the base is SP, or a
   single-register str/ldr), falling back to the 32-bit T2 encoding;
   the non-unified path accepts only the Thumb-1 ldmia/stmia forms.  */
static void
do_t_ldmstm (void)
{
  /* This really doesn't seem worth it.  */
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));
  constraint (inst.operands[1].writeback,
	      _("Thumb load/store multiple does not support {reglist}^"));

  if (unified_syntax)
    {
      bfd_boolean narrow;
      unsigned mask;

      narrow = FALSE;
      /* See if we can use a 16-bit instruction.  */
      if (inst.instruction < 0xffff /* not ldmdb/stmdb */
	  && inst.size_req != 4
	  && !(inst.operands[1].imm & ~0xff))
	{
	  mask = 1 << inst.operands[0].reg;

	  if (inst.operands[0].reg <= 7)
	    {
	      /* The 16-bit encoding requires writeback for stmia;
		 for ldmia it requires writeback exactly when the base
		 register is absent from the register list.  */
	      if (inst.instruction == T_MNEM_stmia
		  ? inst.operands[0].writeback
		  : (inst.operands[0].writeback
		     == !(inst.operands[1].imm & mask)))
		{
		  if (inst.instruction == T_MNEM_stmia
		      && (inst.operands[1].imm & mask)
		      && (inst.operands[1].imm & (mask - 1)))
		    as_warn (_("value stored for r%d is UNKNOWN"),
			     inst.operands[0].reg);

		  inst.instruction = THUMB_OP16 (inst.instruction);
		  inst.instruction |= inst.operands[0].reg << 8;
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  /* This means 1 register in reg list one of 3 situations:
		     1. Instruction is stmia, but without writeback.
		     2. lmdia without writeback, but with Rn not in
			reglist.
		     3. ldmia with writeback, but with Rn in reglist.
		     Case 3 is UNPREDICTABLE behaviour, so we handle
		     case 1 and 2 which can be converted into a 16-bit
		     str or ldr. The SP cases are handled below.  */
		  unsigned long opcode;
		  /* First, record an error for Case 3.  */
		  if (inst.operands[1].imm & mask
		      && inst.operands[0].writeback)
		    inst.error =
			_("having the base register in the register list when "
			  "using write back is UNPREDICTABLE");

		  opcode = (inst.instruction == T_MNEM_stmia ? T_MNEM_str
							     : T_MNEM_ldr);
		  inst.instruction = THUMB_OP16 (opcode);
		  inst.instruction |= inst.operands[0].reg << 3;
		  inst.instruction |= (ffs (inst.operands[1].imm)-1);
		  narrow = TRUE;
		}
	    }
	  else if (inst.operands[0] .reg == REG_SP)
	    {
	      /* SP base: use the push/pop alias when writing back,
		 otherwise a single-register SP-relative str/ldr.  */
	      if (inst.operands[0].writeback)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_push : T_MNEM_pop);
		  inst.instruction |= inst.operands[1].imm;
		  narrow = TRUE;
		}
	      else if ((inst.operands[1].imm & (inst.operands[1].imm-1)) == 0)
		{
		  inst.instruction =
			THUMB_OP16 (inst.instruction == T_MNEM_stmia
				    ? T_MNEM_str_sp : T_MNEM_ldr_sp);
		  inst.instruction |= ((ffs (inst.operands[1].imm)-1) << 8);
		  narrow = TRUE;
		}
	    }
	}

      if (!narrow)
	{
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);

	  encode_thumb2_multi (TRUE /* do_io */, inst.operands[0].reg,
			       inst.operands[1].imm,
			       inst.operands[0].writeback);
	}
    }
  else
    {
      /* Pre-unified (Thumb-1) syntax: low registers only, and only
	 the ldmia/stmia mnemonics.  */
      constraint (inst.operands[0].reg > 7
		  || (inst.operands[1].imm & ~0xff), BAD_HIREG);
      constraint (inst.instruction != T_MNEM_ldmia
		  && inst.instruction != T_MNEM_stmia,
		  _("Thumb-2 instruction only valid in unified syntax"));
      if (inst.instruction == T_MNEM_stmia)
	{
	  if (!inst.operands[0].writeback)
	    as_warn (_("this instruction will write back the base register"));
	  if ((inst.operands[1].imm & (1 << inst.operands[0].reg))
	      && (inst.operands[1].imm & ((1 << inst.operands[0].reg) - 1)))
	    as_warn (_("value stored for r%d is UNKNOWN"),
		     inst.operands[0].reg);
	}
      else
	{
	  if (!inst.operands[0].writeback
	      && !(inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will write back the base register"));
	  else if (inst.operands[0].writeback
		   && (inst.operands[1].imm & (1 << inst.operands[0].reg)))
	    as_warn (_("this instruction will not write back the base register"));
	}

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].reg << 8;
      inst.instruction |= inst.operands[1].imm;
    }
}
12664
12665static void
12666do_t_ldrex (void)
12667{
12668  constraint (!inst.operands[1].isreg || !inst.operands[1].preind
12669	      || inst.operands[1].postind || inst.operands[1].writeback
12670	      || inst.operands[1].immisreg || inst.operands[1].shifted
12671	      || inst.operands[1].negative,
12672	      BAD_ADDR_MODE);
12673
12674  constraint ((inst.operands[1].reg == REG_PC), BAD_PC);
12675
12676  inst.instruction |= inst.operands[0].reg << 12;
12677  inst.instruction |= inst.operands[1].reg << 16;
12678  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
12679}
12680
12681static void
12682do_t_ldrexd (void)
12683{
12684  if (!inst.operands[1].present)
12685    {
12686      constraint (inst.operands[0].reg == REG_LR,
12687		  _("r14 not allowed as first register "
12688		    "when second register is omitted"));
12689      inst.operands[1].reg = inst.operands[0].reg + 1;
12690    }
12691  constraint (inst.operands[0].reg == inst.operands[1].reg,
12692	      BAD_OVERLAP);
12693
12694  inst.instruction |= inst.operands[0].reg << 12;
12695  inst.instruction |= inst.operands[1].reg << 8;
12696  inst.instruction |= inst.operands[2].reg << 16;
12697}
12698
/* Encode a Thumb single-register load/store (ldr/str and the byte,
   halfword and signed variants).  Under unified syntax the various
   16-bit encodings are tried first (register offset, immediate
   offset, PC- and SP-relative forms, with relaxation when no explicit
   size was requested) before falling back to the 32-bit T2 encoding;
   the non-unified path accepts only the original Thumb-1 forms.  */
static void
do_t_ldst (void)
{
  unsigned long opcode;
  int Rn;

  /* A load that writes the PC must be the last instruction in an
     IT block.  */
  if (inst.operands[0].isreg
      && !inst.operands[0].preind
      && inst.operands[0].reg == REG_PC)
    set_pred_insn_type_last ();

  opcode = inst.instruction;
  if (unified_syntax)
    {
      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand (e.g. "ldr Rd, =expr"): try a literal
	     pool entry or an equivalent mov/mvn.  */
	  if (opcode <= 0xffff)
	    inst.instruction = THUMB_OP32 (opcode);
	  if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
	    return;
	}
      if (inst.operands[1].isreg
	  && !inst.operands[1].writeback
	  && !inst.operands[1].shifted && !inst.operands[1].postind
	  && !inst.operands[1].negative && inst.operands[0].reg <= 7
	  && opcode <= 0xffff
	  && inst.size_req != 4)
	{
	  /* Insn may have a 16-bit form.  */
	  Rn = inst.operands[1].reg;
	  if (inst.operands[1].immisreg)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      /* [Rn, Rik] */
	      if (Rn <= 7 && inst.operands[1].imm <= 7)
		goto op16;
	      else if (opcode != T_MNEM_ldr && opcode != T_MNEM_str)
		reject_bad_reg (inst.operands[1].imm);
	    }
	  else if ((Rn <= 7 && opcode != T_MNEM_ldrsh
		    && opcode != T_MNEM_ldrsb)
		   || ((Rn == REG_PC || Rn == REG_SP) && opcode == T_MNEM_ldr)
		   || (Rn == REG_SP && opcode == T_MNEM_str))
	    {
	      /* [Rn, #const] */
	      if (Rn > 7)
		{
		  /* PC- and SP-relative forms have dedicated opcodes.  */
		  if (Rn == REG_PC)
		    {
		      if (inst.relocs[0].pc_rel)
			opcode = T_MNEM_ldr_pc2;
		      else
			opcode = T_MNEM_ldr_pc;
		    }
		  else
		    {
		      if (opcode == T_MNEM_ldr)
			opcode = T_MNEM_ldr_sp;
		      else
			opcode = T_MNEM_str_sp;
		    }
		  inst.instruction = inst.operands[0].reg << 8;
		}
	      else
		{
		  inst.instruction = inst.operands[0].reg;
		  inst.instruction |= inst.operands[1].reg << 3;
		}
	      inst.instruction |= THUMB_OP16 (opcode);
	      /* With an explicit ".n" emit the 16-bit reloc now;
		 otherwise leave the final size choice to relaxation.  */
	      if (inst.size_req == 2)
		inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
	      else
		inst.relax = opcode;
	      return;
	    }
	}
      /* Definitely a 32-bit variant.  */

      /* Warning for Erratum 752419.  */
      if (opcode == T_MNEM_ldr
	  && inst.operands[0].reg == REG_SP
	  && inst.operands[1].writeback == 1
	  && !inst.operands[1].immisreg)
	{
	  if (no_cpu_selected ()
	      || (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a)
		  && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7r)))
	    as_warn (_("This instruction may be unpredictable "
		       "if executed on M-profile cores "
		       "with interrupts enabled."));
	}

      /* Do some validations regarding addressing modes.  */
      if (inst.operands[1].immisreg)
	reject_bad_reg (inst.operands[1].imm);

      constraint (inst.operands[1].writeback == 1
		  && inst.operands[0].reg == inst.operands[1].reg,
		  BAD_OVERLAP);

      inst.instruction = THUMB_OP32 (opcode);
      inst.instruction |= inst.operands[0].reg << 12;
      encode_thumb32_addr_mode (1, /*is_t=*/FALSE, /*is_d=*/FALSE);
      check_ldr_r15_aligned ();
      return;
    }

  /* Non-unified syntax: Thumb-1 encodings only.  */
  constraint (inst.operands[0].reg > 7, BAD_HIREG);

  if (inst.instruction == T_MNEM_ldrsh || inst.instruction == T_MNEM_ldrsb)
    {
      /* Only [Rn,Rm] is acceptable.  */
      constraint (inst.operands[1].reg > 7 || inst.operands[1].imm > 7, BAD_HIREG);
      constraint (!inst.operands[1].isreg || !inst.operands[1].immisreg
		  || inst.operands[1].postind || inst.operands[1].shifted
		  || inst.operands[1].negative,
		  _("Thumb does not support this addressing mode"));
      inst.instruction = THUMB_OP16 (inst.instruction);
      goto op16;
    }

  inst.instruction = THUMB_OP16 (inst.instruction);
  if (!inst.operands[1].isreg)
    if (move_or_literal_pool (0, CONST_THUMB, /*mode_3=*/FALSE))
      return;

  constraint (!inst.operands[1].preind
	      || inst.operands[1].shifted
	      || inst.operands[1].writeback,
	      _("Thumb does not support this addressing mode"));
  if (inst.operands[1].reg == REG_PC || inst.operands[1].reg == REG_SP)
    {
      /* PC- and SP-relative forms: word accesses only, no stores
	 through the PC, immediate offsets only.  */
      constraint (inst.instruction & 0x0600,
		  _("byte or halfword not valid for base register"));
      constraint (inst.operands[1].reg == REG_PC
		  && !(inst.instruction & THUMB_LOAD_BIT),
		  _("r15 based store not allowed"));
      constraint (inst.operands[1].immisreg,
		  _("invalid base register for register offset"));

      if (inst.operands[1].reg == REG_PC)
	inst.instruction = T_OPCODE_LDR_PC;
      else if (inst.instruction & THUMB_LOAD_BIT)
	inst.instruction = T_OPCODE_LDR_SP;
      else
	inst.instruction = T_OPCODE_STR_SP;

      inst.instruction |= inst.operands[0].reg << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  constraint (inst.operands[1].reg > 7, BAD_HIREG);
  if (!inst.operands[1].immisreg)
    {
      /* Immediate offset.  */
      inst.instruction |= inst.operands[0].reg;
      inst.instruction |= inst.operands[1].reg << 3;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_OFFSET;
      return;
    }

  /* Register offset.  */
  constraint (inst.operands[1].imm > 7, BAD_HIREG);
  constraint (inst.operands[1].negative,
	      _("Thumb does not support this addressing mode"));

 op16:
  /* Convert the immediate-offset opcode into its register-offset
     counterpart before inserting the register fields.  */
  switch (inst.instruction)
    {
    case T_OPCODE_STR_IW: inst.instruction = T_OPCODE_STR_RW; break;
    case T_OPCODE_STR_IH: inst.instruction = T_OPCODE_STR_RH; break;
    case T_OPCODE_STR_IB: inst.instruction = T_OPCODE_STR_RB; break;
    case T_OPCODE_LDR_IW: inst.instruction = T_OPCODE_LDR_RW; break;
    case T_OPCODE_LDR_IH: inst.instruction = T_OPCODE_LDR_RH; break;
    case T_OPCODE_LDR_IB: inst.instruction = T_OPCODE_LDR_RB; break;
    case 0x5600 /* ldrsb */:
    case 0x5e00 /* ldrsh */: break;
    default: abort ();
    }

  inst.instruction |= inst.operands[0].reg;
  inst.instruction |= inst.operands[1].reg << 3;
  inst.instruction |= inst.operands[1].imm << 6;
}
12885
12886static void
12887do_t_ldstd (void)
12888{
12889  if (!inst.operands[1].present)
12890    {
12891      inst.operands[1].reg = inst.operands[0].reg + 1;
12892      constraint (inst.operands[0].reg == REG_LR,
12893		  _("r14 not allowed here"));
12894      constraint (inst.operands[0].reg == REG_R12,
12895		  _("r12 not allowed here"));
12896    }
12897
12898  if (inst.operands[2].writeback
12899      && (inst.operands[0].reg == inst.operands[2].reg
12900      || inst.operands[1].reg == inst.operands[2].reg))
12901    as_warn (_("base register written back, and overlaps "
12902	       "one of transfer registers"));
12903
12904  inst.instruction |= inst.operands[0].reg << 12;
12905  inst.instruction |= inst.operands[1].reg << 8;
12906  encode_thumb32_addr_mode (2, /*is_t=*/FALSE, /*is_d=*/TRUE);
12907}
12908
12909static void
12910do_t_ldstt (void)
12911{
12912  inst.instruction |= inst.operands[0].reg << 12;
12913  encode_thumb32_addr_mode (1, /*is_t=*/TRUE, /*is_d=*/FALSE);
12914}
12915
12916static void
12917do_t_mla (void)
12918{
12919  unsigned Rd, Rn, Rm, Ra;
12920
12921  Rd = inst.operands[0].reg;
12922  Rn = inst.operands[1].reg;
12923  Rm = inst.operands[2].reg;
12924  Ra = inst.operands[3].reg;
12925
12926  reject_bad_reg (Rd);
12927  reject_bad_reg (Rn);
12928  reject_bad_reg (Rm);
12929  reject_bad_reg (Ra);
12930
12931  inst.instruction |= Rd << 8;
12932  inst.instruction |= Rn << 16;
12933  inst.instruction |= Rm;
12934  inst.instruction |= Ra << 12;
12935}
12936
12937static void
12938do_t_mlal (void)
12939{
12940  unsigned RdLo, RdHi, Rn, Rm;
12941
12942  RdLo = inst.operands[0].reg;
12943  RdHi = inst.operands[1].reg;
12944  Rn = inst.operands[2].reg;
12945  Rm = inst.operands[3].reg;
12946
12947  reject_bad_reg (RdLo);
12948  reject_bad_reg (RdHi);
12949  reject_bad_reg (Rn);
12950  reject_bad_reg (Rm);
12951
12952  inst.instruction |= RdLo << 12;
12953  inst.instruction |= RdHi << 8;
12954  inst.instruction |= Rn << 16;
12955  inst.instruction |= Rm;
12956}
12957
/* Encode Thumb mov/movs/cmp with a register, shifted-register or
   immediate second operand, choosing between the 16-bit and 32-bit
   encodings.  Register-shifted mov/movs are rewritten as the
   corresponding shift instruction.  The non-unified path accepts only
   the Thumb-1 forms.  */
static void
do_t_mov_cmp (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* A mov that writes the PC ends any IT block.  */
  if (Rn == REG_PC)
    set_pred_insn_type_last ();

  if (unified_syntax)
    {
      /* Position of the destination register field: bits 8-11 for
	 mov/movs, 16-19 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs) ? 8 : 16;
      unsigned long opcode;
      bfd_boolean narrow;
      bfd_boolean low_regs;

      low_regs = (Rn <= 7 && Rm <= 7);
      opcode = inst.instruction;
      /* Flag-setting forms are narrow only outside an IT block;
	 non-flag-setting forms are narrow inside one or when both
	 registers are low.  */
      if (in_pred_block ())
	narrow = opcode != T_MNEM_movs;
      else
	narrow = opcode != T_MNEM_movs || low_regs;
      if (inst.size_req == 4
	  || inst.operands[1].shifted)
	narrow = FALSE;

      /* MOVS PC, LR is encoded as SUBS PC, LR, #0.  */
      if (opcode == T_MNEM_movs && inst.operands[1].isreg
	  && !inst.operands[1].shifted
	  && Rn == REG_PC
	  && Rm == REG_LR)
	{
	  inst.instruction = T2_SUBS_PC_LR;
	  return;
	}

      if (opcode == T_MNEM_cmp)
	{
	  constraint (Rn == REG_PC, BAD_PC);
	  if (narrow)
	    {
	      /* In the Thumb-2 ISA, use of R13 as Rm is deprecated,
		 but valid.  */
	      warn_deprecated_sp (Rm);
	      /* R15 was documented as a valid choice for Rm in ARMv6,
		 but as UNPREDICTABLE in ARMv7.  ARM's proprietary
		 tools reject R15, so we do too.  */
	      constraint (Rm == REG_PC, BAD_PC);
	    }
	  else
	    reject_bad_reg (Rm);
	}
      else if (opcode == T_MNEM_mov
	       || opcode == T_MNEM_movs)
	{
	  if (inst.operands[1].isreg)
	    {
	      if (opcode == T_MNEM_movs)
		{
		  reject_bad_reg (Rn);
		  reject_bad_reg (Rm);
		}
	      else if (narrow)
		{
		  /* This is mov.n.  */
		  if ((Rn == REG_SP || Rn == REG_PC)
		      && (Rm == REG_SP || Rm == REG_PC))
		    {
		      as_tsktsk (_("Use of r%u as a source register is "
				 "deprecated when r%u is the destination "
				 "register."), Rm, Rn);
		    }
		}
	      else
		{
		  /* This is mov.w.  */
		  constraint (Rn == REG_PC, BAD_PC);
		  constraint (Rm == REG_PC, BAD_PC);
		  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
		    constraint (Rn == REG_SP && Rm == REG_SP, BAD_SP);
		}
	    }
	  else
	    reject_bad_reg (Rn);
	}

      if (!inst.operands[1].isreg)
	{
	  /* Immediate operand.  */
	  /* The 16-bit MOV immediate sets the flags, so plain mov
	     outside an IT block must take the wide encoding.  */
	  if (!in_pred_block () && opcode == T_MNEM_mov)
	    narrow = 0;
	  if (low_regs && narrow)
	    {
	      inst.instruction = THUMB_OP16 (opcode);
	      inst.instruction |= Rn << 8;
	      if (inst.relocs[0].type < BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
		  || inst.relocs[0].type > BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
		{
		  if (inst.size_req == 2)
		    inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
		  else
		    inst.relax = opcode;
		}
	    }
	  else
	    {
	      constraint ((inst.relocs[0].type
			   >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC)
			  && (inst.relocs[0].type
			      <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC) ,
			  THUMB1_RELOC_ONLY);

	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	      inst.instruction |= Rn << r0off;
	      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	    }
	}
      else if (inst.operands[1].shifted && inst.operands[1].immisreg
	       && (inst.instruction == T_MNEM_mov
		   || inst.instruction == T_MNEM_movs))
	{
	  /* Register shifts are encoded as separate shift instructions.  */
	  bfd_boolean flags = (inst.instruction == T_MNEM_movs);

	  if (in_pred_block ())
	    narrow = !flags;
	  else
	    narrow = flags;

	  if (inst.size_req == 4)
	    narrow = FALSE;

	  if (!low_regs || inst.operands[1].imm > 7)
	    narrow = FALSE;

	  /* The 16-bit shift encoding has a single Rd/Rn field.  */
	  if (Rn != Rm)
	    narrow = FALSE;

	  switch (inst.operands[1].shift_kind)
	    {
	    case SHIFT_LSL:
	      opcode = narrow ? T_OPCODE_LSL_R : THUMB_OP32 (T_MNEM_lsl);
	      break;
	    case SHIFT_ASR:
	      opcode = narrow ? T_OPCODE_ASR_R : THUMB_OP32 (T_MNEM_asr);
	      break;
	    case SHIFT_LSR:
	      opcode = narrow ? T_OPCODE_LSR_R : THUMB_OP32 (T_MNEM_lsr);
	      break;
	    case SHIFT_ROR:
	      opcode = narrow ? T_OPCODE_ROR_R : THUMB_OP32 (T_MNEM_ror);
	      break;
	    default:
	      abort ();
	    }

	  inst.instruction = opcode;
	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= inst.operands[1].imm << 3;
	    }
	  else
	    {
	      if (flags)
		inst.instruction |= CONDS_BIT;

	      inst.instruction |= Rn << 8;
	      inst.instruction |= Rm << 16;
	      inst.instruction |= inst.operands[1].imm;
	    }
	}
      else if (!narrow)
	{
	  /* Some mov with immediate shift have narrow variants.
	     Register shifts are handled above.  */
	  if (low_regs && inst.operands[1].shifted
	      && (inst.instruction == T_MNEM_mov
		  || inst.instruction == T_MNEM_movs))
	    {
	      if (in_pred_block ())
		narrow = (inst.instruction == T_MNEM_mov);
	      else
		narrow = (inst.instruction == T_MNEM_movs);
	    }

	  if (narrow)
	    {
	      switch (inst.operands[1].shift_kind)
		{
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		default: narrow = FALSE; break;
		}
	    }

	  if (narrow)
	    {
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	    }
	  else
	    {
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
      else
	switch (inst.instruction)
	  {
	  case T_MNEM_mov:
	    /* In v4t or v5t a move of two lowregs produces unpredictable
	       results. Don't allow this.  */
	    if (low_regs)
	      {
		constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6),
			    "MOV Rd, Rs with two low registers is not "
			    "permitted on this architecture");
		ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
					arm_ext_v6);
	      }

	    inst.instruction = T_OPCODE_MOV_HR;
	    inst.instruction |= (Rn & 0x8) << 4;
	    inst.instruction |= (Rn & 0x7);
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_movs:
	    /* We know we have low registers at this point.
	       Generate LSLS Rd, Rs, #0.  */
	    inst.instruction = T_OPCODE_LSL_I;
	    inst.instruction |= Rn;
	    inst.instruction |= Rm << 3;
	    break;

	  case T_MNEM_cmp:
	    if (low_regs)
	      {
		inst.instruction = T_OPCODE_CMP_LR;
		inst.instruction |= Rn;
		inst.instruction |= Rm << 3;
	      }
	    else
	      {
		inst.instruction = T_OPCODE_CMP_HR;
		inst.instruction |= (Rn & 0x8) << 4;
		inst.instruction |= (Rn & 0x7);
		inst.instruction |= Rm << 3;
	      }
	    break;
	  }
      return;
    }

  /* Non-unified syntax: Thumb-1 forms only.  */
  inst.instruction = THUMB_OP16 (inst.instruction);

  /* PR 10443: Do not silently ignore shifted operands.  */
  constraint (inst.operands[1].shifted,
	      _("shifts in CMP/MOV instructions are only supported in unified syntax"));

  if (inst.operands[1].isreg)
    {
      if (Rn < 8 && Rm < 8)
	{
	  /* A move of two lowregs is encoded as ADD Rd, Rs, #0
	     since a MOV instruction produces unpredictable results.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_ADD_I3;
	  else
	    inst.instruction = T_OPCODE_CMP_LR;

	  inst.instruction |= Rn;
	  inst.instruction |= Rm << 3;
	}
      else
	{
	  /* High-register forms share the CPY-style encoding.  */
	  if (inst.instruction == T_OPCODE_MOV_I8)
	    inst.instruction = T_OPCODE_MOV_HR;
	  else
	    inst.instruction = T_OPCODE_CMP_HR;
	  do_t_cpy ();
	}
    }
  else
    {
      constraint (Rn > 7,
		  _("only lo regs allowed with immediate"));
      inst.instruction |= Rn << 8;
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_IMM;
    }
}
13257
13258static void
13259do_t_mov16 (void)
13260{
13261  unsigned Rd;
13262  bfd_vma imm;
13263  bfd_boolean top;
13264
13265  top = (inst.instruction & 0x00800000) != 0;
13266  if (inst.relocs[0].type == BFD_RELOC_ARM_MOVW)
13267    {
13268      constraint (top, _(":lower16: not allowed in this instruction"));
13269      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVW;
13270    }
13271  else if (inst.relocs[0].type == BFD_RELOC_ARM_MOVT)
13272    {
13273      constraint (!top, _(":upper16: not allowed in this instruction"));
13274      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_MOVT;
13275    }
13276
13277  Rd = inst.operands[0].reg;
13278  reject_bad_reg (Rd);
13279
13280  inst.instruction |= Rd << 8;
13281  if (inst.relocs[0].type == BFD_RELOC_UNUSED)
13282    {
13283      imm = inst.relocs[0].exp.X_add_number;
13284      inst.instruction |= (imm & 0xf000) << 4;
13285      inst.instruction |= (imm & 0x0800) << 15;
13286      inst.instruction |= (imm & 0x0700) << 4;
13287      inst.instruction |= (imm & 0x00ff);
13288    }
13289}
13290
/* Encode Thumb mvn/mvns/tst/cmp/cmn with a register, shifted-register
   or immediate second operand, choosing between the 16-bit and 32-bit
   encodings.  */
static void
do_t_mvn_tst (void)
{
  unsigned Rn, Rm;

  Rn = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  /* For cmp/cmn only the PC is rejected as the first operand; the
     other mnemonics apply the stricter SP/PC check.  */
  if (inst.instruction == T_MNEM_cmp
      || inst.instruction == T_MNEM_cmn)
    constraint (Rn == REG_PC, BAD_PC);
  else
    reject_bad_reg (Rn);
  reject_bad_reg (Rm);

  if (unified_syntax)
    {
      /* Position of the first register field: bits 8-11 for mvn/mvns
	 (destination), 16-19 otherwise.  */
      int r0off = (inst.instruction == T_MNEM_mvn
		   || inst.instruction == T_MNEM_mvns) ? 8 : 16;
      bfd_boolean narrow;

      if (inst.size_req == 4
	  || inst.instruction > 0xffff
	  || inst.operands[1].shifted
	  || Rn > 7 || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_cmn
	       || inst.instruction == T_MNEM_tst)
	narrow = TRUE;
      else if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (!inst.operands[1].isreg)
	{
	  /* For an immediate, we always generate a 32-bit opcode;
	     section relaxation will shrink it later if possible.  */
	  if (inst.instruction < 0xffff)
	    inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.instruction |= Rn << r0off;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  /* See if we can do this with a 16-bit instruction.  */
	  if (narrow)
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= Rn;
	      inst.instruction |= Rm << 3;
	    }
	  else
	    {
	      constraint (inst.operands[1].shifted
			  && inst.operands[1].immisreg,
			  _("shift must be constant"));
	      if (inst.instruction < 0xffff)
		inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= Rn << r0off;
	      encode_thumb32_shifted_operand (1);
	    }
	}
    }
  else
    {
      /* Non-unified syntax: unshifted low registers only.  */
      constraint (inst.instruction > 0xffff
		  || inst.instruction == T_MNEM_mvns, BAD_THUMB32);
      constraint (!inst.operands[1].isreg || inst.operands[1].shifted,
		  _("unshifted register required"));
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);

      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rn;
      inst.instruction |= Rm << 3;
    }
}
13370
/* Encode Thumb-2 MRS.  Handles the VFP/Neon "nsyn" form, the
   banked-register operand form (operand 1 parsed as a register) and
   the PSR/flag-mask immediate form.  */
static void
do_t_mrs (void)
{
  unsigned Rd;

  /* VMRS and friends are dispatched separately.  */
  if (do_vfp_nsyn_mrs () == SUCCESS)
    return;

  Rd = inst.operands[0].reg;
  reject_bad_reg (Rd);
  inst.instruction |= Rd << 8;

  if (inst.operands[1].isreg)
    {
      /* The operand parser packs the banked-register encoding into
	 the register number; scatter its fields into the opcode.
	 NOTE(review): the 0x200/0xf000 test appears to reject values
	 that are neither banked-register encodings nor special
	 registers -- confirm against the operand parser.  */
      unsigned br = inst.operands[1].reg;
      if (((br & 0x200) == 0) && ((br & 0xf000) != 0xf000))
	as_bad (_("bad register for mrs"));

      inst.instruction |= br & (0xf << 16);
      inst.instruction |= (br & 0x300) >> 4;
      inst.instruction |= (br & SPSR_BIT) >> 2;
    }
  else
    {
      int flags = inst.operands[1].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
	{
	  /* PR gas/12698:  The constraint is only applied for m_profile.
	     If the user has specified -march=all, we want to ignore it as
	     we are building for any CPU type, including non-m variants.  */
	  bfd_boolean m_profile =
	    !ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
	  constraint ((flags != 0) && m_profile, _("selected processor does "
						   "not support requested special purpose register"));
	}
      else
	/* mrs only accepts APSR/CPSR/SPSR/CPSR_all/SPSR_all (for non-M profile
	   devices).  */
	constraint ((flags & ~SPSR_BIT) != (PSR_c|PSR_f),
		    _("'APSR', 'CPSR' or 'SPSR' expected"));

      inst.instruction |= (flags & SPSR_BIT) >> 2;
      inst.instruction |= inst.operands[1].imm & 0xff;
      inst.instruction |= 0xf0000;
    }
}
13418
/* Encode Thumb-2 MSR (register source only; an immediate source has
   no Thumb encoding).  Validates the requested special-purpose
   register / flag mask against the selected CPU before scattering the
   fields into the opcode.  */
static void
do_t_msr (void)
{
  int flags;
  unsigned Rn;

  /* VMSR and friends are dispatched separately.  */
  if (do_vfp_nsyn_msr () == SUCCESS)
    return;

  constraint (!inst.operands[1].isreg,
	      _("Thumb encoding does not support an immediate here"));

  /* Operand 0 may have been parsed either as a register (banked
     register form) or as a PSR/flag mask.  */
  if (inst.operands[0].isreg)
    flags = (int)(inst.operands[0].reg);
  else
    flags = inst.operands[0].imm;

  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_m))
    {
      int bits = inst.operands[0].imm & (PSR_c|PSR_x|PSR_s|PSR_f|SPSR_BIT);

      /* PR gas/12698:  The constraint is only applied for m_profile.
	 If the user has specified -march=all, we want to ignore it as
	 we are building for any CPU type, including non-m variants.  */
      bfd_boolean m_profile =
	!ARM_FEATURE_CORE_EQUAL (selected_cpu, arm_arch_any);
      /* With the DSP extension both _s and _f masks are accepted;
	 without it only _f is.  */
      constraint (((ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	   && (bits & ~(PSR_s | PSR_f)) != 0)
	  || (!ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6_dsp)
	      && bits != PSR_f)) && m_profile,
	  _("selected processor does not support requested special "
	    "purpose register"));
    }
  else
     constraint ((flags & 0xff) != 0, _("selected processor does not support "
		 "requested special purpose register"));

  Rn = inst.operands[1].reg;
  reject_bad_reg (Rn);

  inst.instruction |= (flags & SPSR_BIT) >> 2;
  inst.instruction |= (flags & 0xf0000) >> 8;
  inst.instruction |= (flags & 0x300) >> 4;
  inst.instruction |= (flags & 0xff);
  inst.instruction |= Rn << 16;
}
13465
/* Encode Thumb MUL/MULS.  Chooses between the 16-bit two-register form
   (which requires the destination to overlap a source) and the 32-bit
   three-register form.  */
static void
do_t_mul (void)
{
  bfd_boolean narrow;
  unsigned Rd, Rn, Rm;

  /* Two-operand form: "mul Rd, Rm" means "mul Rd, Rd, Rm".  */
  if (!inst.operands[2].present)
    inst.operands[2].reg = inst.operands[0].reg;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[1].reg;
  Rm = inst.operands[2].reg;

  if (unified_syntax)
    {
      /* 16-bit is only possible for low registers with Rd overlapping a
	 source; inside an IT block the flag-setting rules flip which
	 mnemonic may narrow.  */
      if (inst.size_req == 4
	  || (Rd != Rn
	      && Rd != Rm)
	  || Rn > 7
	  || Rm > 7)
	narrow = FALSE;
      else if (inst.instruction == T_MNEM_muls)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
    }
  else
    {
      /* Pre-unified syntax only has the 16-bit MULS.  */
      constraint (inst.instruction == T_MNEM_muls, BAD_THUMB32);
      constraint (Rn > 7 || Rm > 7,
		  BAD_HIREG);
      narrow = TRUE;
    }

  if (narrow)
    {
      /* 16-bit MULS/Conditional MUL.  */
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;

      /* The 16-bit form encodes only two registers, so Rd must equal
	 one of the sources; encode the other one.  */
      if (Rd == Rn)
	inst.instruction |= Rm << 3;
      else if (Rd == Rm)
	inst.instruction |= Rn << 3;
      else
	constraint (1, _("dest must overlap one source register"));
    }
  else
    {
      constraint (inst.instruction != T_MNEM_mul,
		  _("Thumb-2 MUL must not set flags"));
      /* 32-bit MUL.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rn << 16;
      inst.instruction |= Rm << 0;

      reject_bad_reg (Rd);
      reject_bad_reg (Rn);
      reject_bad_reg (Rm);
    }
}
13528
13529static void
13530do_t_mull (void)
13531{
13532  unsigned RdLo, RdHi, Rn, Rm;
13533
13534  RdLo = inst.operands[0].reg;
13535  RdHi = inst.operands[1].reg;
13536  Rn = inst.operands[2].reg;
13537  Rm = inst.operands[3].reg;
13538
13539  reject_bad_reg (RdLo);
13540  reject_bad_reg (RdHi);
13541  reject_bad_reg (Rn);
13542  reject_bad_reg (Rm);
13543
13544  inst.instruction |= RdLo << 12;
13545  inst.instruction |= RdHi << 8;
13546  inst.instruction |= Rn << 16;
13547  inst.instruction |= Rm;
13548
13549 if (RdLo == RdHi)
13550    as_tsktsk (_("rdhi and rdlo must be different"));
13551}
13552
/* Encode Thumb NOP and NOP-compatible hint instructions.  Operand 0, if
   present, is the hint number.  */
static void
do_t_nop (void)
{
  /* A NOP is permitted inside or outside an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);

  if (unified_syntax)
    {
      /* Hints above 15 or an explicit .w only exist as 32-bit.  */
      if (inst.size_req == 4 || inst.operands[0].imm > 15)
	{
	  inst.instruction = THUMB_OP32 (inst.instruction);
	  inst.instruction |= inst.operands[0].imm;
	}
      else
	{
	  /* PR9722: Check for Thumb2 availability before
	     generating a thumb2 nop instruction.  */
	  if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v6t2))
	    {
	      inst.instruction = THUMB_OP16 (inst.instruction);
	      inst.instruction |= inst.operands[0].imm << 4;
	    }
	  else
	    /* Fall back to "mov r8, r8", the traditional Thumb-1 NOP.  */
	    inst.instruction = 0x46c0;
	}
    }
  else
    {
      constraint (inst.operands[0].present,
		  _("Thumb does not support NOP with hints"));
      inst.instruction = 0x46c0;
    }
}
13585
13586static void
13587do_t_neg (void)
13588{
13589  if (unified_syntax)
13590    {
13591      bfd_boolean narrow;
13592
13593      if (THUMB_SETS_FLAGS (inst.instruction))
13594	narrow = !in_pred_block ();
13595      else
13596	narrow = in_pred_block ();
13597      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
13598	narrow = FALSE;
13599      if (inst.size_req == 4)
13600	narrow = FALSE;
13601
13602      if (!narrow)
13603	{
13604	  inst.instruction = THUMB_OP32 (inst.instruction);
13605	  inst.instruction |= inst.operands[0].reg << 8;
13606	  inst.instruction |= inst.operands[1].reg << 16;
13607	}
13608      else
13609	{
13610	  inst.instruction = THUMB_OP16 (inst.instruction);
13611	  inst.instruction |= inst.operands[0].reg;
13612	  inst.instruction |= inst.operands[1].reg << 3;
13613	}
13614    }
13615  else
13616    {
13617      constraint (inst.operands[0].reg > 7 || inst.operands[1].reg > 7,
13618		  BAD_HIREG);
13619      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);
13620
13621      inst.instruction = THUMB_OP16 (inst.instruction);
13622      inst.instruction |= inst.operands[0].reg;
13623      inst.instruction |= inst.operands[1].reg << 3;
13624    }
13625}
13626
13627static void
13628do_t_orn (void)
13629{
13630  unsigned Rd, Rn;
13631
13632  Rd = inst.operands[0].reg;
13633  Rn = inst.operands[1].present ? inst.operands[1].reg : Rd;
13634
13635  reject_bad_reg (Rd);
13636  /* Rn == REG_SP is unpredictable; Rn == REG_PC is MVN.  */
13637  reject_bad_reg (Rn);
13638
13639  inst.instruction |= Rd << 8;
13640  inst.instruction |= Rn << 16;
13641
13642  if (!inst.operands[2].isreg)
13643    {
13644      inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
13645      inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
13646    }
13647  else
13648    {
13649      unsigned Rm;
13650
13651      Rm = inst.operands[2].reg;
13652      reject_bad_reg (Rm);
13653
13654      constraint (inst.operands[2].shifted
13655		  && inst.operands[2].immisreg,
13656		  _("shift must be constant"));
13657      encode_thumb32_shifted_operand (2);
13658    }
13659}
13660
13661static void
13662do_t_pkhbt (void)
13663{
13664  unsigned Rd, Rn, Rm;
13665
13666  Rd = inst.operands[0].reg;
13667  Rn = inst.operands[1].reg;
13668  Rm = inst.operands[2].reg;
13669
13670  reject_bad_reg (Rd);
13671  reject_bad_reg (Rn);
13672  reject_bad_reg (Rm);
13673
13674  inst.instruction |= Rd << 8;
13675  inst.instruction |= Rn << 16;
13676  inst.instruction |= Rm;
13677  if (inst.operands[3].present)
13678    {
13679      unsigned int val = inst.relocs[0].exp.X_add_number;
13680      constraint (inst.relocs[0].exp.X_op != O_constant,
13681		  _("expression too complex"));
13682      inst.instruction |= (val & 0x1c) << 10;
13683      inst.instruction |= (val & 0x03) << 6;
13684    }
13685}
13686
13687static void
13688do_t_pkhtb (void)
13689{
13690  if (!inst.operands[3].present)
13691    {
13692      unsigned Rtmp;
13693
13694      inst.instruction &= ~0x00000020;
13695
13696      /* PR 10168.  Swap the Rm and Rn registers.  */
13697      Rtmp = inst.operands[1].reg;
13698      inst.operands[1].reg = inst.operands[2].reg;
13699      inst.operands[2].reg = Rtmp;
13700    }
13701  do_t_pkhbt ();
13702}
13703
13704static void
13705do_t_pld (void)
13706{
13707  if (inst.operands[0].immisreg)
13708    reject_bad_reg (inst.operands[0].imm);
13709
13710  encode_thumb32_addr_mode (0, /*is_t=*/FALSE, /*is_d=*/FALSE);
13711}
13712
/* Encode Thumb PUSH/POP.  Operand 0 is the register list.  Tries the
   16-bit encodings first, then falls back to the 32-bit multi-register
   form under unified syntax.  */
static void
do_t_push_pop (void)
{
  unsigned mask;

  constraint (inst.operands[0].writeback,
	      _("push/pop do not support {reglist}^"));
  constraint (inst.relocs[0].type != BFD_RELOC_UNUSED,
	      _("expression too complex"));

  mask = inst.operands[0].imm;
  /* 16-bit form: only low registers...  */
  if (inst.size_req != 4 && (mask & ~0xff) == 0)
    inst.instruction = THUMB_OP16 (inst.instruction) | mask;
  /* ...or low registers plus LR (push) / PC (pop).  */
  else if (inst.size_req != 4
	   && (mask & ~0xff) == (1U << (inst.instruction == T_MNEM_push
				       ? REG_LR : REG_PC)))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= THUMB_PP_PC_LR;
      inst.instruction |= mask & 0xff;
    }
  else if (unified_syntax)
    {
      /* Anything else becomes a 32-bit LDM/STM with SP as the base.  */
      inst.instruction = THUMB_OP32 (inst.instruction);
      encode_thumb2_multi (TRUE /* do_io */, 13, mask, TRUE);
    }
  else
    {
      inst.error = _("invalid register list to push/pop instruction");
      return;
    }
}
13745
13746static void
13747do_t_clrm (void)
13748{
13749  if (unified_syntax)
13750    encode_thumb2_multi (FALSE /* do_io */, -1, inst.operands[0].imm, FALSE);
13751  else
13752    {
13753      inst.error = _("invalid register list to push/pop instruction");
13754      return;
13755    }
13756}
13757
/* Encode Armv8.1-M VSCCLRM.  Operand 0 carries the base VFP register,
   the register count (imm), and whether the list is single precision.  */
static void
do_t_vscclrm (void)
{
  if (inst.operands[0].issingle)
    {
      /* Single-precision list: split the S-register number across the
	 D bit (22) and the Vd field (15:12); count in the low bits.  */
      inst.instruction |= (inst.operands[0].reg & 0x1) << 22;
      inst.instruction |= (inst.operands[0].reg & 0x1e) << 11;
      inst.instruction |= inst.operands[0].imm;
    }
  else
    {
      /* Double-precision list: D-register number split across bit 22
	 and 15:12; bit 8 marks the double form; count doubled.  */
      inst.instruction |= (inst.operands[0].reg & 0x10) << 18;
      inst.instruction |= (inst.operands[0].reg & 0xf) << 12;
      inst.instruction |= 1 << 8;
      inst.instruction |= inst.operands[0].imm << 1;
    }
}
13775
13776static void
13777do_t_rbit (void)
13778{
13779  unsigned Rd, Rm;
13780
13781  Rd = inst.operands[0].reg;
13782  Rm = inst.operands[1].reg;
13783
13784  reject_bad_reg (Rd);
13785  reject_bad_reg (Rm);
13786
13787  inst.instruction |= Rd << 8;
13788  inst.instruction |= Rm << 16;
13789  inst.instruction |= Rm;
13790}
13791
13792static void
13793do_t_rev (void)
13794{
13795  unsigned Rd, Rm;
13796
13797  Rd = inst.operands[0].reg;
13798  Rm = inst.operands[1].reg;
13799
13800  reject_bad_reg (Rd);
13801  reject_bad_reg (Rm);
13802
13803  if (Rd <= 7 && Rm <= 7
13804      && inst.size_req != 4)
13805    {
13806      inst.instruction = THUMB_OP16 (inst.instruction);
13807      inst.instruction |= Rd;
13808      inst.instruction |= Rm << 3;
13809    }
13810  else if (unified_syntax)
13811    {
13812      inst.instruction = THUMB_OP32 (inst.instruction);
13813      inst.instruction |= Rd << 8;
13814      inst.instruction |= Rm << 16;
13815      inst.instruction |= Rm;
13816    }
13817  else
13818    inst.error = BAD_HIREG;
13819}
13820
13821static void
13822do_t_rrx (void)
13823{
13824  unsigned Rd, Rm;
13825
13826  Rd = inst.operands[0].reg;
13827  Rm = inst.operands[1].reg;
13828
13829  reject_bad_reg (Rd);
13830  reject_bad_reg (Rm);
13831
13832  inst.instruction |= Rd << 8;
13833  inst.instruction |= Rm;
13834}
13835
/* Encode Thumb RSB.  "rsb Rd, Rs, #0" can relax to the 16-bit NEG;
   everything else uses the 32-bit immediate or shifted-register form.  */
static void
do_t_rsb (void)
{
  unsigned Rd, Rs;

  Rd = inst.operands[0].reg;
  Rs = (inst.operands[1].present
	? inst.operands[1].reg    /* Rd, Rs, foo */
	: inst.operands[0].reg);  /* Rd, foo -> Rd, Rd, foo */

  reject_bad_reg (Rd);
  reject_bad_reg (Rs);
  if (inst.operands[2].isreg)
    reject_bad_reg (inst.operands[2].reg);

  inst.instruction |= Rd << 8;
  inst.instruction |= Rs << 16;
  if (!inst.operands[2].isreg)
    {
      bfd_boolean narrow;

      /* Bit 20 is the S (flag-setting) bit; in an IT block only the
	 non-flag-setting form may narrow, and vice versa outside one.  */
      if ((inst.instruction & 0x00100000) != 0)
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();

      if (Rd > 7 || Rs > 7)
	narrow = FALSE;

      if (inst.size_req == 4 || !unified_syntax)
	narrow = FALSE;

      /* Only an immediate of exactly zero can become NEG.  */
      if (inst.relocs[0].exp.X_op != O_constant
	  || inst.relocs[0].exp.X_add_number != 0)
	narrow = FALSE;

      /* Turn rsb #0 into 16-bit neg.  We should probably do this via
	 relaxation, but it doesn't seem worth the hassle.  */
      if (narrow)
	{
	  inst.relocs[0].type = BFD_RELOC_UNUSED;
	  inst.instruction = THUMB_OP16 (T_MNEM_negs);
	  inst.instruction |= Rs << 3;
	  inst.instruction |= Rd;
	}
      else
	{
	  /* 32-bit modified-immediate encoding.  */
	  inst.instruction = (inst.instruction & 0xe1ffffff) | 0x10000000;
	  inst.relocs[0].type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
    }
  else
    encode_thumb32_shifted_operand (2);
}
13890
13891static void
13892do_t_setend (void)
13893{
13894  if (warn_on_deprecated
13895      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
13896      as_tsktsk (_("setend use is deprecated for ARMv8"));
13897
13898  set_pred_insn_type (OUTSIDE_PRED_INSN);
13899  if (inst.operands[0].imm)
13900    inst.instruction |= 0x8;
13901}
13902
/* Encode the Thumb shift instructions (ASR/LSL/LSR/ROR, immediate or
   register shift count).  Selects between 16-bit and 32-bit encodings;
   immediate wide shifts are emitted as MOV with a shifted operand.  */
static void
do_t_shift (void)
{
  /* Two-operand form: "lsl Rd, Rm" means "lsl Rd, Rd, Rm".  */
  if (!inst.operands[1].present)
    inst.operands[1].reg = inst.operands[0].reg;

  if (unified_syntax)
    {
      bfd_boolean narrow;
      int shift_kind;

      switch (inst.instruction)
	{
	case T_MNEM_asr:
	case T_MNEM_asrs: shift_kind = SHIFT_ASR; break;
	case T_MNEM_lsl:
	case T_MNEM_lsls: shift_kind = SHIFT_LSL; break;
	case T_MNEM_lsr:
	case T_MNEM_lsrs: shift_kind = SHIFT_LSR; break;
	case T_MNEM_ror:
	case T_MNEM_rors: shift_kind = SHIFT_ROR; break;
	default: abort ();
	}

      /* In an IT block only the non-flag-setting form may narrow, and
	 vice versa outside one.  */
      if (THUMB_SETS_FLAGS (inst.instruction))
	narrow = !in_pred_block ();
      else
	narrow = in_pred_block ();
      /* High registers force the 32-bit encoding.  */
      if (inst.operands[0].reg > 7 || inst.operands[1].reg > 7)
	narrow = FALSE;
      /* There is no 16-bit immediate ROR.  */
      if (!inst.operands[2].isreg && shift_kind == SHIFT_ROR)
	narrow = FALSE;
      /* 16-bit register-shift requires Rd == Rn and a low shift reg.  */
      if (inst.operands[2].isreg
	  && (inst.operands[1].reg != inst.operands[0].reg
	      || inst.operands[2].reg > 7))
	narrow = FALSE;
      if (inst.size_req == 4)
	narrow = FALSE;

      reject_bad_reg (inst.operands[0].reg);
      reject_bad_reg (inst.operands[1].reg);

      if (!narrow)
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 32-bit register-shift form.  */
	      reject_bad_reg (inst.operands[2].reg);
	      inst.instruction = THUMB_OP32 (inst.instruction);
	      inst.instruction |= inst.operands[0].reg << 8;
	      inst.instruction |= inst.operands[1].reg << 16;
	      inst.instruction |= inst.operands[2].reg;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 32-bit immediate shift: emit as MOV/MOVS with a
		 shifted-register operand.  */
	      inst.operands[1].shifted = 1;
	      inst.operands[1].shift_kind = shift_kind;
	      inst.instruction = THUMB_OP32 (THUMB_SETS_FLAGS (inst.instruction)
					     ? T_MNEM_movs : T_MNEM_mov);
	      inst.instruction |= inst.operands[0].reg << 8;
	      encode_thumb32_shifted_operand (1);
	      /* Prevent the incorrect generation of an ARM_IMMEDIATE fixup.  */
	      inst.relocs[0].type = BFD_RELOC_UNUSED;
	    }
	}
      else
	{
	  if (inst.operands[2].isreg)
	    {
	      /* 16-bit register-shift form.  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_R; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_R; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_R; break;
		case SHIFT_ROR: inst.instruction = T_OPCODE_ROR_R; break;
		default: abort ();
		}

	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[2].reg << 3;

	      /* PR 12854: Error on extraneous shifts.  */
	      constraint (inst.operands[2].shifted,
			  _("extraneous shift as part of operand to shift insn"));
	    }
	  else
	    {
	      /* 16-bit immediate-shift form (no ROR here; excluded above).  */
	      switch (shift_kind)
		{
		case SHIFT_ASR: inst.instruction = T_OPCODE_ASR_I; break;
		case SHIFT_LSL: inst.instruction = T_OPCODE_LSL_I; break;
		case SHIFT_LSR: inst.instruction = T_OPCODE_LSR_I; break;
		default: abort ();
		}
	      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	      inst.instruction |= inst.operands[0].reg;
	      inst.instruction |= inst.operands[1].reg << 3;
	    }
	}
    }
  else
    {
      /* Pre-unified syntax: 16-bit encodings only, low registers only.  */
      constraint (inst.operands[0].reg > 7
		  || inst.operands[1].reg > 7, BAD_HIREG);
      constraint (THUMB_SETS_FLAGS (inst.instruction), BAD_THUMB32);

      if (inst.operands[2].isreg)  /* Rd, {Rs,} Rn */
	{
	  constraint (inst.operands[2].reg > 7, BAD_HIREG);
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("source1 and dest must be same register"));

	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_R; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_R; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_R; break;
	    case T_MNEM_ror: inst.instruction = T_OPCODE_ROR_R; break;
	    default: abort ();
	    }

	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[2].reg << 3;

	  /* PR 12854: Error on extraneous shifts.  */
	  constraint (inst.operands[2].shifted,
		      _("extraneous shift as part of operand to shift insn"));
	}
      else
	{
	  switch (inst.instruction)
	    {
	    case T_MNEM_asr: inst.instruction = T_OPCODE_ASR_I; break;
	    case T_MNEM_lsl: inst.instruction = T_OPCODE_LSL_I; break;
	    case T_MNEM_lsr: inst.instruction = T_OPCODE_LSR_I; break;
	    case T_MNEM_ror: inst.error = _("ror #imm not supported"); return;
	    default: abort ();
	    }
	  inst.relocs[0].type = BFD_RELOC_ARM_THUMB_SHIFT;
	  inst.instruction |= inst.operands[0].reg;
	  inst.instruction |= inst.operands[1].reg << 3;
	}
    }
}
14050
14051static void
14052do_t_simd (void)
14053{
14054  unsigned Rd, Rn, Rm;
14055
14056  Rd = inst.operands[0].reg;
14057  Rn = inst.operands[1].reg;
14058  Rm = inst.operands[2].reg;
14059
14060  reject_bad_reg (Rd);
14061  reject_bad_reg (Rn);
14062  reject_bad_reg (Rm);
14063
14064  inst.instruction |= Rd << 8;
14065  inst.instruction |= Rn << 16;
14066  inst.instruction |= Rm;
14067}
14068
14069static void
14070do_t_simd2 (void)
14071{
14072  unsigned Rd, Rn, Rm;
14073
14074  Rd = inst.operands[0].reg;
14075  Rm = inst.operands[1].reg;
14076  Rn = inst.operands[2].reg;
14077
14078  reject_bad_reg (Rd);
14079  reject_bad_reg (Rn);
14080  reject_bad_reg (Rm);
14081
14082  inst.instruction |= Rd << 8;
14083  inst.instruction |= Rn << 16;
14084  inst.instruction |= Rm;
14085}
14086
14087static void
14088do_t_smc (void)
14089{
14090  unsigned int value = inst.relocs[0].exp.X_add_number;
14091  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v7a),
14092	      _("SMC is not permitted on this architecture"));
14093  constraint (inst.relocs[0].exp.X_op != O_constant,
14094	      _("expression too complex"));
14095  constraint (value > 0xf, _("immediate too large (bigger than 0xF)"));
14096
14097  inst.relocs[0].type = BFD_RELOC_UNUSED;
14098  inst.instruction |= (value & 0x000f) << 16;
14099
14100  /* PR gas/15623: SMC instructions must be last in an IT block.  */
14101  set_pred_insn_type_last ();
14102}
14103
14104static void
14105do_t_hvc (void)
14106{
14107  unsigned int value = inst.relocs[0].exp.X_add_number;
14108
14109  inst.relocs[0].type = BFD_RELOC_UNUSED;
14110  inst.instruction |= (value & 0x0fff);
14111  inst.instruction |= (value & 0xf000) << 4;
14112}
14113
/* Shared encoder for Thumb-2 SSAT/USAT.  BIAS is subtracted from the
   saturation bound before encoding (1 for SSAT, 0 for USAT).  */
static void
do_t_ssat_usat (int bias)
{
  unsigned Rd, Rn;

  Rd = inst.operands[0].reg;
  Rn = inst.operands[2].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rn);

  inst.instruction |= Rd << 8;
  inst.instruction |= inst.operands[1].imm - bias;
  inst.instruction |= Rn << 16;

  if (inst.operands[3].present)
    {
      /* Optional shift applied to Rn before saturating.  */
      offsetT shift_amount = inst.relocs[0].exp.X_add_number;

      inst.relocs[0].type = BFD_RELOC_UNUSED;

      constraint (inst.relocs[0].exp.X_op != O_constant,
		  _("expression too complex"));

      if (shift_amount != 0)
	{
	  constraint (shift_amount > 31,
		      _("shift expression is too large"));

	  if (inst.operands[3].shift_kind == SHIFT_ASR)
	    inst.instruction |= 0x00200000;  /* sh bit.  */

	  /* Shift amount split across the imm3:imm2 fields.  */
	  inst.instruction |= (shift_amount & 0x1c) << 10;
	  inst.instruction |= (shift_amount & 0x03) << 6;
	}
    }
}
14151
/* Encode Thumb-2 SSAT: shared helper with a bias of 1.  */
static void
do_t_ssat (void)
{
  do_t_ssat_usat (1);
}
14157
14158static void
14159do_t_ssat16 (void)
14160{
14161  unsigned Rd, Rn;
14162
14163  Rd = inst.operands[0].reg;
14164  Rn = inst.operands[2].reg;
14165
14166  reject_bad_reg (Rd);
14167  reject_bad_reg (Rn);
14168
14169  inst.instruction |= Rd << 8;
14170  inst.instruction |= inst.operands[1].imm - 1;
14171  inst.instruction |= Rn << 16;
14172}
14173
14174static void
14175do_t_strex (void)
14176{
14177  constraint (!inst.operands[2].isreg || !inst.operands[2].preind
14178	      || inst.operands[2].postind || inst.operands[2].writeback
14179	      || inst.operands[2].immisreg || inst.operands[2].shifted
14180	      || inst.operands[2].negative,
14181	      BAD_ADDR_MODE);
14182
14183  constraint (inst.operands[2].reg == REG_PC, BAD_PC);
14184
14185  inst.instruction |= inst.operands[0].reg << 8;
14186  inst.instruction |= inst.operands[1].reg << 12;
14187  inst.instruction |= inst.operands[2].reg << 16;
14188  inst.relocs[0].type = BFD_RELOC_ARM_T32_OFFSET_U8;
14189}
14190
14191static void
14192do_t_strexd (void)
14193{
14194  if (!inst.operands[2].present)
14195    inst.operands[2].reg = inst.operands[1].reg + 1;
14196
14197  constraint (inst.operands[0].reg == inst.operands[1].reg
14198	      || inst.operands[0].reg == inst.operands[2].reg
14199	      || inst.operands[0].reg == inst.operands[3].reg,
14200	      BAD_OVERLAP);
14201
14202  inst.instruction |= inst.operands[0].reg;
14203  inst.instruction |= inst.operands[1].reg << 12;
14204  inst.instruction |= inst.operands[2].reg << 8;
14205  inst.instruction |= inst.operands[3].reg << 16;
14206}
14207
14208static void
14209do_t_sxtah (void)
14210{
14211  unsigned Rd, Rn, Rm;
14212
14213  Rd = inst.operands[0].reg;
14214  Rn = inst.operands[1].reg;
14215  Rm = inst.operands[2].reg;
14216
14217  reject_bad_reg (Rd);
14218  reject_bad_reg (Rn);
14219  reject_bad_reg (Rm);
14220
14221  inst.instruction |= Rd << 8;
14222  inst.instruction |= Rn << 16;
14223  inst.instruction |= Rm;
14224  inst.instruction |= inst.operands[3].imm << 4;
14225}
14226
/* Encode Thumb SXTH-family extend instructions (Rd, Rm {, ROR #imm}).
   Uses the 16-bit form where possible (low registers, no rotation).  */
static void
do_t_sxth (void)
{
  unsigned Rd, Rm;

  Rd = inst.operands[0].reg;
  Rm = inst.operands[1].reg;

  reject_bad_reg (Rd);
  reject_bad_reg (Rm);

  /* inst.instruction <= 0xffff means the table entry is a 16-bit
     opcode that has not yet been widened.  */
  if (inst.instruction <= 0xffff
      && inst.size_req != 4
      && Rd <= 7 && Rm <= 7
      && (!inst.operands[2].present || inst.operands[2].imm == 0))
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= Rd;
      inst.instruction |= Rm << 3;
    }
  else if (unified_syntax)
    {
      if (inst.instruction <= 0xffff)
	inst.instruction = THUMB_OP32 (inst.instruction);
      inst.instruction |= Rd << 8;
      inst.instruction |= Rm;
      /* Rotation amount in bits 5:4.  */
      inst.instruction |= inst.operands[2].imm << 4;
    }
  else
    {
      constraint (inst.operands[2].present && inst.operands[2].imm != 0,
		  _("Thumb encoding does not support rotation"));
      constraint (1, BAD_HIREG);
    }
}
14262
/* Encode Thumb SWI/SVC: the comment number is emitted via a fixup.  */
static void
do_t_swi (void)
{
  inst.relocs[0].type = BFD_RELOC_ARM_SWI;
}
14268
/* Encode Thumb-2 TBB/TBH table-branch ([Rn, Rm] / [Rn, Rm, LSL #1]).  */
static void
do_t_tb (void)
{
  unsigned Rn, Rm;
  int half;

  /* Bit 4 of the table opcode distinguishes TBH from TBB.  */
  half = (inst.instruction & 0x10) != 0;
  /* Table branches must be the last instruction in an IT block.  */
  set_pred_insn_type_last ();
  constraint (inst.operands[0].immisreg,
	      _("instruction requires register index"));

  Rn = inst.operands[0].reg;
  Rm = inst.operands[0].imm;

  /* ARMv8 relaxed the restriction on SP as the base register.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8))
    constraint (Rn == REG_SP, BAD_SP);
  reject_bad_reg (Rm);

  /* Only TBH takes the LSL #1 index shift.  */
  constraint (!half && inst.operands[0].shifted,
	      _("instruction does not allow shifted index"));
  inst.instruction |= (Rn << 16) | Rm;
}
14291
/* Encode Thumb UDF (permanently undefined).  Operand 0 is an optional
   immediate, defaulting to zero.  */
static void
do_t_udf (void)
{
  if (!inst.operands[0].present)
    inst.operands[0].imm = 0;

  /* Values above 255, or an explicit .w, need the 32-bit encoding.  */
  if ((unsigned int) inst.operands[0].imm > 255 || inst.size_req == 4)
    {
      constraint (inst.size_req == 2,
                  _("immediate value out of range"));
      inst.instruction = THUMB_OP32 (inst.instruction);
      /* The 16-bit immediate is split across two fields.  */
      inst.instruction |= (inst.operands[0].imm & 0xf000u) << 4;
      inst.instruction |= (inst.operands[0].imm & 0x0fffu) << 0;
    }
  else
    {
      inst.instruction = THUMB_OP16 (inst.instruction);
      inst.instruction |= inst.operands[0].imm;
    }

  /* UDF is acceptable anywhere relative to an IT block.  */
  set_pred_insn_type (NEUTRAL_IT_INSN);
}
14314
14315
/* Encode Thumb-2 USAT: shared helper with a bias of 0.  */
static void
do_t_usat (void)
{
  do_t_ssat_usat (0);
}
14321
14322static void
14323do_t_usat16 (void)
14324{
14325  unsigned Rd, Rn;
14326
14327  Rd = inst.operands[0].reg;
14328  Rn = inst.operands[2].reg;
14329
14330  reject_bad_reg (Rd);
14331  reject_bad_reg (Rn);
14332
14333  inst.instruction |= Rd << 8;
14334  inst.instruction |= inst.operands[1].imm;
14335  inst.instruction |= Rn << 16;
14336}
14337
14338/* Checking the range of the branch offset (VAL) with NBITS bits
14339   and IS_SIGNED signedness.  Also checks the LSB to be 0.  */
14340static int
14341v8_1_branch_value_check (int val, int nbits, int is_signed)
14342{
14343  gas_assert (nbits > 0 && nbits <= 32);
14344  if (is_signed)
14345    {
14346      int cmp = (1 << (nbits - 1));
14347      if ((val < -cmp) || (val >= cmp) || (val & 0x01))
14348	return FAIL;
14349    }
14350  else
14351    {
14352      if ((val <= 0) || (val >= (1 << nbits)) || (val & 0x1))
14353	return FAIL;
14354    }
14355    return SUCCESS;
14356}
14357
/* For branches in Armv8.1-M Mainline.  */
static void
do_t_branch_future (void)
{
  /* Remember which BF-family mnemonic this is before the opcode is
     widened.  */
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);
  /* Operand 0 is the branch-point label: either a resolved constant
     offset or a relocation to fix up later.  */
  if (inst.operands[0].hasreloc == 0)
    {
      if (v8_1_branch_value_check (inst.operands[0].imm, 5, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      inst.instruction |= ((inst.operands[0].imm & 0x1f) >> 1) << 23;
    }
  else
    {
      inst.relocs[0].type = BFD_RELOC_THUMB_PCREL_BRANCH5;
      inst.relocs[0].pc_rel = 1;
    }

  switch (insn)
    {
      case T_MNEM_bf:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 17, TRUE) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* Split the 17-bit offset into the immA/immB/immC fields.  */
	    int immA = (val & 0x0001f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF17;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfl:
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    if (v8_1_branch_value_check (inst.operands[1].imm, 19, TRUE) == FAIL)
	      as_bad (BAD_BRANCH_OFF);

	    /* BFL carries a wider 19-bit offset.  */
	    int immA = (val & 0x0007f000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF19;
	    inst.relocs[1].pc_rel = 1;
	  }
	break;

      case T_MNEM_bfcsel:
	/* Operand 1.  */
	if (inst.operands[1].hasreloc == 0)
	  {
	    int val = inst.operands[1].imm;
	    int immA = (val & 0x00001000) >> 12;
	    int immB = (val & 0x00000ffc) >> 2;
	    int immC = (val & 0x00000002) >> 1;
	    inst.instruction |= (immA << 16) | (immB << 1) | (immC << 11);
	  }
	  else
	  {
	    inst.relocs[1].type = BFD_RELOC_ARM_THUMB_BF13;
	    inst.relocs[1].pc_rel = 1;
	  }

	/* Operand 2.  */
	if (inst.operands[2].hasreloc == 0)
	  {
	      /* Both branch points must resolve the same way.  */
	      constraint ((inst.operands[0].hasreloc != 0), BAD_ARGS);
	      int val2 = inst.operands[2].imm;
	      int val0 = inst.operands[0].imm & 0x1f;
	      /* The else-branch must sit one (T=0) or two (T=1)
		 halfwords past the branch point.  */
	      int diff = val2 - val0;
	      if (diff == 4)
		inst.instruction |= 1 << 17; /* T bit.  */
	      else if (diff != 2)
		as_bad (_("out of range label-relative fixup value"));
	  }
	else
	  {
	      constraint ((inst.operands[0].hasreloc == 0), BAD_ARGS);
	      inst.relocs[2].type = BFD_RELOC_THUMB_PCREL_BFCSEL;
	      inst.relocs[2].pc_rel = 1;
	  }

	/* Operand 3.  */
	constraint (inst.cond != COND_ALWAYS, BAD_COND);
	inst.instruction |= (inst.operands[3].imm & 0xf) << 18;
	break;

      case T_MNEM_bfx:
      case T_MNEM_bflx:
	/* Register-indirect forms: encode the branch target register.  */
	inst.instruction |= inst.operands[1].reg << 16;
	break;

      default: abort ();
    }
}
14466
/* Helper function for do_t_loloop to handle relocations.  */
static void
v8_1_loop_reloc (int is_le)
{
  if (inst.relocs[0].exp.X_op == O_constant)
    {
      /* Constant offset: encode directly.  LE branches backwards, so
	 its offset is negated before the range check.  */
      int value = inst.relocs[0].exp.X_add_number;
      value = (is_le) ? -value : value;

      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
	as_bad (BAD_BRANCH_OFF);

      int imml, immh;

      /* Split the offset into its high and low immediate fields.  */
      immh = (value & 0x00000ffc) >> 2;
      imml = (value & 0x00000002) >> 1;

      inst.instruction |= (imml << 11) | (immh << 1);
    }
  else
    {
      /* Symbolic offset: leave a 12-bit loop relocation for later.  */
      inst.relocs[0].type = BFD_RELOC_ARM_THUMB_LOOP12;
      inst.relocs[0].pc_rel = 1;
    }
}
14492
14493/* For shifts with four operands in MVE.  */
14494static void
14495do_mve_scalar_shift1 (void)
14496{
14497  unsigned int value = inst.operands[2].imm;
14498
14499  inst.instruction |= inst.operands[0].reg << 16;
14500  inst.instruction |= inst.operands[1].reg << 8;
14501
14502  /* Setting the bit for saturation.  */
14503  inst.instruction |= ((value == 64) ? 0: 1) << 7;
14504
14505  /* Assuming Rm is already checked not to be 11x1.  */
14506  constraint (inst.operands[3].reg == inst.operands[0].reg, BAD_OVERLAP);
14507  constraint (inst.operands[3].reg == inst.operands[1].reg, BAD_OVERLAP);
14508  inst.instruction |= inst.operands[3].reg << 12;
14509}
14510
/* For shifts in MVE.  */
static void
do_mve_scalar_shift (void)
{
  /* Two-operand form: shift a single register, encoded with 0xf in
     the second register field.  */
  if (!inst.operands[2].present)
    {
      inst.operands[2] = inst.operands[1];
      inst.operands[1].reg = 0xf;
    }

  inst.instruction |= inst.operands[0].reg << 16;
  inst.instruction |= inst.operands[1].reg << 8;

  if (inst.operands[2].isreg)
    {
      /* Assuming Rm is already checked not to be 11x1.  */
      constraint (inst.operands[2].reg == inst.operands[0].reg, BAD_OVERLAP);
      constraint (inst.operands[2].reg == inst.operands[1].reg, BAD_OVERLAP);
      inst.instruction |= inst.operands[2].reg << 12;
    }
  else
    {
      /* Assuming imm is already checked as [1,32].  */
      unsigned int value = inst.operands[2].imm;
      /* Shift amount split across the imm3:imm2 fields.  */
      inst.instruction |= (value & 0x1c) << 10;
      inst.instruction |= (value & 0x03) << 6;
      /* Change last 4 bits from 0xd to 0xf.  */
      inst.instruction |= 0x2;
    }
}
14541
14542/* MVE instruction encoder helpers.  */
14543#define M_MNEM_vabav	0xee800f01
14544#define M_MNEM_vmladav	  0xeef00e00
14545#define M_MNEM_vmladava	  0xeef00e20
14546#define M_MNEM_vmladavx	  0xeef01e00
14547#define M_MNEM_vmladavax  0xeef01e20
14548#define M_MNEM_vmlsdav	  0xeef00e01
14549#define M_MNEM_vmlsdava	  0xeef00e21
14550#define M_MNEM_vmlsdavx	  0xeef01e01
14551#define M_MNEM_vmlsdavax  0xeef01e21
14552#define M_MNEM_vmullt	0xee011e00
14553#define M_MNEM_vmullb	0xee010e00
14554#define M_MNEM_vctp	0xf000e801
14555#define M_MNEM_vst20	0xfc801e00
14556#define M_MNEM_vst21	0xfc801e20
14557#define M_MNEM_vst40	0xfc801e01
14558#define M_MNEM_vst41	0xfc801e21
14559#define M_MNEM_vst42	0xfc801e41
14560#define M_MNEM_vst43	0xfc801e61
14561#define M_MNEM_vld20	0xfc901e00
14562#define M_MNEM_vld21	0xfc901e20
14563#define M_MNEM_vld40	0xfc901e01
14564#define M_MNEM_vld41	0xfc901e21
14565#define M_MNEM_vld42	0xfc901e41
14566#define M_MNEM_vld43	0xfc901e61
14567#define M_MNEM_vstrb	0xec000e00
14568#define M_MNEM_vstrh	0xec000e10
14569#define M_MNEM_vstrw	0xec000e40
14570#define M_MNEM_vstrd	0xec000e50
14571#define M_MNEM_vldrb	0xec100e00
14572#define M_MNEM_vldrh	0xec100e10
14573#define M_MNEM_vldrw	0xec100e40
14574#define M_MNEM_vldrd	0xec100e50
14575#define M_MNEM_vmovlt	0xeea01f40
14576#define M_MNEM_vmovlb	0xeea00f40
14577#define M_MNEM_vmovnt	0xfe311e81
14578#define M_MNEM_vmovnb	0xfe310e81
14579#define M_MNEM_vadc	0xee300f00
14580#define M_MNEM_vadci	0xee301f00
14581#define M_MNEM_vbrsr	0xfe011e60
14582#define M_MNEM_vaddlv	0xee890f00
14583#define M_MNEM_vaddlva	0xee890f20
14584#define M_MNEM_vaddv	0xeef10f00
14585#define M_MNEM_vaddva	0xeef10f20
14586#define M_MNEM_vddup	0xee011f6e
14587#define M_MNEM_vdwdup	0xee011f60
14588#define M_MNEM_vidup	0xee010f6e
14589#define M_MNEM_viwdup	0xee010f60
14590#define M_MNEM_vmaxv	0xeee20f00
14591#define M_MNEM_vmaxav	0xeee00f00
14592#define M_MNEM_vminv	0xeee20f80
14593#define M_MNEM_vminav	0xeee00f80
14594#define M_MNEM_vmlaldav	  0xee800e00
14595#define M_MNEM_vmlaldava  0xee800e20
14596#define M_MNEM_vmlaldavx  0xee801e00
14597#define M_MNEM_vmlaldavax 0xee801e20
14598#define M_MNEM_vmlsldav	  0xee800e01
14599#define M_MNEM_vmlsldava  0xee800e21
14600#define M_MNEM_vmlsldavx  0xee801e01
14601#define M_MNEM_vmlsldavax 0xee801e21
14602#define M_MNEM_vrmlaldavhx  0xee801f00
14603#define M_MNEM_vrmlaldavhax 0xee801f20
14604#define M_MNEM_vrmlsldavh   0xfe800e01
14605#define M_MNEM_vrmlsldavha  0xfe800e21
14606#define M_MNEM_vrmlsldavhx  0xfe801e01
14607#define M_MNEM_vrmlsldavhax 0xfe801e21
14608#define M_MNEM_vqmovnt	  0xee331e01
14609#define M_MNEM_vqmovnb	  0xee330e01
14610#define M_MNEM_vqmovunt	  0xee311e81
14611#define M_MNEM_vqmovunb	  0xee310e81
14612#define M_MNEM_vshrnt	    0xee801fc1
14613#define M_MNEM_vshrnb	    0xee800fc1
14614#define M_MNEM_vrshrnt	    0xfe801fc1
14615#define M_MNEM_vqshrnt	    0xee801f40
14616#define M_MNEM_vqshrnb	    0xee800f40
14617#define M_MNEM_vqshrunt	    0xee801fc0
14618#define M_MNEM_vqshrunb	    0xee800fc0
14619#define M_MNEM_vrshrnb	    0xfe800fc1
14620#define M_MNEM_vqrshrnt	    0xee801f41
14621#define M_MNEM_vqrshrnb	    0xee800f41
14622#define M_MNEM_vqrshrunt    0xfe801fc0
14623#define M_MNEM_vqrshrunb    0xfe800fc0
14624
14625/* Bfloat16 instruction encoder helpers.  */
14626#define B_MNEM_vfmat 0xfc300850
14627#define B_MNEM_vfmab 0xfc300810
14628
14629/* Neon instruction encoder helpers.  */
14630
14631/* Encodings for the different types for various Neon opcodes.  */
14632
14633/* An "invalid" code for the following tables.  */
14634#define N_INV -1u
14635
/* One entry per overloaded Neon mnemonic: the alternative base encodings
   selected via the NEON_ENC_* macros defined further below.  */
struct neon_tab_entry
{
  unsigned integer;		/* Also used for arm-reg and interleave forms
				   (see NEON_ENC_ARMREG_/NEON_ENC_INTERLV_).  */
  unsigned float_or_poly;	/* Also used for lane forms (NEON_ENC_LANE_).  */
  unsigned scalar_or_imm;	/* Also used for immediate and dup forms
				   (NEON_ENC_IMMED_/NEON_ENC_DUP_).  */
};
14642
14643/* Map overloaded Neon opcodes to their respective encodings.  */
14644#define NEON_ENC_TAB					\
14645  X(vabd,	0x0000700, 0x1200d00, N_INV),		\
14646  X(vabdl,	0x0800700, N_INV,     N_INV),		\
14647  X(vmax,	0x0000600, 0x0000f00, N_INV),		\
14648  X(vmin,	0x0000610, 0x0200f00, N_INV),		\
14649  X(vpadd,	0x0000b10, 0x1000d00, N_INV),		\
14650  X(vpmax,	0x0000a00, 0x1000f00, N_INV),		\
14651  X(vpmin,	0x0000a10, 0x1200f00, N_INV),		\
14652  X(vadd,	0x0000800, 0x0000d00, N_INV),		\
14653  X(vaddl,	0x0800000, N_INV,     N_INV),		\
14654  X(vsub,	0x1000800, 0x0200d00, N_INV),		\
14655  X(vsubl,	0x0800200, N_INV,     N_INV),		\
14656  X(vceq,	0x1000810, 0x0000e00, 0x1b10100),	\
14657  X(vcge,	0x0000310, 0x1000e00, 0x1b10080),	\
14658  X(vcgt,	0x0000300, 0x1200e00, 0x1b10000),	\
14659  /* Register variants of the following two instructions are encoded as
14660     vcge / vcgt with the operands reversed.  */  	\
14661  X(vclt,	0x0000300, 0x1200e00, 0x1b10200),	\
14662  X(vcle,	0x0000310, 0x1000e00, 0x1b10180),	\
14663  X(vfma,	N_INV, 0x0000c10, N_INV),		\
14664  X(vfms,	N_INV, 0x0200c10, N_INV),		\
14665  X(vmla,	0x0000900, 0x0000d10, 0x0800040),	\
14666  X(vmls,	0x1000900, 0x0200d10, 0x0800440),	\
14667  X(vmul,	0x0000910, 0x1000d10, 0x0800840),	\
14668  X(vmull,	0x0800c00, 0x0800e00, 0x0800a40), /* polynomial not float.  */ \
14669  X(vmlal,	0x0800800, N_INV,     0x0800240),	\
14670  X(vmlsl,	0x0800a00, N_INV,     0x0800640),	\
14671  X(vqdmlal,	0x0800900, N_INV,     0x0800340),	\
14672  X(vqdmlsl,	0x0800b00, N_INV,     0x0800740),	\
14673  X(vqdmull,	0x0800d00, N_INV,     0x0800b40),	\
14674  X(vqdmulh,    0x0000b00, N_INV,     0x0800c40),	\
14675  X(vqrdmulh,   0x1000b00, N_INV,     0x0800d40),	\
14676  X(vqrdmlah,   0x3000b10, N_INV,     0x0800e40),	\
14677  X(vqrdmlsh,   0x3000c10, N_INV,     0x0800f40),	\
14678  X(vshl,	0x0000400, N_INV,     0x0800510),	\
14679  X(vqshl,	0x0000410, N_INV,     0x0800710),	\
14680  X(vand,	0x0000110, N_INV,     0x0800030),	\
14681  X(vbic,	0x0100110, N_INV,     0x0800030),	\
14682  X(veor,	0x1000110, N_INV,     N_INV),		\
14683  X(vorn,	0x0300110, N_INV,     0x0800010),	\
14684  X(vorr,	0x0200110, N_INV,     0x0800010),	\
14685  X(vmvn,	0x1b00580, N_INV,     0x0800030),	\
14686  X(vshll,	0x1b20300, N_INV,     0x0800a10), /* max shift, immediate.  */ \
14687  X(vcvt,       0x1b30600, N_INV,     0x0800e10), /* integer, fixed-point.  */ \
14688  X(vdup,       0xe800b10, N_INV,     0x1b00c00), /* arm, scalar.  */ \
14689  X(vld1,       0x0200000, 0x0a00000, 0x0a00c00), /* interlv, lane, dup.  */ \
14690  X(vst1,	0x0000000, 0x0800000, N_INV),		\
14691  X(vld2,	0x0200100, 0x0a00100, 0x0a00d00),	\
14692  X(vst2,	0x0000100, 0x0800100, N_INV),		\
14693  X(vld3,	0x0200200, 0x0a00200, 0x0a00e00),	\
14694  X(vst3,	0x0000200, 0x0800200, N_INV),		\
14695  X(vld4,	0x0200300, 0x0a00300, 0x0a00f00),	\
14696  X(vst4,	0x0000300, 0x0800300, N_INV),		\
14697  X(vmovn,	0x1b20200, N_INV,     N_INV),		\
14698  X(vtrn,	0x1b20080, N_INV,     N_INV),		\
14699  X(vqmovn,	0x1b20200, N_INV,     N_INV),		\
14700  X(vqmovun,	0x1b20240, N_INV,     N_INV),		\
14701  X(vnmul,      0xe200a40, 0xe200b40, N_INV),		\
14702  X(vnmla,      0xe100a40, 0xe100b40, N_INV),		\
14703  X(vnmls,      0xe100a00, 0xe100b00, N_INV),		\
14704  X(vfnma,      0xe900a40, 0xe900b40, N_INV),		\
14705  X(vfnms,      0xe900a00, 0xe900b00, N_INV),		\
14706  X(vcmp,	0xeb40a40, 0xeb40b40, N_INV),		\
14707  X(vcmpz,	0xeb50a40, 0xeb50b40, N_INV),		\
14708  X(vcmpe,	0xeb40ac0, 0xeb40bc0, N_INV),		\
14709  X(vcmpez,     0xeb50ac0, 0xeb50bc0, N_INV),		\
14710  X(vseleq,	0xe000a00, N_INV,     N_INV),		\
14711  X(vselvs,	0xe100a00, N_INV,     N_INV),		\
14712  X(vselge,	0xe200a00, N_INV,     N_INV),		\
14713  X(vselgt,	0xe300a00, N_INV,     N_INV),		\
14714  X(vmaxnm,	0xe800a00, 0x3000f10, N_INV),		\
14715  X(vminnm,	0xe800a40, 0x3200f10, N_INV),		\
14716  X(vcvta,	0xebc0a40, 0x3bb0000, N_INV),		\
14717  X(vrintr,	0xeb60a40, 0x3ba0400, N_INV),		\
14718  X(vrinta,	0xeb80a40, 0x3ba0400, N_INV),		\
14719  X(aes,	0x3b00300, N_INV,     N_INV),		\
14720  X(sha3op,	0x2000c00, N_INV,     N_INV),		\
14721  X(sha1h,	0x3b902c0, N_INV,     N_INV),           \
14722  X(sha2op,     0x3ba0380, N_INV,     N_INV)
14723
14724enum neon_opc
14725{
14726#define X(OPC,I,F,S) N_MNEM_##OPC
14727NEON_ENC_TAB
14728#undef X
14729};
14730
14731static const struct neon_tab_entry neon_enc_tab[] =
14732{
14733#define X(OPC,I,F,S) { (I), (F), (S) }
14734NEON_ENC_TAB
14735#undef X
14736};
14737
14738/* Do not use these macros; instead, use NEON_ENCODE defined below.  */
14739#define NEON_ENC_INTEGER_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14740#define NEON_ENC_ARMREG_(X)  (neon_enc_tab[(X) & 0x0fffffff].integer)
14741#define NEON_ENC_POLY_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14742#define NEON_ENC_FLOAT_(X)   (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14743#define NEON_ENC_SCALAR_(X)  (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14744#define NEON_ENC_IMMED_(X)   (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14745#define NEON_ENC_INTERLV_(X) (neon_enc_tab[(X) & 0x0fffffff].integer)
14746#define NEON_ENC_LANE_(X)    (neon_enc_tab[(X) & 0x0fffffff].float_or_poly)
14747#define NEON_ENC_DUP_(X)     (neon_enc_tab[(X) & 0x0fffffff].scalar_or_imm)
14748#define NEON_ENC_SINGLE_(X) \
14749  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf0000000))
14750#define NEON_ENC_DOUBLE_(X) \
14751  ((neon_enc_tab[(X) & 0x0fffffff].float_or_poly) | ((X) & 0xf0000000))
14752#define NEON_ENC_FPV8_(X) \
14753  ((neon_enc_tab[(X) & 0x0fffffff].integer) | ((X) & 0xf000000))
14754
14755#define NEON_ENCODE(type, inst)					\
14756  do								\
14757    {								\
14758      inst.instruction = NEON_ENC_##type##_ (inst.instruction);	\
14759      inst.is_neon = 1;						\
14760    }								\
14761  while (0)
14762
14763#define check_neon_suffixes						\
14764  do									\
14765    {									\
14766      if (!inst.error && inst.vectype.elems > 0 && !inst.is_neon)	\
14767	{								\
14768	  as_bad (_("invalid neon suffix for non neon instruction"));	\
14769	  return;							\
14770	}								\
14771    }									\
14772  while (0)
14773
14774/* Define shapes for instruction operands. The following mnemonic characters
14775   are used in this table:
14776
14777     F - VFP S<n> register
14778     D - Neon D<n> register
14779     Q - Neon Q<n> register
14780     I - Immediate
14781     S - Scalar
14782     R - ARM register
14783     L - D<n> register list
14784
14785   This table is used to generate various data:
14786     - enumerations of the form NS_DDR to be used as arguments to
14787       neon_select_shape.
14788     - a table classifying shapes into single, double, quad, mixed.
14789     - a table used to drive neon_select_shape.  */
14790
14791#define NEON_SHAPE_DEF			\
14792  X(4, (R, R, Q, Q), QUAD),		\
14793  X(4, (Q, R, R, I), QUAD),		\
14794  X(4, (R, R, S, S), QUAD),		\
14795  X(4, (S, S, R, R), QUAD),		\
14796  X(3, (Q, R, I), QUAD),		\
14797  X(3, (I, Q, Q), QUAD),		\
14798  X(3, (I, Q, R), QUAD),		\
14799  X(3, (R, Q, Q), QUAD),		\
14800  X(3, (D, D, D), DOUBLE),		\
14801  X(3, (Q, Q, Q), QUAD),		\
14802  X(3, (D, D, I), DOUBLE),		\
14803  X(3, (Q, Q, I), QUAD),		\
14804  X(3, (D, D, S), DOUBLE),		\
14805  X(3, (Q, Q, S), QUAD),		\
14806  X(3, (Q, Q, R), QUAD),		\
14807  X(3, (R, R, Q), QUAD),		\
14808  X(2, (R, Q),	  QUAD),		\
14809  X(2, (D, D), DOUBLE),			\
14810  X(2, (Q, Q), QUAD),			\
14811  X(2, (D, S), DOUBLE),			\
14812  X(2, (Q, S), QUAD),			\
14813  X(2, (D, R), DOUBLE),			\
14814  X(2, (Q, R), QUAD),			\
14815  X(2, (D, I), DOUBLE),			\
14816  X(2, (Q, I), QUAD),			\
14817  X(3, (P, F, I), SINGLE),		\
14818  X(3, (P, D, I), DOUBLE),		\
14819  X(3, (P, Q, I), QUAD),		\
14820  X(4, (P, F, F, I), SINGLE),		\
14821  X(4, (P, D, D, I), DOUBLE),		\
14822  X(4, (P, Q, Q, I), QUAD),		\
14823  X(5, (P, F, F, F, I), SINGLE),	\
14824  X(5, (P, D, D, D, I), DOUBLE),	\
14825  X(5, (P, Q, Q, Q, I), QUAD),		\
14826  X(3, (D, L, D), DOUBLE),		\
14827  X(2, (D, Q), MIXED),			\
14828  X(2, (Q, D), MIXED),			\
14829  X(3, (D, Q, I), MIXED),		\
14830  X(3, (Q, D, I), MIXED),		\
14831  X(3, (Q, D, D), MIXED),		\
14832  X(3, (D, Q, Q), MIXED),		\
14833  X(3, (Q, Q, D), MIXED),		\
14834  X(3, (Q, D, S), MIXED),		\
14835  X(3, (D, Q, S), MIXED),		\
14836  X(4, (D, D, D, I), DOUBLE),		\
14837  X(4, (Q, Q, Q, I), QUAD),		\
14838  X(4, (D, D, S, I), DOUBLE),		\
14839  X(4, (Q, Q, S, I), QUAD),		\
14840  X(2, (F, F), SINGLE),			\
14841  X(3, (F, F, F), SINGLE),		\
14842  X(2, (F, I), SINGLE),			\
14843  X(2, (F, D), MIXED),			\
14844  X(2, (D, F), MIXED),			\
14845  X(3, (F, F, I), MIXED),		\
14846  X(4, (R, R, F, F), SINGLE),		\
14847  X(4, (F, F, R, R), SINGLE),		\
14848  X(3, (D, R, R), DOUBLE),		\
14849  X(3, (R, R, D), DOUBLE),		\
14850  X(2, (S, R), SINGLE),			\
14851  X(2, (R, S), SINGLE),			\
14852  X(2, (F, R), SINGLE),			\
14853  X(2, (R, F), SINGLE),			\
14854/* Used for MVE tail predicated loop instructions.  */\
14855  X(2, (R, R), QUAD),			\
14856/* Half float shape supported so far.  */\
14857  X (2, (H, D), MIXED),			\
14858  X (2, (D, H), MIXED),			\
14859  X (2, (H, F), MIXED),			\
14860  X (2, (F, H), MIXED),			\
14861  X (2, (H, H), HALF),			\
14862  X (2, (H, R), HALF),			\
14863  X (2, (R, H), HALF),			\
14864  X (2, (H, I), HALF),			\
14865  X (3, (H, H, H), HALF),		\
14866  X (3, (H, F, I), MIXED),		\
14867  X (3, (F, H, I), MIXED),		\
14868  X (3, (D, H, H), MIXED),		\
14869  X (3, (D, H, S), MIXED)
14870
14871#define S2(A,B)		NS_##A##B
14872#define S3(A,B,C)	NS_##A##B##C
14873#define S4(A,B,C,D)	NS_##A##B##C##D
14874#define S5(A,B,C,D,E)	NS_##A##B##C##D##E
14875
14876#define X(N, L, C) S##N L
14877
14878enum neon_shape
14879{
14880  NEON_SHAPE_DEF,
14881  NS_NULL
14882};
14883
14884#undef X
14885#undef S2
14886#undef S3
14887#undef S4
14888#undef S5
14889
14890enum neon_shape_class
14891{
14892  SC_HALF,
14893  SC_SINGLE,
14894  SC_DOUBLE,
14895  SC_QUAD,
14896  SC_MIXED
14897};
14898
14899#define X(N, L, C) SC_##C
14900
14901static enum neon_shape_class neon_shape_class[] =
14902{
14903  NEON_SHAPE_DEF
14904};
14905
14906#undef X
14907
/* The kind of operand a single slot in an instruction shape may hold
   (see the mnemonic key above NEON_SHAPE_DEF).  */
enum neon_shape_el
{
  SE_H,		/* Half-precision float register.  */
  SE_F,		/* VFP S<n> register.  */
  SE_D,		/* Neon D<n> register.  */
  SE_Q,		/* Neon Q<n> register.  */
  SE_I,		/* Immediate.  */
  SE_S,		/* Scalar.  */
  SE_R,		/* ARM core register.  */
  SE_L,		/* D<n> register list.  */
  SE_P		/* NOTE(review): not listed in the mnemonic key above;
		   never checked in neon_select_shape -- confirm meaning.  */
};
14920
/* Register widths in bits of the neon_shape_el values above; zero for
   elements with no fixed register width.  */
static unsigned neon_shape_el_size[] =
{
  16,	/* SE_H.  */
  32,	/* SE_F.  */
  64,	/* SE_D.  */
  128,	/* SE_Q.  */
  0,	/* SE_I.  */
  32,	/* SE_S.  */
  32,	/* SE_R.  */
  0,	/* SE_L.  */
  0	/* SE_P.  */
};
14934
14935struct neon_shape_info
14936{
14937  unsigned els;
14938  enum neon_shape_el el[NEON_MAX_TYPE_ELS];
14939};
14940
14941#define S2(A,B)		{ SE_##A, SE_##B }
14942#define S3(A,B,C)	{ SE_##A, SE_##B, SE_##C }
14943#define S4(A,B,C,D)	{ SE_##A, SE_##B, SE_##C, SE_##D }
14944#define S5(A,B,C,D,E)	{ SE_##A, SE_##B, SE_##C, SE_##D, SE_##E }
14945
14946#define X(N, L, C) { N, S##N L }
14947
14948static struct neon_shape_info neon_shape_tab[] =
14949{
14950  NEON_SHAPE_DEF
14951};
14952
14953#undef X
14954#undef S2
14955#undef S3
14956#undef S4
14957#undef S5
14958
14959/* Bit masks used in type checking given instructions.
14960  'N_EQK' means the type must be the same as (or based on in some way) the key
14961   type, which itself is marked with the 'N_KEY' bit. If the 'N_EQK' bit is
14962   set, various other bits can be set as well in order to modify the meaning of
14963   the type constraint.  */
14964
14965enum neon_type_mask
14966{
14967  N_S8   = 0x0000001,
14968  N_S16  = 0x0000002,
14969  N_S32  = 0x0000004,
14970  N_S64  = 0x0000008,
14971  N_U8   = 0x0000010,
14972  N_U16  = 0x0000020,
14973  N_U32  = 0x0000040,
14974  N_U64  = 0x0000080,
14975  N_I8   = 0x0000100,
14976  N_I16  = 0x0000200,
14977  N_I32  = 0x0000400,
14978  N_I64  = 0x0000800,
14979  N_8    = 0x0001000,
14980  N_16   = 0x0002000,
14981  N_32   = 0x0004000,
14982  N_64   = 0x0008000,
14983  N_P8   = 0x0010000,
14984  N_P16  = 0x0020000,
14985  N_F16  = 0x0040000,
14986  N_F32  = 0x0080000,
14987  N_F64  = 0x0100000,
14988  N_P64	 = 0x0200000,
14989  N_BF16 = 0x0400000,
14990  N_KEY  = 0x1000000, /* Key element (main type specifier).  */
14991  N_EQK  = 0x2000000, /* Given operand has the same type & size as the key.  */
14992  N_VFP  = 0x4000000, /* VFP mode: operand size must match register width.  */
14993  N_UNT  = 0x8000000, /* Must be explicitly untyped.  */
14994  N_DBL  = 0x0000001, /* If N_EQK, this operand is twice the size.  */
14995  N_HLF  = 0x0000002, /* If N_EQK, this operand is half the size.  */
14996  N_SGN  = 0x0000004, /* If N_EQK, this operand is forced to be signed.  */
14997  N_UNS  = 0x0000008, /* If N_EQK, this operand is forced to be unsigned.  */
14998  N_INT  = 0x0000010, /* If N_EQK, this operand is forced to be integer.  */
14999  N_FLT  = 0x0000020, /* If N_EQK, this operand is forced to be float.  */
15000  N_SIZ  = 0x0000040, /* If N_EQK, this operand is forced to be size-only.  */
15001  N_UTYP = 0,
15002  N_MAX_NONSPECIAL = N_P64
15003};
15004
15005#define N_ALLMODS  (N_DBL | N_HLF | N_SGN | N_UNS | N_INT | N_FLT | N_SIZ)
15006
15007#define N_SU_ALL   (N_S8 | N_S16 | N_S32 | N_S64 | N_U8 | N_U16 | N_U32 | N_U64)
15008#define N_SU_32    (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
15009#define N_SU_16_64 (N_S16 | N_S32 | N_S64 | N_U16 | N_U32 | N_U64)
15010#define N_S_32     (N_S8 | N_S16 | N_S32)
15011#define N_F_16_32  (N_F16 | N_F32)
15012#define N_SUF_32   (N_SU_32 | N_F_16_32)
15013#define N_I_ALL    (N_I8 | N_I16 | N_I32 | N_I64)
15014#define N_IF_32    (N_I8 | N_I16 | N_I32 | N_F16 | N_F32)
15015#define N_F_ALL    (N_F16 | N_F32 | N_F64)
15016#define N_I_MVE	   (N_I8 | N_I16 | N_I32)
15017#define N_F_MVE	   (N_F16 | N_F32)
15018#define N_SU_MVE   (N_S8 | N_S16 | N_S32 | N_U8 | N_U16 | N_U32)
15019
15020/* Pass this as the first type argument to neon_check_type to ignore types
15021   altogether.  */
15022#define N_IGNORE_TYPE (N_KEY | N_EQK)
15023
/* Select a "shape" for the current instruction (describing register types or
   sizes) from a list of alternatives. Return NS_NULL if the current instruction
   doesn't fit. For non-polymorphic shapes, checking is usually done as a
   function of operand parsing, so this function doesn't need to be called.
   Shapes should be listed in order of decreasing length.
   The variable argument list is a sequence of candidate enum neon_shape
   values terminated by NS_NULL.  */

static enum neon_shape
neon_select_shape (enum neon_shape shape, ...)
{
  va_list ap;
  enum neon_shape first_shape = shape;

  /* Fix missing optional operands. FIXME: we don't know at this point how
     many arguments we should have, so this makes the assumption that we have
     > 1. This is true of all current Neon opcodes, I think, but may not be
     true in the future.  */
  if (!inst.operands[1].present)
    inst.operands[1] = inst.operands[0];

  va_start (ap, shape);

  /* Walk the NS_NULL-terminated candidate list; stop at the first shape
     compatible with every parsed operand.  */
  for (; shape != NS_NULL; shape = (enum neon_shape) va_arg (ap, int))
    {
      unsigned j;
      int matches = 1;

      for (j = 0; j < neon_shape_tab[shape].els; j++)
	{
	  /* A shape needing more operands than were parsed can't match.  */
	  if (!inst.operands[j].present)
	    {
	      matches = 0;
	      break;
	    }

	  switch (neon_shape_tab[shape].el[j])
	    {
	      /* If a  .f16,  .16,  .u16,  .s16 type specifier is given over
		 a VFP single precision register operand, it's essentially
		 means only half of the register is used.

		 If the type specifier is given after the mnemonics, the
		 information is stored in inst.vectype.  If the type specifier
		 is given after register operand, the information is stored
		 in inst.operands[].vectype.

		 When there is only one type specifier, and all the register
		 operands are the same type of hardware register, the type
		 specifier applies to all register operands.

		 If no type specifier is given, the shape is inferred from
		 operand information.

		 for example:
		 vadd.f16 s0, s1, s2:		NS_HHH
		 vabs.f16 s0, s1:		NS_HH
		 vmov.f16 s0, r1:		NS_HR
		 vmov.f16 r0, s1:		NS_RH
		 vcvt.f16 r0, s1:		NS_RH
		 vcvt.f16.s32	s2, s2, #29:	NS_HFI
		 vcvt.f16.s32	s2, s2:		NS_HF
	      */
	    case SE_H:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1
			 && inst.vectype.el[0].size == 16)
			|| (inst.vectype.elems > 1
			    && inst.vectype.el[j].size == 16)
			|| (inst.vectype.elems == 0
			    && inst.operands[j].vectype.type != NT_invtype
			    && inst.operands[j].vectype.size == 16))))
		matches = 0;
	      break;

	    case SE_F:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].issingle
		    && !inst.operands[j].isquad
		    && ((inst.vectype.elems == 1 && inst.vectype.el[0].size == 32)
			|| (inst.vectype.elems > 1 && inst.vectype.el[j].size == 32)
			|| (inst.vectype.elems == 0
			    && (inst.operands[j].vectype.size == 32
				|| inst.operands[j].vectype.type == NT_invtype)))))
		matches = 0;
	      break;

	    case SE_D:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && !inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_R:
	      if (!(inst.operands[j].isreg
		    && !inst.operands[j].isvec))
		matches = 0;
	      break;

	    case SE_Q:
	      if (!(inst.operands[j].isreg
		    && inst.operands[j].isvec
		    && inst.operands[j].isquad
		    && !inst.operands[j].issingle))
		matches = 0;
	      break;

	    case SE_I:
	      if (!(!inst.operands[j].isreg
		    && !inst.operands[j].isscalar))
		matches = 0;
	      break;

	    case SE_S:
	      if (!(!inst.operands[j].isreg
		    && inst.operands[j].isscalar))
		matches = 0;
	      break;

	      /* SE_P and SE_L elements are accepted without any check.  */
	    case SE_P:
	    case SE_L:
	      break;
	    }
	  if (!matches)
	    break;
	}
      if (matches && (j >= ARM_IT_MAX_OPERANDS || !inst.operands[j].present))
	/* We've matched all the entries in the shape table, and we don't
	   have any left over operands which have not been matched.  */
	break;
    }

  va_end (ap);

  if (shape == NS_NULL && first_shape != NS_NULL)
    first_error (_("invalid instruction shape"));

  return shape;
}
15167
15168/* True if SHAPE is predominantly a quadword operation (most of the time, this
15169   means the Q bit should be set).  */
15170
15171static int
15172neon_quad (enum neon_shape shape)
15173{
15174  return neon_shape_class[shape] == SC_QUAD;
15175}
15176
15177static void
15178neon_modify_type_size (unsigned typebits, enum neon_el_type *g_type,
15179		       unsigned *g_size)
15180{
15181  /* Allow modification to be made to types which are constrained to be
15182     based on the key element, based on bits set alongside N_EQK.  */
15183  if ((typebits & N_EQK) != 0)
15184    {
15185      if ((typebits & N_HLF) != 0)
15186	*g_size /= 2;
15187      else if ((typebits & N_DBL) != 0)
15188	*g_size *= 2;
15189      if ((typebits & N_SGN) != 0)
15190	*g_type = NT_signed;
15191      else if ((typebits & N_UNS) != 0)
15192	*g_type = NT_unsigned;
15193      else if ((typebits & N_INT) != 0)
15194	*g_type = NT_integer;
15195      else if ((typebits & N_FLT) != 0)
15196	*g_type = NT_float;
15197      else if ((typebits & N_SIZ) != 0)
15198	*g_type = NT_untyped;
15199    }
15200}
15201
15202/* Return operand OPNO promoted by bits set in THISARG. KEY should be the "key"
15203   operand type, i.e. the single type specified in a Neon instruction when it
15204   is the only one given.  */
15205
15206static struct neon_type_el
15207neon_type_promote (struct neon_type_el *key, unsigned thisarg)
15208{
15209  struct neon_type_el dest = *key;
15210
15211  gas_assert ((thisarg & N_EQK) != 0);
15212
15213  neon_modify_type_size (thisarg, &dest.type, &dest.size);
15214
15215  return dest;
15216}
15217
15218/* Convert Neon type and size into compact bitmask representation.  */
15219
15220static enum neon_type_mask
15221type_chk_of_el_type (enum neon_el_type type, unsigned size)
15222{
15223  switch (type)
15224    {
15225    case NT_untyped:
15226      switch (size)
15227	{
15228	case 8:  return N_8;
15229	case 16: return N_16;
15230	case 32: return N_32;
15231	case 64: return N_64;
15232	default: ;
15233	}
15234      break;
15235
15236    case NT_integer:
15237      switch (size)
15238	{
15239	case 8:  return N_I8;
15240	case 16: return N_I16;
15241	case 32: return N_I32;
15242	case 64: return N_I64;
15243	default: ;
15244	}
15245      break;
15246
15247    case NT_float:
15248      switch (size)
15249	{
15250	case 16: return N_F16;
15251	case 32: return N_F32;
15252	case 64: return N_F64;
15253	default: ;
15254	}
15255      break;
15256
15257    case NT_poly:
15258      switch (size)
15259	{
15260	case 8:  return N_P8;
15261	case 16: return N_P16;
15262	case 64: return N_P64;
15263	default: ;
15264	}
15265      break;
15266
15267    case NT_signed:
15268      switch (size)
15269	{
15270	case 8:  return N_S8;
15271	case 16: return N_S16;
15272	case 32: return N_S32;
15273	case 64: return N_S64;
15274	default: ;
15275	}
15276      break;
15277
15278    case NT_unsigned:
15279      switch (size)
15280	{
15281	case 8:  return N_U8;
15282	case 16: return N_U16;
15283	case 32: return N_U32;
15284	case 64: return N_U64;
15285	default: ;
15286	}
15287      break;
15288
15289    case NT_bfloat:
15290      if (size == 16) return N_BF16;
15291      break;
15292
15293    default: ;
15294    }
15295
15296  return N_UTYP;
15297}
15298
15299/* Convert compact Neon bitmask type representation to a type and size. Only
15300   handles the case where a single bit is set in the mask.  */
15301
15302static int
15303el_type_of_type_chk (enum neon_el_type *type, unsigned *size,
15304		     enum neon_type_mask mask)
15305{
15306  if ((mask & N_EQK) != 0)
15307    return FAIL;
15308
15309  if ((mask & (N_S8 | N_U8 | N_I8 | N_8 | N_P8)) != 0)
15310    *size = 8;
15311  else if ((mask & (N_S16 | N_U16 | N_I16 | N_16 | N_F16 | N_P16 | N_BF16))
15312	   != 0)
15313    *size = 16;
15314  else if ((mask & (N_S32 | N_U32 | N_I32 | N_32 | N_F32)) != 0)
15315    *size = 32;
15316  else if ((mask & (N_S64 | N_U64 | N_I64 | N_64 | N_F64 | N_P64)) != 0)
15317    *size = 64;
15318  else
15319    return FAIL;
15320
15321  if ((mask & (N_S8 | N_S16 | N_S32 | N_S64)) != 0)
15322    *type = NT_signed;
15323  else if ((mask & (N_U8 | N_U16 | N_U32 | N_U64)) != 0)
15324    *type = NT_unsigned;
15325  else if ((mask & (N_I8 | N_I16 | N_I32 | N_I64)) != 0)
15326    *type = NT_integer;
15327  else if ((mask & (N_8 | N_16 | N_32 | N_64)) != 0)
15328    *type = NT_untyped;
15329  else if ((mask & (N_P8 | N_P16 | N_P64)) != 0)
15330    *type = NT_poly;
15331  else if ((mask & (N_F_ALL)) != 0)
15332    *type = NT_float;
15333  else if ((mask & (N_BF16)) != 0)
15334    *type = NT_bfloat;
15335  else
15336    return FAIL;
15337
15338  return SUCCESS;
15339}
15340
15341/* Modify a bitmask of allowed types. This is only needed for type
15342   relaxation.  */
15343
15344static unsigned
15345modify_types_allowed (unsigned allowed, unsigned mods)
15346{
15347  unsigned size;
15348  enum neon_el_type type;
15349  unsigned destmask;
15350  int i;
15351
15352  destmask = 0;
15353
15354  for (i = 1; i <= N_MAX_NONSPECIAL; i <<= 1)
15355    {
15356      if (el_type_of_type_chk (&type, &size,
15357			       (enum neon_type_mask) (allowed & i)) == SUCCESS)
15358	{
15359	  neon_modify_type_size (mods, &type, &size);
15360	  destmask |= type_chk_of_el_type (type, size);
15361	}
15362    }
15363
15364  return destmask;
15365}
15366
15367/* Check type and return type classification.
15368   The manual states (paraphrase): If one datatype is given, it indicates the
15369   type given in:
15370    - the second operand, if there is one
15371    - the operand, if there is no second operand
15372    - the result, if there are no operands.
15373   This isn't quite good enough though, so we use a concept of a "key" datatype
15374   which is set on a per-instruction basis, which is the one which matters when
15375   only one data type is written.
15376   Note: this function has side-effects (e.g. filling in missing operands). All
15377   Neon instructions should call it before performing bit encoding.  */
15378
15379static struct neon_type_el
15380neon_check_type (unsigned els, enum neon_shape ns, ...)
15381{
15382  va_list ap;
15383  unsigned i, pass, key_el = 0;
15384  unsigned types[NEON_MAX_TYPE_ELS];
15385  enum neon_el_type k_type = NT_invtype;
15386  unsigned k_size = -1u;
15387  struct neon_type_el badtype = {NT_invtype, -1};
15388  unsigned key_allowed = 0;
15389
15390  /* Optional registers in Neon instructions are always (not) in operand 1.
15391     Fill in the missing operand here, if it was omitted.  */
15392  if (els > 1 && !inst.operands[1].present)
15393    inst.operands[1] = inst.operands[0];
15394
15395  /* Suck up all the varargs.  */
15396  va_start (ap, ns);
15397  for (i = 0; i < els; i++)
15398    {
15399      unsigned thisarg = va_arg (ap, unsigned);
15400      if (thisarg == N_IGNORE_TYPE)
15401	{
15402	  va_end (ap);
15403	  return badtype;
15404	}
15405      types[i] = thisarg;
15406      if ((thisarg & N_KEY) != 0)
15407	key_el = i;
15408    }
15409  va_end (ap);
15410
15411  if (inst.vectype.elems > 0)
15412    for (i = 0; i < els; i++)
15413      if (inst.operands[i].vectype.type != NT_invtype)
15414	{
15415	  first_error (_("types specified in both the mnemonic and operands"));
15416	  return badtype;
15417	}
15418
15419  /* Duplicate inst.vectype elements here as necessary.
15420     FIXME: No idea if this is exactly the same as the ARM assembler,
15421     particularly when an insn takes one register and one non-register
15422     operand. */
15423  if (inst.vectype.elems == 1 && els > 1)
15424    {
15425      unsigned j;
15426      inst.vectype.elems = els;
15427      inst.vectype.el[key_el] = inst.vectype.el[0];
15428      for (j = 0; j < els; j++)
15429	if (j != key_el)
15430	  inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
15431						  types[j]);
15432    }
15433  else if (inst.vectype.elems == 0 && els > 0)
15434    {
15435      unsigned j;
15436      /* No types were given after the mnemonic, so look for types specified
15437	 after each operand. We allow some flexibility here; as long as the
15438	 "key" operand has a type, we can infer the others.  */
15439      for (j = 0; j < els; j++)
15440	if (inst.operands[j].vectype.type != NT_invtype)
15441	  inst.vectype.el[j] = inst.operands[j].vectype;
15442
15443      if (inst.operands[key_el].vectype.type != NT_invtype)
15444	{
15445	  for (j = 0; j < els; j++)
15446	    if (inst.operands[j].vectype.type == NT_invtype)
15447	      inst.vectype.el[j] = neon_type_promote (&inst.vectype.el[key_el],
15448						      types[j]);
15449	}
15450      else
15451	{
15452	  first_error (_("operand types can't be inferred"));
15453	  return badtype;
15454	}
15455    }
15456  else if (inst.vectype.elems != els)
15457    {
15458      first_error (_("type specifier has the wrong number of parts"));
15459      return badtype;
15460    }
15461
15462  for (pass = 0; pass < 2; pass++)
15463    {
15464      for (i = 0; i < els; i++)
15465	{
15466	  unsigned thisarg = types[i];
15467	  unsigned types_allowed = ((thisarg & N_EQK) != 0 && pass != 0)
15468	    ? modify_types_allowed (key_allowed, thisarg) : thisarg;
15469	  enum neon_el_type g_type = inst.vectype.el[i].type;
15470	  unsigned g_size = inst.vectype.el[i].size;
15471
15472	  /* Decay more-specific signed & unsigned types to sign-insensitive
15473	     integer types if sign-specific variants are unavailable.  */
15474	  if ((g_type == NT_signed || g_type == NT_unsigned)
15475	      && (types_allowed & N_SU_ALL) == 0)
15476	    g_type = NT_integer;
15477
15478	  /* If only untyped args are allowed, decay any more specific types to
15479	     them. Some instructions only care about signs for some element
15480	     sizes, so handle that properly.  */
15481	  if (((types_allowed & N_UNT) == 0)
15482	      && ((g_size == 8 && (types_allowed & N_8) != 0)
15483		  || (g_size == 16 && (types_allowed & N_16) != 0)
15484		  || (g_size == 32 && (types_allowed & N_32) != 0)
15485		  || (g_size == 64 && (types_allowed & N_64) != 0)))
15486	    g_type = NT_untyped;
15487
15488	  if (pass == 0)
15489	    {
15490	      if ((thisarg & N_KEY) != 0)
15491		{
15492		  k_type = g_type;
15493		  k_size = g_size;
15494		  key_allowed = thisarg & ~N_KEY;
15495
15496		  /* Check architecture constraint on FP16 extension.  */
15497		  if (k_size == 16
15498		      && k_type == NT_float
15499		      && ! ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
15500		    {
15501		      inst.error = _(BAD_FP16);
15502		      return badtype;
15503		    }
15504		}
15505	    }
15506	  else
15507	    {
15508	      if ((thisarg & N_VFP) != 0)
15509		{
15510		  enum neon_shape_el regshape;
15511		  unsigned regwidth, match;
15512
15513		  /* PR 11136: Catch the case where we are passed a shape of NS_NULL.  */
15514		  if (ns == NS_NULL)
15515		    {
15516		      first_error (_("invalid instruction shape"));
15517		      return badtype;
15518		    }
15519		  regshape = neon_shape_tab[ns].el[i];
15520		  regwidth = neon_shape_el_size[regshape];
15521
15522		  /* In VFP mode, operands must match register widths. If we
15523		     have a key operand, use its width, else use the width of
15524		     the current operand.  */
15525		  if (k_size != -1u)
15526		    match = k_size;
15527		  else
15528		    match = g_size;
15529
15530		  /* FP16 will use a single precision register.  */
15531		  if (regwidth == 32 && match == 16)
15532		    {
15533		      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16))
15534			match = regwidth;
15535		      else
15536			{
15537			  inst.error = _(BAD_FP16);
15538			  return badtype;
15539			}
15540		    }
15541
15542		  if (regwidth != match)
15543		    {
15544		      first_error (_("operand size must match register width"));
15545		      return badtype;
15546		    }
15547		}
15548
15549	      if ((thisarg & N_EQK) == 0)
15550		{
15551		  unsigned given_type = type_chk_of_el_type (g_type, g_size);
15552
15553		  if ((given_type & types_allowed) == 0)
15554		    {
15555		      first_error (BAD_SIMD_TYPE);
15556		      return badtype;
15557		    }
15558		}
15559	      else
15560		{
15561		  enum neon_el_type mod_k_type = k_type;
15562		  unsigned mod_k_size = k_size;
15563		  neon_modify_type_size (thisarg, &mod_k_type, &mod_k_size);
15564		  if (g_type != mod_k_type || g_size != mod_k_size)
15565		    {
15566		      first_error (_("inconsistent types in Neon instruction"));
15567		      return badtype;
15568		    }
15569		}
15570	    }
15571	}
15572    }
15573
15574  return inst.vectype.el[key_el];
15575}
15576
15577/* Neon-style VFP instruction forwarding.  */
15578
15579/* Thumb VFP instructions have 0xE in the condition field.  */
15580
15581static void
15582do_vfp_cond_or_thumb (void)
15583{
15584  inst.is_neon = 1;
15585
15586  if (thumb_mode)
15587    inst.instruction |= 0xe0000000;
15588  else
15589    inst.instruction |= inst.cond << 28;
15590}
15591
15592/* Look up and encode a simple mnemonic, for use as a helper function for the
15593   Neon-style VFP syntax.  This avoids duplication of bits of the insns table,
15594   etc.  It is assumed that operand parsing has already been done, and that the
15595   operands are in the form expected by the given opcode (this isn't necessarily
15596   the same as the form in which they were parsed, hence some massaging must
15597   take place before this function is called).
15598   Checks current arch version against that in the looked-up opcode.  */
15599
15600static void
15601do_vfp_nsyn_opcode (const char *opname)
15602{
15603  const struct asm_opcode *opcode;
15604
15605  opcode = (const struct asm_opcode *) str_hash_find (arm_ops_hsh, opname);
15606
15607  if (!opcode)
15608    abort ();
15609
15610  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant,
15611		thumb_mode ? *opcode->tvariant : *opcode->avariant),
15612	      _(BAD_FPU));
15613
15614  inst.is_neon = 1;
15615
15616  if (thumb_mode)
15617    {
15618      inst.instruction = opcode->tvalue;
15619      opcode->tencode ();
15620    }
15621  else
15622    {
15623      inst.instruction = (inst.cond << 28) | opcode->avalue;
15624      opcode->aencode ();
15625    }
15626}
15627
15628static void
15629do_vfp_nsyn_add_sub (enum neon_shape rs)
15630{
15631  int is_add = (inst.instruction & 0x0fffffff) == N_MNEM_vadd;
15632
15633  if (rs == NS_FFF || rs == NS_HHH)
15634    {
15635      if (is_add)
15636	do_vfp_nsyn_opcode ("fadds");
15637      else
15638	do_vfp_nsyn_opcode ("fsubs");
15639
15640      /* ARMv8.2 fp16 instruction.  */
15641      if (rs == NS_HHH)
15642	do_scalar_fp16_v82_encode ();
15643    }
15644  else
15645    {
15646      if (is_add)
15647	do_vfp_nsyn_opcode ("faddd");
15648      else
15649	do_vfp_nsyn_opcode ("fsubd");
15650    }
15651}
15652
15653/* Check operand types to see if this is a VFP instruction, and if so call
15654   PFN ().  */
15655
15656static int
15657try_vfp_nsyn (int args, void (*pfn) (enum neon_shape))
15658{
15659  enum neon_shape rs;
15660  struct neon_type_el et;
15661
15662  switch (args)
15663    {
15664    case 2:
15665      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15666      et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15667      break;
15668
15669    case 3:
15670      rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15671      et = neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15672			    N_F_ALL | N_KEY | N_VFP);
15673      break;
15674
15675    default:
15676      abort ();
15677    }
15678
15679  if (et.type != NT_invtype)
15680    {
15681      pfn (rs);
15682      return SUCCESS;
15683    }
15684
15685  inst.error = NULL;
15686  return FAIL;
15687}
15688
15689static void
15690do_vfp_nsyn_mla_mls (enum neon_shape rs)
15691{
15692  int is_mla = (inst.instruction & 0x0fffffff) == N_MNEM_vmla;
15693
15694  if (rs == NS_FFF || rs == NS_HHH)
15695    {
15696      if (is_mla)
15697	do_vfp_nsyn_opcode ("fmacs");
15698      else
15699	do_vfp_nsyn_opcode ("fnmacs");
15700
15701      /* ARMv8.2 fp16 instruction.  */
15702      if (rs == NS_HHH)
15703	do_scalar_fp16_v82_encode ();
15704    }
15705  else
15706    {
15707      if (is_mla)
15708	do_vfp_nsyn_opcode ("fmacd");
15709      else
15710	do_vfp_nsyn_opcode ("fnmacd");
15711    }
15712}
15713
15714static void
15715do_vfp_nsyn_fma_fms (enum neon_shape rs)
15716{
15717  int is_fma = (inst.instruction & 0x0fffffff) == N_MNEM_vfma;
15718
15719  if (rs == NS_FFF || rs == NS_HHH)
15720    {
15721      if (is_fma)
15722	do_vfp_nsyn_opcode ("ffmas");
15723      else
15724	do_vfp_nsyn_opcode ("ffnmas");
15725
15726      /* ARMv8.2 fp16 instruction.  */
15727      if (rs == NS_HHH)
15728	do_scalar_fp16_v82_encode ();
15729    }
15730  else
15731    {
15732      if (is_fma)
15733	do_vfp_nsyn_opcode ("ffmad");
15734      else
15735	do_vfp_nsyn_opcode ("ffnmad");
15736    }
15737}
15738
15739static void
15740do_vfp_nsyn_mul (enum neon_shape rs)
15741{
15742  if (rs == NS_FFF || rs == NS_HHH)
15743    {
15744      do_vfp_nsyn_opcode ("fmuls");
15745
15746      /* ARMv8.2 fp16 instruction.  */
15747      if (rs == NS_HHH)
15748	do_scalar_fp16_v82_encode ();
15749    }
15750  else
15751    do_vfp_nsyn_opcode ("fmuld");
15752}
15753
15754static void
15755do_vfp_nsyn_abs_neg (enum neon_shape rs)
15756{
15757  int is_neg = (inst.instruction & 0x80) != 0;
15758  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_VFP | N_KEY);
15759
15760  if (rs == NS_FF || rs == NS_HH)
15761    {
15762      if (is_neg)
15763	do_vfp_nsyn_opcode ("fnegs");
15764      else
15765	do_vfp_nsyn_opcode ("fabss");
15766
15767      /* ARMv8.2 fp16 instruction.  */
15768      if (rs == NS_HH)
15769	do_scalar_fp16_v82_encode ();
15770    }
15771  else
15772    {
15773      if (is_neg)
15774	do_vfp_nsyn_opcode ("fnegd");
15775      else
15776	do_vfp_nsyn_opcode ("fabsd");
15777    }
15778}
15779
15780/* Encode single-precision (only!) VFP fldm/fstm instructions. Double precision
15781   insns belong to Neon, and are handled elsewhere.  */
15782
15783static void
15784do_vfp_nsyn_ldm_stm (int is_dbmode)
15785{
15786  int is_ldm = (inst.instruction & (1 << 20)) != 0;
15787  if (is_ldm)
15788    {
15789      if (is_dbmode)
15790	do_vfp_nsyn_opcode ("fldmdbs");
15791      else
15792	do_vfp_nsyn_opcode ("fldmias");
15793    }
15794  else
15795    {
15796      if (is_dbmode)
15797	do_vfp_nsyn_opcode ("fstmdbs");
15798      else
15799	do_vfp_nsyn_opcode ("fstmias");
15800    }
15801}
15802
15803static void
15804do_vfp_nsyn_sqrt (void)
15805{
15806  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
15807  neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);
15808
15809  if (rs == NS_FF || rs == NS_HH)
15810    {
15811      do_vfp_nsyn_opcode ("fsqrts");
15812
15813      /* ARMv8.2 fp16 instruction.  */
15814      if (rs == NS_HH)
15815	do_scalar_fp16_v82_encode ();
15816    }
15817  else
15818    do_vfp_nsyn_opcode ("fsqrtd");
15819}
15820
15821static void
15822do_vfp_nsyn_div (void)
15823{
15824  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15825  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15826		   N_F_ALL | N_KEY | N_VFP);
15827
15828  if (rs == NS_FFF || rs == NS_HHH)
15829    {
15830      do_vfp_nsyn_opcode ("fdivs");
15831
15832      /* ARMv8.2 fp16 instruction.  */
15833      if (rs == NS_HHH)
15834	do_scalar_fp16_v82_encode ();
15835    }
15836  else
15837    do_vfp_nsyn_opcode ("fdivd");
15838}
15839
15840static void
15841do_vfp_nsyn_nmul (void)
15842{
15843  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_NULL);
15844  neon_check_type (3, rs, N_EQK | N_VFP, N_EQK | N_VFP,
15845		   N_F_ALL | N_KEY | N_VFP);
15846
15847  if (rs == NS_FFF || rs == NS_HHH)
15848    {
15849      NEON_ENCODE (SINGLE, inst);
15850      do_vfp_sp_dyadic ();
15851
15852      /* ARMv8.2 fp16 instruction.  */
15853      if (rs == NS_HHH)
15854	do_scalar_fp16_v82_encode ();
15855    }
15856  else
15857    {
15858      NEON_ENCODE (DOUBLE, inst);
15859      do_vfp_dp_rd_rn_rm ();
15860    }
15861  do_vfp_cond_or_thumb ();
15862
15863}
15864
15865/* Turn a size (8, 16, 32, 64) into the respective bit number minus 3
15866   (0, 1, 2, 3).  */
15867
static unsigned
neon_logbits (unsigned x)
{
  /* ffs returns the 1-based index of the lowest set bit, so a power of
     two in {8, 16, 32, 64} maps to {0, 1, 2, 3}.  */
  unsigned lowest_set = ffs (x);
  return lowest_set - 4;
}
15873
15874#define LOW4(R) ((R) & 0xf)
15875#define HI1(R) (((R) >> 4) & 1)
15876#define LOW1(R) ((R) & 0x1)
15877#define HI4(R) (((R) >> 1) & 0xf)
15878
15879static unsigned
15880mve_get_vcmp_vpt_cond (struct neon_type_el et)
15881{
15882  switch (et.type)
15883    {
15884    default:
15885      first_error (BAD_EL_TYPE);
15886      return 0;
15887    case NT_float:
15888      switch (inst.operands[0].imm)
15889	{
15890	default:
15891	  first_error (_("invalid condition"));
15892	  return 0;
15893	case 0x0:
15894	  /* eq.  */
15895	  return 0;
15896	case 0x1:
15897	  /* ne.  */
15898	  return 1;
15899	case 0xa:
15900	  /* ge/  */
15901	  return 4;
15902	case 0xb:
15903	  /* lt.  */
15904	  return 5;
15905	case 0xc:
15906	  /* gt.  */
15907	  return 6;
15908	case 0xd:
15909	  /* le.  */
15910	  return 7;
15911	}
15912    case NT_integer:
15913      /* only accept eq and ne.  */
15914      if (inst.operands[0].imm > 1)
15915	{
15916	  first_error (_("invalid condition"));
15917	  return 0;
15918	}
15919      return inst.operands[0].imm;
15920    case NT_unsigned:
15921      if (inst.operands[0].imm == 0x2)
15922	return 2;
15923      else if (inst.operands[0].imm == 0x8)
15924	return 3;
15925      else
15926	{
15927	  first_error (_("invalid condition"));
15928	  return 0;
15929	}
15930    case NT_signed:
15931      switch (inst.operands[0].imm)
15932	{
15933	  default:
15934	    first_error (_("invalid condition"));
15935	    return 0;
15936	  case 0xa:
15937	    /* ge.  */
15938	    return 4;
15939	  case 0xb:
15940	    /* lt.  */
15941	    return 5;
15942	  case 0xc:
15943	    /* gt.  */
15944	    return 6;
15945	  case 0xd:
15946	    /* le.  */
15947	    return 7;
15948	}
15949    }
15950  /* Should be unreachable.  */
15951  abort ();
15952}
15953
15954/* For VCTP (create vector tail predicate) in MVE.  */
15955static void
15956do_mve_vctp (void)
15957{
15958  int dt = 0;
15959  unsigned size = 0x0;
15960
15961  if (inst.cond > COND_ALWAYS)
15962    inst.pred_insn_type = INSIDE_VPT_INSN;
15963  else
15964    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
15965
15966  /* This is a typical MVE instruction which has no type but have size 8, 16,
15967     32 and 64.  For instructions with no type, inst.vectype.el[j].type is set
15968     to NT_untyped and size is updated in inst.vectype.el[j].size.  */
15969  if ((inst.operands[0].present) && (inst.vectype.el[0].type == NT_untyped))
15970    dt = inst.vectype.el[0].size;
15971
15972  /* Setting this does not indicate an actual NEON instruction, but only
15973     indicates that the mnemonic accepts neon-style type suffixes.  */
15974  inst.is_neon = 1;
15975
15976  switch (dt)
15977    {
15978      case 8:
15979	break;
15980      case 16:
15981	size = 0x1; break;
15982      case 32:
15983	size = 0x2; break;
15984      case 64:
15985	size = 0x3; break;
15986      default:
15987	first_error (_("Type is not allowed for this instruction"));
15988    }
15989  inst.instruction |= size << 20;
15990  inst.instruction |= inst.operands[0].reg << 16;
15991}
15992
/* Encode an MVE VPT (vector predicate then) instruction.  With no
   operands this opens a bare predicate block; with operands it also
   performs a VCMP-style comparison whose condition is spread over the
   fcond bits.  */

static void
do_mve_vpt (void)
{
  /* We are dealing with a vector predicated block.  */
  if (inst.operands[0].present)
    {
      enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
			   N_EQK);

      unsigned fcond = mve_get_vcmp_vpt_cond (et);

      constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

      /* Bail out before encoding if the type check failed.  */
      if (et.type == NT_invtype)
	return;

      if (et.type == NT_float)
	{
	  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		      BAD_FPU);
	  constraint (et.size != 16 && et.size != 32, BAD_EL_TYPE);
	  /* Bit 28 is set for the 16-bit float variant; the size field
	     is fixed at 0x3 for floats.  */
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= 0x3 << 20;
	}
      else
	{
	  constraint (et.size != 8 && et.size != 16 && et.size != 32,
		      BAD_EL_TYPE);
	  inst.instruction |= 1 << 28;
	  inst.instruction |= neon_logbits (et.size) << 20;
	}

      if (inst.operands[2].isquad)
	{
	  /* Q-register comparand.  */
	  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
	  inst.instruction |= LOW4 (inst.operands[2].reg);
	  inst.instruction |= (fcond & 0x2) >> 1;
	}
      else
	{
	  /* General-purpose-register comparand; SP draws a warning.  */
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  inst.instruction |= 1 << 6;
	  inst.instruction |= (fcond & 0x2) << 4;
	  inst.instruction |= inst.operands[2].reg;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= (fcond & 0x4) << 10;
      inst.instruction |= (fcond & 0x1) << 7;

    }
    /* Open the predicate block; the mask is recovered from the encoded
       instruction bits.  */
    set_pred_insn_type (VPT_INSN);
    now_pred.cc = 0;
    now_pred.mask = ((inst.instruction & 0x00400000) >> 19)
		    | ((inst.instruction & 0xe000) >> 13);
    now_pred.warn_deprecated = FALSE;
    now_pred.type = VECTOR_PRED;
    inst.is_neon = 1;
}
16054
/* Encode an MVE VCMP instruction: compare the elements of a Q register
   against either another Q register or a general-purpose register
   (shapes NS_IQQ / NS_IQR).  */

static void
do_mve_vcmp (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
  if (!inst.operands[1].isreg || !inst.operands[1].isquad)
    first_error (_(reg_expected_msgs[REG_TYPE_MQ]));
  if (!inst.operands[2].present)
    first_error (_("MVE vector or ARM register expected"));
  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);

  /* Deal with 'else' conditional MVE's vcmp, it will be parsed as vcmpe.  */
  if ((inst.instruction & 0xffffffff) == N_MNEM_vcmpe
      && inst.operands[1].isquad)
    {
      inst.instruction = N_MNEM_vcmp;
      inst.cond = 0x10;
    }

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  enum neon_shape rs = neon_select_shape (NS_IQQ, NS_IQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_KEY | N_F_MVE | N_I_MVE | N_SU_32,
		       N_EQK);

  /* In the register form PC is only acceptable as the ZR alias.  */
  constraint (rs == NS_IQR && inst.operands[2].reg == REG_PC
	      && !inst.operands[2].iszr, BAD_PC);

  unsigned fcond = mve_get_vcmp_vpt_cond (et);

  inst.instruction = 0xee010f00;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= (fcond & 0x4) << 10;
  inst.instruction |= (fcond & 0x1) << 7;
  if (et.type == NT_float)
    {
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
		  BAD_FPU);
      /* Bit 28 is set for the 16-bit float variant; the size field is
	 fixed at 0x3 for floats.  */
      inst.instruction |= (et.size == 16) << 28;
      inst.instruction |= 0x3 << 20;
    }
  else
    {
      inst.instruction |= 1 << 28;
      inst.instruction |= neon_logbits (et.size) << 20;
    }
  if (inst.operands[2].isquad)
    {
      /* Q-register comparand.  */
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= (fcond & 0x2) >> 1;
      inst.instruction |= LOW4 (inst.operands[2].reg);
    }
  else
    {
      /* General-purpose-register comparand; SP draws a warning.  */
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 6;
      inst.instruction |= (fcond & 0x2) << 4;
      inst.instruction |= inst.operands[2].reg;
    }

  inst.is_neon = 1;
  return;
}
16122
16123static void
16124do_mve_vmaxa_vmina (void)
16125{
16126  if (inst.cond > COND_ALWAYS)
16127    inst.pred_insn_type = INSIDE_VPT_INSN;
16128  else
16129    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16130
16131  enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16132  struct neon_type_el et
16133    = neon_check_type (2, rs, N_EQK, N_KEY | N_S8 | N_S16 | N_S32);
16134
16135  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16136  inst.instruction |= neon_logbits (et.size) << 18;
16137  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16138  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16139  inst.instruction |= LOW4 (inst.operands[1].reg);
16140  inst.is_neon = 1;
16141}
16142
16143static void
16144do_mve_vfmas (void)
16145{
16146  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16147  struct neon_type_el et
16148    = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK, N_EQK);
16149
16150  if (inst.cond > COND_ALWAYS)
16151    inst.pred_insn_type = INSIDE_VPT_INSN;
16152  else
16153    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16154
16155  if (inst.operands[2].reg == REG_SP)
16156    as_tsktsk (MVE_BAD_SP);
16157  else if (inst.operands[2].reg == REG_PC)
16158    as_tsktsk (MVE_BAD_PC);
16159
16160  inst.instruction |= (et.size == 16) << 28;
16161  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16162  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16163  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16164  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16165  inst.instruction |= inst.operands[2].reg;
16166  inst.is_neon = 1;
16167}
16168
/* Encode the MVE VIDUP/VDDUP and wrapping VIWDUP/VDWDUP instructions
   (vector increment/decrement and duplicate).  */

static void
do_mve_viddup (void)
{
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The step immediate is restricted to 1, 2, 4 or 8 and is encoded in
     two bits: bit 7 selects {4,8}, bit 0 selects {2,8} (see below).  */
  unsigned imm = inst.relocs[0].exp.X_add_number;
  constraint (imm != 1 && imm != 2 && imm != 4 && imm != 8,
	      _("immediate must be either 1, 2, 4 or 8"));

  enum neon_shape rs;
  struct neon_type_el et;
  unsigned Rm;
  if (inst.instruction == M_MNEM_vddup || inst.instruction == M_MNEM_vidup)
    {
      /* Non-wrapping form: no wrap-limit register, Rm field all ones.  */
      rs = neon_select_shape (NS_QRI, NS_NULL);
      et = neon_check_type (2, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK);
      Rm = 7;
    }
  else
    {
      /* Wrapping form: the wrap register must be odd-numbered and not
	 SP/PC; only its upper three bits are encoded.  */
      constraint ((inst.operands[2].reg % 2) != 1, BAD_EVEN);
      if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
      else if (inst.operands[2].reg == REG_PC)
	first_error (BAD_PC);

      rs = neon_select_shape (NS_QRRI, NS_NULL);
      et = neon_check_type (3, rs, N_KEY | N_U8 | N_U16 | N_U32, N_EQK, N_EQK);
      Rm = inst.operands[2].reg >> 1;
    }
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= (imm > 2) << 7;
  inst.instruction |= Rm << 1;
  inst.instruction |= (imm == 2 || imm == 8);
  inst.is_neon = 1;
}
16211
16212static void
16213do_mve_vmlas (void)
16214{
16215  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
16216  struct neon_type_el et
16217    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16218
16219  if (inst.operands[2].reg == REG_PC)
16220    as_tsktsk (MVE_BAD_PC);
16221  else if (inst.operands[2].reg == REG_SP)
16222    as_tsktsk (MVE_BAD_SP);
16223
16224  if (inst.cond > COND_ALWAYS)
16225    inst.pred_insn_type = INSIDE_VPT_INSN;
16226  else
16227    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16228
16229  inst.instruction |= (et.type == NT_unsigned) << 28;
16230  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16231  inst.instruction |= neon_logbits (et.size) << 20;
16232  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16233  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16234  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16235  inst.instruction |= inst.operands[2].reg;
16236  inst.is_neon = 1;
16237}
16238
16239static void
16240do_mve_vshll (void)
16241{
16242  struct neon_type_el et
16243    = neon_check_type (2, NS_QQI, N_EQK, N_S8 | N_U8 | N_S16 | N_U16 | N_KEY);
16244
16245  if (inst.cond > COND_ALWAYS)
16246    inst.pred_insn_type = INSIDE_VPT_INSN;
16247  else
16248    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16249
16250  int imm = inst.operands[2].imm;
16251  constraint (imm < 1 || (unsigned)imm > et.size,
16252	      _("immediate value out of range"));
16253
16254  if ((unsigned)imm == et.size)
16255    {
16256      inst.instruction |= neon_logbits (et.size) << 18;
16257      inst.instruction |= 0x110001;
16258    }
16259  else
16260    {
16261      inst.instruction |= (et.size + imm) << 16;
16262      inst.instruction |= 0x800140;
16263    }
16264
16265  inst.instruction |= (et.type == NT_unsigned) << 28;
16266  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16267  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16268  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16269  inst.instruction |= LOW4 (inst.operands[1].reg);
16270  inst.is_neon = 1;
16271}
16272
16273static void
16274do_mve_vshlc (void)
16275{
16276  if (inst.cond > COND_ALWAYS)
16277    inst.pred_insn_type = INSIDE_VPT_INSN;
16278  else
16279    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16280
16281  if (inst.operands[1].reg == REG_PC)
16282    as_tsktsk (MVE_BAD_PC);
16283  else if (inst.operands[1].reg == REG_SP)
16284    as_tsktsk (MVE_BAD_SP);
16285
16286  int imm = inst.operands[2].imm;
16287  constraint (imm < 1 || imm > 32, _("immediate value out of range"));
16288
16289  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16290  inst.instruction |= (imm & 0x1f) << 16;
16291  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16292  inst.instruction |= inst.operands[1].reg;
16293  inst.is_neon = 1;
16294}
16295
16296static void
16297do_mve_vshrn (void)
16298{
16299  unsigned types;
16300  switch (inst.instruction)
16301    {
16302    case M_MNEM_vshrnt:
16303    case M_MNEM_vshrnb:
16304    case M_MNEM_vrshrnt:
16305    case M_MNEM_vrshrnb:
16306      types = N_I16 | N_I32;
16307      break;
16308    case M_MNEM_vqshrnt:
16309    case M_MNEM_vqshrnb:
16310    case M_MNEM_vqrshrnt:
16311    case M_MNEM_vqrshrnb:
16312      types = N_U16 | N_U32 | N_S16 | N_S32;
16313      break;
16314    case M_MNEM_vqshrunt:
16315    case M_MNEM_vqshrunb:
16316    case M_MNEM_vqrshrunt:
16317    case M_MNEM_vqrshrunb:
16318      types = N_S16 | N_S32;
16319      break;
16320    default:
16321      abort ();
16322    }
16323
16324  struct neon_type_el et = neon_check_type (2, NS_QQI, N_EQK, types | N_KEY);
16325
16326  if (inst.cond > COND_ALWAYS)
16327    inst.pred_insn_type = INSIDE_VPT_INSN;
16328  else
16329    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16330
16331  unsigned Qd = inst.operands[0].reg;
16332  unsigned Qm = inst.operands[1].reg;
16333  unsigned imm = inst.operands[2].imm;
16334  constraint (imm < 1 || ((unsigned) imm) > (et.size / 2),
16335	      et.size == 16
16336	      ? _("immediate operand expected in the range [1,8]")
16337	      : _("immediate operand expected in the range [1,16]"));
16338
16339  inst.instruction |= (et.type == NT_unsigned) << 28;
16340  inst.instruction |= HI1 (Qd) << 22;
16341  inst.instruction |= (et.size - imm) << 16;
16342  inst.instruction |= LOW4 (Qd) << 12;
16343  inst.instruction |= HI1 (Qm) << 5;
16344  inst.instruction |= LOW4 (Qm);
16345  inst.is_neon = 1;
16346}
16347
16348static void
16349do_mve_vqmovn (void)
16350{
16351  struct neon_type_el et;
16352  if (inst.instruction == M_MNEM_vqmovnt
16353     || inst.instruction == M_MNEM_vqmovnb)
16354    et = neon_check_type (2, NS_QQ, N_EQK,
16355			  N_U16 | N_U32 | N_S16 | N_S32 | N_KEY);
16356  else
16357    et = neon_check_type (2, NS_QQ, N_EQK, N_S16 | N_S32 | N_KEY);
16358
16359  if (inst.cond > COND_ALWAYS)
16360    inst.pred_insn_type = INSIDE_VPT_INSN;
16361  else
16362    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16363
16364  inst.instruction |= (et.type == NT_unsigned) << 28;
16365  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16366  inst.instruction |= (et.size == 32) << 18;
16367  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16368  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16369  inst.instruction |= LOW4 (inst.operands[1].reg);
16370  inst.is_neon = 1;
16371}
16372
16373static void
16374do_mve_vpsel (void)
16375{
16376  neon_select_shape (NS_QQQ, NS_NULL);
16377
16378  if (inst.cond > COND_ALWAYS)
16379    inst.pred_insn_type = INSIDE_VPT_INSN;
16380  else
16381    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16382
16383  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16384  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16385  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16386  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16387  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16388  inst.instruction |= LOW4 (inst.operands[2].reg);
16389  inst.is_neon = 1;
16390}
16391
16392static void
16393do_mve_vpnot (void)
16394{
16395  if (inst.cond > COND_ALWAYS)
16396    inst.pred_insn_type = INSIDE_VPT_INSN;
16397  else
16398    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16399}
16400
16401static void
16402do_mve_vmaxnma_vminnma (void)
16403{
16404  enum neon_shape rs = neon_select_shape (NS_QQ, NS_NULL);
16405  struct neon_type_el et
16406    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
16407
16408  if (inst.cond > COND_ALWAYS)
16409    inst.pred_insn_type = INSIDE_VPT_INSN;
16410  else
16411    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16412
16413  inst.instruction |= (et.size == 16) << 28;
16414  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16415  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16416  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16417  inst.instruction |= LOW4 (inst.operands[1].reg);
16418  inst.is_neon = 1;
16419}
16420
/* Encode MVE VCMUL (vector complex multiply) with a rotation of 0, 90,
   180 or 270 degrees.  */

static void
do_mve_vcmul (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_F_MVE | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));

  /* For the 32-bit variant the destination must not overlap either
     source; only warn, since the encoding is still representable.  */
  if (et.size == 32 && (inst.operands[0].reg == inst.operands[1].reg
			|| inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  inst.instruction |= (et.size == 32) << 28;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Rotation is split over two bits: bit 12 for {180,270}, bit 0 for
     {90,270}.  */
  inst.instruction |= (rot > 90) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[2].reg);
  inst.instruction |= (rot == 90 || rot == 270);
  inst.is_neon = 1;
}
16452
/* To handle the Low Overhead Loop instructions
   in Armv8.1-M Mainline and MVE.  */
static void
do_t_loloop (void)
{
  unsigned long insn = inst.instruction;

  inst.instruction = THUMB_OP32 (inst.instruction);

  /* LCTP takes no operands and needs no further encoding.  */
  if (insn == T_MNEM_lctp)
    return;

  set_pred_insn_type (MVE_OUTSIDE_PRED_INSN);

  /* The tail-predicated variants carry an element-size suffix which is
     encoded in bits 20-21.  */
  if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
    {
      struct neon_type_el et
       = neon_check_type (2, NS_RR, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.is_neon = 1;
    }

  switch (insn)
    {
    case T_MNEM_letp:
      /* LETP always requires LR to be written explicitly.  */
      constraint (!inst.operands[0].present,
		  _("expected LR"));
      /* fall through.  */
    case T_MNEM_le:
      /* le <label>.  */
      if (!inst.operands[0].present)
       inst.instruction |= 1 << 21;

      v8_1_loop_reloc (TRUE);
      break;

    case T_MNEM_wls:
    case T_MNEM_wlstp:
      v8_1_loop_reloc (FALSE);
      /* fall through.  */
    case T_MNEM_dlstp:
    case T_MNEM_dls:
      constraint (inst.operands[1].isreg != 1, BAD_ARGS);

      /* PC is a hard error for the tail-predicated forms but only a
	 warning otherwise; SP always warns.  */
      if (insn == T_MNEM_wlstp || insn == T_MNEM_dlstp)
       constraint (inst.operands[1].reg == REG_PC, BAD_PC);
      else if (inst.operands[1].reg == REG_PC)
       as_tsktsk (MVE_BAD_PC);
      if (inst.operands[1].reg == REG_SP)
       as_tsktsk (MVE_BAD_SP);

      inst.instruction |= (inst.operands[1].reg << 16);
      break;

    default:
      abort ();
    }
}
16511
16512
/* Handle Neon-syntax VCMP/VCMPE.  A non-register first operand means
   this is really an MVE VCMP; otherwise encode the VFP compare, using
   the compare-against-zero mnemonics when the second operand is an
   immediate.  */

static void
do_vfp_nsyn_cmp (void)
{
  enum neon_shape rs;
  if (!inst.operands[0].isreg)
    {
      do_mve_vcmp ();
      return;
    }
  else
    {
      constraint (inst.operands[2].present, BAD_SYNTAX);
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
		  BAD_FPU);
    }

  if (inst.operands[1].isreg)
    {
      /* Register-register compare.  */
      rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_NULL);
      neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY | N_VFP);

      if (rs == NS_FF || rs == NS_HH)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_monadic ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd_rm ();
	}
    }
  else
    {
      /* Compare against zero: rewrite the mnemonic index to the
	 corresponding vcmpz/vcmpez entry.  */
      rs = neon_select_shape (NS_HI, NS_FI, NS_DI, NS_NULL);
      neon_check_type (2, rs, N_F_ALL | N_KEY | N_VFP, N_EQK);

      switch (inst.instruction & 0x0fffffff)
	{
	case N_MNEM_vcmp:
	  inst.instruction += N_MNEM_vcmpz - N_MNEM_vcmp;
	  break;
	case N_MNEM_vcmpe:
	  inst.instruction += N_MNEM_vcmpez - N_MNEM_vcmpe;
	  break;
	default:
	  abort ();
	}

      if (rs == NS_FI || rs == NS_HI)
	{
	  NEON_ENCODE (SINGLE, inst);
	  do_vfp_sp_compare_z ();
	}
      else
	{
	  NEON_ENCODE (DOUBLE, inst);
	  do_vfp_dp_rd ();
	}
    }
  do_vfp_cond_or_thumb ();

  /* ARMv8.2 fp16 instruction.  */
  if (rs == NS_HI || rs == NS_HH)
    do_scalar_fp16_v82_encode ();
}
16579
16580static void
16581nsyn_insert_sp (void)
16582{
16583  inst.operands[1] = inst.operands[0];
16584  memset (&inst.operands[0], '\0', sizeof (inst.operands[0]));
16585  inst.operands[0].reg = REG_SP;
16586  inst.operands[0].isreg = 1;
16587  inst.operands[0].writeback = 1;
16588  inst.operands[0].present = 1;
16589}
16590
16591/* Fix up Neon data-processing instructions, ORing in the correct bits for
16592   ARM mode or Thumb mode and moving the encoded bit 24 to bit 28.  */
16593
16594static void
16595neon_dp_fixup (struct arm_it* insn)
16596{
16597  unsigned int i = insn->instruction;
16598  insn->is_neon = 1;
16599
16600  if (thumb_mode)
16601    {
16602      /* The U bit is at bit 24 by default. Move to bit 28 in Thumb mode.  */
16603      if (i & (1 << 24))
16604	i |= 1 << 28;
16605
16606      i &= ~(1 << 24);
16607
16608      i |= 0xef000000;
16609    }
16610  else
16611    i |= 0xf2000000;
16612
16613  insn->instruction = i;
16614}
16615
16616static void
16617mve_encode_qqr (int size, int U, int fp)
16618{
16619  if (inst.operands[2].reg == REG_SP)
16620    as_tsktsk (MVE_BAD_SP);
16621  else if (inst.operands[2].reg == REG_PC)
16622    as_tsktsk (MVE_BAD_PC);
16623
16624  if (fp)
16625    {
16626      /* vadd.  */
16627      if (((unsigned)inst.instruction) == 0xd00)
16628	inst.instruction = 0xee300f40;
16629      /* vsub.  */
16630      else if (((unsigned)inst.instruction) == 0x200d00)
16631	inst.instruction = 0xee301f40;
16632      /* vmul.  */
16633      else if (((unsigned)inst.instruction) == 0x1000d10)
16634	inst.instruction = 0xee310e60;
16635
16636      /* Setting size which is 1 for F16 and 0 for F32.  */
16637      inst.instruction |= (size == 16) << 28;
16638    }
16639  else
16640    {
16641      /* vadd.  */
16642      if (((unsigned)inst.instruction) == 0x800)
16643	inst.instruction = 0xee010f40;
16644      /* vsub.  */
16645      else if (((unsigned)inst.instruction) == 0x1000800)
16646	inst.instruction = 0xee011f40;
16647      /* vhadd.  */
16648      else if (((unsigned)inst.instruction) == 0)
16649	inst.instruction = 0xee000f40;
16650      /* vhsub.  */
16651      else if (((unsigned)inst.instruction) == 0x200)
16652	inst.instruction = 0xee001f40;
16653      /* vmla.  */
16654      else if (((unsigned)inst.instruction) == 0x900)
16655	inst.instruction = 0xee010e40;
16656      /* vmul.  */
16657      else if (((unsigned)inst.instruction) == 0x910)
16658	inst.instruction = 0xee011e60;
16659      /* vqadd.  */
16660      else if (((unsigned)inst.instruction) == 0x10)
16661	inst.instruction = 0xee000f60;
16662      /* vqsub.  */
16663      else if (((unsigned)inst.instruction) == 0x210)
16664	inst.instruction = 0xee001f60;
16665      /* vqrdmlah.  */
16666      else if (((unsigned)inst.instruction) == 0x3000b10)
16667	inst.instruction = 0xee000e40;
16668      /* vqdmulh.  */
16669      else if (((unsigned)inst.instruction) == 0x0000b00)
16670	inst.instruction = 0xee010e60;
16671      /* vqrdmulh.  */
16672      else if (((unsigned)inst.instruction) == 0x1000b00)
16673	inst.instruction = 0xfe010e60;
16674
16675      /* Set U-bit.  */
16676      inst.instruction |= U << 28;
16677
16678      /* Setting bits for size.  */
16679      inst.instruction |= neon_logbits (size) << 20;
16680    }
16681  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16682  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16683  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16684  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16685  inst.instruction |= inst.operands[2].reg;
16686  inst.is_neon = 1;
16687}
16688
16689static void
16690mve_encode_rqq (unsigned bit28, unsigned size)
16691{
16692  inst.instruction |= bit28 << 28;
16693  inst.instruction |= neon_logbits (size) << 20;
16694  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16695  inst.instruction |= inst.operands[0].reg << 12;
16696  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16697  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16698  inst.instruction |= LOW4 (inst.operands[2].reg);
16699  inst.is_neon = 1;
16700}
16701
16702static void
16703mve_encode_qqq (int ubit, int size)
16704{
16705
16706  inst.instruction |= (ubit != 0) << 28;
16707  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16708  inst.instruction |= neon_logbits (size) << 20;
16709  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16710  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16711  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16712  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16713  inst.instruction |= LOW4 (inst.operands[2].reg);
16714
16715  inst.is_neon = 1;
16716}
16717
16718static void
16719mve_encode_rq (unsigned bit28, unsigned size)
16720{
16721  inst.instruction |= bit28 << 28;
16722  inst.instruction |= neon_logbits (size) << 18;
16723  inst.instruction |= inst.operands[0].reg << 12;
16724  inst.instruction |= LOW4 (inst.operands[1].reg);
16725  inst.is_neon = 1;
16726}
16727
16728static void
16729mve_encode_rrqq (unsigned U, unsigned size)
16730{
16731  constraint (inst.operands[3].reg > 14, MVE_BAD_QREG);
16732
16733  inst.instruction |= U << 28;
16734  inst.instruction |= (inst.operands[1].reg >> 1) << 20;
16735  inst.instruction |= LOW4 (inst.operands[2].reg) << 16;
16736  inst.instruction |= (size == 32) << 16;
16737  inst.instruction |= inst.operands[0].reg << 12;
16738  inst.instruction |= HI1 (inst.operands[2].reg) << 7;
16739  inst.instruction |= inst.operands[3].reg;
16740  inst.is_neon = 1;
16741}
16742
16743/* Helper function for neon_three_same handling the operands.  */
16744static void
16745neon_three_args (int isquad)
16746{
16747  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16748  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16749  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
16750  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
16751  inst.instruction |= LOW4 (inst.operands[2].reg);
16752  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
16753  inst.instruction |= (isquad != 0) << 6;
16754  inst.is_neon = 1;
16755}
16756
16757/* Encode insns with bit pattern:
16758
16759  |28/24|23|22 |21 20|19 16|15 12|11    8|7|6|5|4|3  0|
16760  |  U  |x |D  |size | Rn  | Rd  |x x x x|N|Q|M|x| Rm |
16761
16762  SIZE is passed in bits. -1 means size field isn't changed, in case it has a
16763  different meaning for some instruction.  */
16764
16765static void
16766neon_three_same (int isquad, int ubit, int size)
16767{
16768  neon_three_args (isquad);
16769  inst.instruction |= (ubit != 0) << 24;
16770  if (size != -1)
16771    inst.instruction |= neon_logbits (size) << 20;
16772
16773  neon_dp_fixup (&inst);
16774}
16775
16776/* Encode instructions of the form:
16777
16778  |28/24|23|22|21 20|19 18|17 16|15 12|11      7|6|5|4|3  0|
16779  |  U  |x |D |x  x |size |x  x | Rd  |x x x x x|Q|M|x| Rm |
16780
16781  Don't write size if SIZE == -1.  */
16782
16783static void
16784neon_two_same (int qbit, int ubit, int size)
16785{
16786  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16787  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16788  inst.instruction |= LOW4 (inst.operands[1].reg);
16789  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16790  inst.instruction |= (qbit != 0) << 6;
16791  inst.instruction |= (ubit != 0) << 24;
16792
16793  if (size != -1)
16794    inst.instruction |= neon_logbits (size) << 18;
16795
16796  neon_dp_fixup (&inst);
16797}
16798
/* Bitmask flags controlling which checks vfp_or_neon_is_neon performs.  */
enum vfp_or_neon_is_neon_bits
{
NEON_CHECK_CC = 1,	/* Check/fix up the condition code field.  */
NEON_CHECK_ARCH = 2,	/* Require the Neon v1 extension.  */
NEON_CHECK_ARCH8 = 4	/* Require the ARMv8 Neon extension.  */
};
16805
16806/* Call this function if an instruction which may have belonged to the VFP or
16807 Neon instruction sets, but turned out to be a Neon instruction (due to the
16808 operand types involved, etc.). We have to check and/or fix-up a couple of
16809 things:
16810
16811   - Make sure the user hasn't attempted to make a Neon instruction
16812     conditional.
16813   - Alter the value in the condition code field if necessary.
16814   - Make sure that the arch supports Neon instructions.
16815
16816 Which of these operations take place depends on bits from enum
16817 vfp_or_neon_is_neon_bits.
16818
16819 WARNING: This function has side effects! If NEON_CHECK_CC is used and the
16820 current instruction's condition is COND_ALWAYS, the condition field is
16821 changed to inst.uncond_value.  This is necessary because instructions shared
16822 between VFP and Neon may be conditional for the VFP variants only, and the
16823 unconditional Neon version must have, e.g., 0xF in the condition field.  */
16824
16825static int
16826vfp_or_neon_is_neon (unsigned check)
16827{
16828/* Conditions are always legal in Thumb mode (IT blocks).  */
16829if (!thumb_mode && (check & NEON_CHECK_CC))
16830  {
16831    if (inst.cond != COND_ALWAYS)
16832      {
16833	first_error (_(BAD_COND));
16834	return FAIL;
16835      }
16836    if (inst.uncond_value != -1u)
16837      inst.instruction |= inst.uncond_value << 28;
16838  }
16839
16840
16841  if (((check & NEON_CHECK_ARCH) && !mark_feature_used (&fpu_neon_ext_v1))
16842      || ((check & NEON_CHECK_ARCH8)
16843	  && !mark_feature_used (&fpu_neon_ext_armv8)))
16844    {
16845      first_error (_(BAD_FPU));
16846      return FAIL;
16847    }
16848
16849return SUCCESS;
16850}
16851
16852
/* Return TRUE if the SIMD instruction is available for the current
   cpu_variant.  FP is set to TRUE if this is a SIMD floating-point
   instruction.  CHECK contains the set of bits to pass to
   vfp_or_neon_is_neon for the NEON specific checks.  */
16857
16858static bfd_boolean
16859check_simd_pred_availability (int fp, unsigned check)
16860{
16861if (inst.cond > COND_ALWAYS)
16862  {
16863    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16864      {
16865	inst.error = BAD_FPU;
16866	return FALSE;
16867      }
16868    inst.pred_insn_type = INSIDE_VPT_INSN;
16869  }
16870else if (inst.cond < COND_ALWAYS)
16871  {
16872    if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16873      inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16874    else if (vfp_or_neon_is_neon (check) == FAIL)
16875      return FALSE;
16876  }
16877else
16878  {
16879    if (!ARM_CPU_HAS_FEATURE (cpu_variant, fp ? mve_fp_ext : mve_ext)
16880	&& vfp_or_neon_is_neon (check) == FAIL)
16881      return FALSE;
16882
16883    if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16884      inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
16885  }
16886return TRUE;
16887}
16888
16889/* Neon instruction encoders, in approximate order of appearance.  */
16890
16891static void
16892do_neon_dyadic_i_su (void)
16893{
16894  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
16895   return;
16896
16897  enum neon_shape rs;
16898  struct neon_type_el et;
16899  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16900    rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
16901  else
16902    rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16903
16904  et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_32 | N_KEY);
16905
16906
16907  if (rs != NS_QQR)
16908    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16909  else
16910    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16911}
16912
16913static void
16914do_neon_dyadic_i64_su (void)
16915{
16916  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
16917    return;
16918  enum neon_shape rs;
16919  struct neon_type_el et;
16920  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
16921    {
16922      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
16923      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
16924    }
16925  else
16926    {
16927      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
16928      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
16929    }
16930  if (rs == NS_QQR)
16931    mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
16932  else
16933    neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
16934}
16935
16936static void
16937neon_imm_shift (int write_ubit, int uval, int isquad, struct neon_type_el et,
16938		unsigned immbits)
16939{
16940  unsigned size = et.size >> 3;
16941  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
16942  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
16943  inst.instruction |= LOW4 (inst.operands[1].reg);
16944  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
16945  inst.instruction |= (isquad != 0) << 6;
16946  inst.instruction |= immbits << 16;
16947  inst.instruction |= (size >> 3) << 7;
16948  inst.instruction |= (size & 0x7) << 19;
16949  if (write_ubit)
16950    inst.instruction |= (uval != 0) << 24;
16951
16952  neon_dp_fixup (&inst);
16953}
16954
/* Encode VSHL: the immediate form, the MVE vector-by-GPR form, and the
   three-register form, for both Neon and MVE.  */

static void
do_neon_shl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: vshl.<type> <Qd|Dd>, <Qm|Dm>, #imm.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_I_ALL);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}


      if (rs == NS_QQR)
	{
	  /* MVE vector-by-GPR form: destination and first source must be
	     the same register; SP/PC shift sources are only warned about
	     (UNPREDICTABLE rather than UNDEFINED).  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311e60;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* VSHL/VQSHL 3-register variants have syntax such as:
	       vshl.xx Dd, Dm, Dn
	     whereas other 3-register operations encoded by neon_three_same have
	     syntax like:
	       vadd.xx Dd, Dn, Dm
	     (i.e. with Dn & Dm reversed). Swap operands[1].reg and
	     operands[2].reg here.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17034
/* Encode VQSHL: the immediate form, the MVE vector-by-GPR form, and the
   three-register form, for both Neon and MVE.  Mirrors do_neon_shl.  */

static void
do_neon_qshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (!inst.operands[2].isreg)
    {
      /* Immediate-shift form: vqshl.<type> <Qd|Dd>, <Qm|Dm>, #imm.  */
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_KEY | N_SU_MVE);
	}
      else
	{
	  rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
	  et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
	}
      int imm = inst.operands[2].imm;

      /* The shift count must be strictly less than the element size.  */
      constraint (imm < 0 || (unsigned)imm >= et.size,
		  _("immediate out of range for shift"));
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et, imm);
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_MVE | N_KEY, N_EQK | N_EQK);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs, N_EQK, N_SU_ALL | N_KEY, N_EQK | N_SGN);
	}

      if (rs == NS_QQR)
	{
	  /* MVE vector-by-GPR form: destination and first source must be
	     the same register; SP/PC shift sources are only warned about.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		       _("invalid instruction shape"));
	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  inst.instruction = 0xee311ee0;
	  inst.instruction |= (et.type == NT_unsigned) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= neon_logbits (et.size) << 18;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	}
      else
	{
	  unsigned int tmp;

	  /* See note in do_neon_shl.  */
	  tmp = inst.operands[2].reg;
	  inst.operands[2].reg = inst.operands[1].reg;
	  inst.operands[1].reg = tmp;
	  NEON_ENCODE (INTEGER, inst);
	  neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
	}
    }
}
17108
/* Encode VRSHL/VQRSHL (rounding shifts), including the MVE
   vector-by-GPR form.  */

static void
do_neon_rshl (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  enum neon_shape rs;
  struct neon_type_el et;
  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_ALL | N_KEY);
    }

  unsigned int tmp;

  if (rs == NS_QQR)
    {
      /* MVE vector-by-GPR form: SP/PC shift sources are only warned
	 about, and the destination must equal the first source.  */
      if (inst.operands[2].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[2].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);

      constraint (inst.operands[0].reg != inst.operands[1].reg,
		  _("invalid instruction shape"));

      if (inst.instruction == 0x0000510)
	/* We are dealing with vqrshl.  */
	inst.instruction = 0xee331ee0;
      else
	/* We are dealing with vrshl.  */
	inst.instruction = 0xee331e60;

      inst.instruction |= (et.type == NT_unsigned) << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 18;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= inst.operands[2].reg;
      inst.is_neon = 1;
    }
  else
    {
      /* Swap Dn and Dm; see the syntax note in do_neon_shl.  */
      tmp = inst.operands[2].reg;
      inst.operands[2].reg = inst.operands[1].reg;
      inst.operands[1].reg = tmp;
      neon_three_same (neon_quad (rs), et.type == NT_unsigned, et.size);
    }
}
17162
17163static int
17164neon_cmode_for_logic_imm (unsigned immediate, unsigned *immbits, int size)
17165{
17166  /* Handle .I8 pseudo-instructions.  */
17167  if (size == 8)
17168    {
17169      /* Unfortunately, this will make everything apart from zero out-of-range.
17170	 FIXME is this the intended semantics? There doesn't seem much point in
17171	 accepting .I8 if so.  */
17172      immediate |= immediate << 8;
17173      size = 16;
17174    }
17175
17176  if (size >= 32)
17177    {
17178      if (immediate == (immediate & 0x000000ff))
17179	{
17180	  *immbits = immediate;
17181	  return 0x1;
17182	}
17183      else if (immediate == (immediate & 0x0000ff00))
17184	{
17185	  *immbits = immediate >> 8;
17186	  return 0x3;
17187	}
17188      else if (immediate == (immediate & 0x00ff0000))
17189	{
17190	  *immbits = immediate >> 16;
17191	  return 0x5;
17192	}
17193      else if (immediate == (immediate & 0xff000000))
17194	{
17195	  *immbits = immediate >> 24;
17196	  return 0x7;
17197	}
17198      if ((immediate & 0xffff) != (immediate >> 16))
17199	goto bad_immediate;
17200      immediate &= 0xffff;
17201    }
17202
17203  if (immediate == (immediate & 0x000000ff))
17204    {
17205      *immbits = immediate;
17206      return 0x9;
17207    }
17208  else if (immediate == (immediate & 0x0000ff00))
17209    {
17210      *immbits = immediate >> 8;
17211      return 0xb;
17212    }
17213
17214  bad_immediate:
17215  first_error (_("immediate value out of range"));
17216  return FAIL;
17217}
17218
/* Encode the Neon/MVE bitwise logic instructions (VAND, VBIC, VORR,
   VORN, ...), in both the three-register form and the immediate form
   (where VAND/VORN are pseudo-instructions for VBIC/VORR with the
   immediate inverted).  */

static void
do_neon_logic (void)
{
  if (inst.operands[2].present && inst.operands[2].isreg)
    {
      /* Three-register form.  */
      enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      if (rs == NS_QQQ
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQQ
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      neon_check_type (3, rs, N_IGNORE_TYPE);
      /* U bit and size field were set as part of the bitmask.  */
      NEON_ENCODE (INTEGER, inst);
      neon_three_same (neon_quad (rs), 0, -1);
    }
  else
    {
      /* Immediate form: either "op Dd, Dd, #imm" or "op Dd, #imm".  */
      const int three_ops_form = (inst.operands[2].present
				  && !inst.operands[2].isreg);
      const int immoperand = (three_ops_form ? 2 : 1);
      enum neon_shape rs = (three_ops_form
			    ? neon_select_shape (NS_DDI, NS_QQI, NS_NULL)
			    : neon_select_shape (NS_DI, NS_QI, NS_NULL));
      /* Because neon_select_shape makes the second operand a copy of the first
	 if the second operand is not present.  */
      if (rs == NS_QQI
	  && !check_simd_pred_availability (FALSE,
					    NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
      else if (rs != NS_QQI
	       && !ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1))
	first_error (BAD_FPU);

      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	et = neon_check_type (2, rs, N_I32 | N_I16 | N_KEY, N_EQK);
      else
	et = neon_check_type (2, rs, N_I8 | N_I16 | N_I32 | N_I64 | N_F32
			      | N_KEY, N_EQK);

      if (et.type == NT_invtype)
	return;
      enum neon_opc opcode = (enum neon_opc) inst.instruction & 0x0fffffff;
      unsigned immbits;
      int cmode;


      if (three_ops_form)
	constraint (inst.operands[0].reg != inst.operands[1].reg,
		    _("first and second operands shall be the same register"));

      NEON_ENCODE (IMMED, inst);

      immbits = inst.operands[immoperand].imm;
      if (et.size == 64)
	{
	  /* .i64 is a pseudo-op, so the immediate must be a repeating
	     pattern.  */
	  if (immbits != (inst.operands[immoperand].regisimm ?
			  inst.operands[immoperand].reg : 0))
	    {
	      /* Set immbits to an invalid constant.  */
	      immbits = 0xdeadbeef;
	    }
	}

      switch (opcode)
	{
	case N_MNEM_vbic:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorr:
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vand:
	  /* Pseudo-instruction for VBIC.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	case N_MNEM_vorn:
	  /* Pseudo-instruction for VORR.  */
	  neon_invert_size (&immbits, 0, et.size);
	  cmode = neon_cmode_for_logic_imm (immbits, &immbits, et.size);
	  break;

	default:
	  abort ();
	}

      if (cmode == FAIL)
	return;

      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= cmode << 8;
      neon_write_immbits (immbits);

      neon_dp_fixup (&inst);
    }
}
17327
/* Encode the bitfield-selection instructions (VBSL and friends), whose
   operands are untyped: only the register shape matters.  */

static void
do_neon_bitfield (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_IGNORE_TYPE);
  neon_three_same (neon_quad (rs), 0, -1);
}
17335
17336static void
17337neon_dyadic_misc (enum neon_el_type ubit_meaning, unsigned types,
17338		  unsigned destbits)
17339{
17340  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
17341  struct neon_type_el et = neon_check_type (3, rs, N_EQK | destbits, N_EQK,
17342					    types | N_KEY);
17343  if (et.type == NT_float)
17344    {
17345      NEON_ENCODE (FLOAT, inst);
17346      if (rs == NS_QQR)
17347	mve_encode_qqr (et.size, 0, 1);
17348      else
17349	neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
17350    }
17351  else
17352    {
17353      NEON_ENCODE (INTEGER, inst);
17354      if (rs == NS_QQR)
17355	mve_encode_qqr (et.size, et.type == ubit_meaning, 0);
17356      else
17357	neon_three_same (neon_quad (rs), et.type == ubit_meaning, et.size);
17358    }
17359}
17360
17361
/* Signed/unsigned integer and float dyadic operations (D registers
   only).  */

static void
do_neon_dyadic_if_su_d (void)
{
  /* This version only allow D registers, but that constraint is enforced during
     operand parsing so we don't need to do anything extra here.  */
  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17369
/* Integer and float dyadic operations (D registers only).  */

static void
do_neon_dyadic_if_i_d (void)
{
  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17377
/* Encode an MVE VLDR/VSTR with a Q-register base plus immediate offset
   ([Qn, #+/-imm]).  SIZE is the access size in bits, ELSIZE the element
   size, LOAD nonzero for VLDR.  */

static void
do_mve_vstr_vldr_QI (int size, int elsize, int load)
{
  constraint (size < 32, BAD_ADDR_MODE);
  constraint (size != elsize, BAD_EL_TYPE);
  constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
  constraint (!inst.operands[1].preind, BAD_ADDR_MODE);
  constraint (load && inst.operands[0].reg == inst.operands[1].reg,
	      _("destination register and offset register may not be the"
		" same"));

  /* Split the offset into a magnitude and an add/subtract flag.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }
  /* The offset is a 7-bit value scaled by the access size.  */
  constraint ((imm % (size / 8) != 0)
	      || imm > (0x7f << neon_logbits (size)),
	      (size == 32) ? _("immediate must be a multiple of 4 in the"
			       " range of +/-[0,508]")
			   : _("immediate must be a multiple of 8 in the"
			       " range of +/-[0,1016]"));
  inst.instruction |= 0x11 << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= 1 << 12;
  inst.instruction |= (size == 64) << 8;
  /* Clear the low byte before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff00;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= imm >> neon_logbits (size);
}
17414
/* Encode an MVE VLDR/VSTR with a GPR base plus a Q-register offset
   ([Rn, Qm] with an optional shift).  SIZE is the access size in bits,
   ELSIZE the element size, LOAD nonzero for VLDR.  */

static void
do_mve_vstr_vldr_RQ (int size, int elsize, int load)
{
    /* The offset-shift amount is carried in the upper bits of imm.  */
    unsigned os = inst.operands[1].imm >> 5;
    unsigned type = inst.vectype.el[0].type;
    constraint (os != 0 && size == 8,
		_("can not shift offsets when accessing less than half-word"));
    constraint (os && os != neon_logbits (size),
		_("shift immediate must be 1, 2 or 3 for half-word, word"
		  " or double-word accesses respectively"));
    if (inst.operands[1].reg == REG_PC)
      as_tsktsk (MVE_BAD_PC);

    /* The element type must be able to hold the access size.  */
    switch (size)
      {
      case 8:
	constraint (elsize >= 64, BAD_EL_TYPE);
	break;
      case 16:
	constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
	break;
      case 32:
      case 64:
	constraint (elsize != size, BAD_EL_TYPE);
	break;
      default:
	break;
      }
    constraint (inst.operands[1].writeback || !inst.operands[1].preind,
		BAD_ADDR_MODE);
    if (load)
      {
	constraint (inst.operands[0].reg == (inst.operands[1].imm & 0x1f),
		    _("destination register and offset register may not be"
		    " the same"));
	/* Widening loads need a signedness; full-width loads take none.  */
	constraint (size == elsize && type == NT_signed, BAD_EL_TYPE);
	constraint (size != elsize && type != NT_unsigned && type != NT_signed,
		    BAD_EL_TYPE);
	inst.instruction |= ((size == elsize) || (type == NT_unsigned)) << 28;
      }
    else
      {
	constraint (type != NT_untyped, BAD_EL_TYPE);
      }

    inst.instruction |= 1 << 23;
    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
    inst.instruction |= inst.operands[1].reg << 16;
    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
    inst.instruction |= neon_logbits (elsize) << 7;
    inst.instruction |= HI1 (inst.operands[1].imm) << 5;
    inst.instruction |= LOW4 (inst.operands[1].imm);
    inst.instruction |= !!os;
}
17469
/* Encode an MVE VLDR/VSTR with a GPR base plus immediate offset
   ([Rn, #+/-imm]).  SIZE is the access size in bits, ELSIZE the element
   size, LOAD nonzero for VLDR.  */

static void
do_mve_vstr_vldr_RI (int size, int elsize, int load)
{
  enum neon_el_type type = inst.vectype.el[0].type;

  constraint (size >= 64, BAD_ADDR_MODE);
  switch (size)
    {
    case 16:
      constraint (elsize < 16 || elsize >= 64, BAD_EL_TYPE);
      break;
    case 32:
      constraint (elsize != size, BAD_EL_TYPE);
      break;
    default:
      break;
    }
  /* Widening loads must be signed or unsigned; narrowing stores are
     untyped.  */
  if (load)
    {
      constraint (elsize != size && type != NT_unsigned
		  && type != NT_signed, BAD_EL_TYPE);
    }
  else
    {
      constraint (elsize != size && type != NT_untyped, BAD_EL_TYPE);
    }

  /* Split the offset into a magnitude and an add/subtract flag.  */
  int imm = inst.relocs[0].exp.X_add_number;
  int add = 1;
  if (imm < 0)
    {
      add = 0;
      imm = -imm;
    }

  /* The offset is a 7-bit value scaled by the access size.  */
  if ((imm % (size / 8) != 0) || imm > (0x7f << neon_logbits (size)))
    {
      switch (size)
	{
	case 8:
	  constraint (1, _("immediate must be in the range of +/-[0,127]"));
	  break;
	case 16:
	  constraint (1, _("immediate must be a multiple of 2 in the"
			   " range of +/-[0,254]"));
	  break;
	case 32:
	  constraint (1, _("immediate must be a multiple of 4 in the"
			   " range of +/-[0,508]"));
	  break;
	}
    }

  if (size != elsize)
    {
      /* Widening/narrowing form: restricted register ranges apply.  */
      constraint (inst.operands[1].reg > 7, BAD_HIREG);
      constraint (inst.operands[0].reg > 14,
		  _("MVE vector register in the range [Q0..Q7] expected"));
      inst.instruction |= (load && type == NT_unsigned) << 28;
      inst.instruction |= (size == 16) << 19;
      inst.instruction |= neon_logbits (elsize) << 7;
    }
  else
    {
      /* Full-width form: PC/SP bases are only warned about.  */
      if (inst.operands[1].reg == REG_PC)
	as_tsktsk (MVE_BAD_PC);
      else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
	as_tsktsk (MVE_BAD_SP);
      inst.instruction |= 1 << 12;
      inst.instruction |= neon_logbits (size) << 7;
    }
  inst.instruction |= inst.operands[1].preind << 24;
  inst.instruction |= add << 23;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  /* Clear the low bits before inserting the scaled immediate.  */
  inst.instruction &= 0xffffff80;
  inst.instruction |= imm >> neon_logbits (size);

}
17551
/* Top-level encoder for MVE VSTR/VLDR.  The mnemonic fixes the access size
   and load/store direction; the addressing mode of operand 1 selects which
   of the three specialised encoders to use.  */

static void
do_mve_vstr_vldr (void)
{
  unsigned size;
  int load = 0;

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* Derive SIZE and LOAD from the mnemonic.  */
  switch (inst.instruction)
    {
    default:
      gas_assert (0);
      break;
    case M_MNEM_vldrb:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrb:
      size = 8;
      break;
    case M_MNEM_vldrh:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrh:
      size = 16;
      break;
    case M_MNEM_vldrw:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrw:
      size = 32;
      break;
    case M_MNEM_vldrd:
      load = 1;
      /* fall through.  */
    case M_MNEM_vstrd:
      size = 64;
      break;
    }
  unsigned elsize = inst.vectype.el[0].size;

  if (inst.operands[1].isquad)
    {
      /* We are dealing with [Q, imm]{!} cases.  */
      do_mve_vstr_vldr_QI (size, elsize, load);
    }
  else
    {
      if (inst.operands[1].immisreg == 2)
	{
	  /* We are dealing with [R, Q, {UXTW #os}] cases.  */
	  do_mve_vstr_vldr_RQ (size, elsize, load);
	}
      else if (!inst.operands[1].immisreg)
	{
	  /* We are dealing with [R, Imm]{!}/[R], Imm cases.  */
	  do_mve_vstr_vldr_RI (size, elsize, load);
	}
      else
	constraint (1, BAD_ADDR_MODE);
    }

  inst.is_neon = 1;
}
17618
/* Encode the MVE structured vector load/store forms.  Only a plain [Rn]
   address is accepted: no offset, no register offset, no writeback
   immediate.  NOTE(review): exact mnemonics handled here are determined by
   the opcode table, not visible in this function.  */

static void
do_mve_vst_vld (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Reject anything other than a bare [Rn] addressing mode.  */
  constraint (!inst.operands[1].preind || inst.relocs[0].exp.X_add_symbol != 0
	      || inst.relocs[0].exp.X_add_number != 0
	      || inst.operands[1].immisreg != 0,
	      BAD_ADDR_MODE);
  constraint (inst.vectype.el[0].size > 32, BAD_EL_TYPE);
  /* SP/PC base registers get a warning rather than an error.  */
  if (inst.operands[1].reg == REG_PC)
    as_tsktsk (MVE_BAD_PC);
  else if (inst.operands[1].reg == REG_SP && inst.operands[1].writeback)
    as_tsktsk (MVE_BAD_SP);


  /* These instructions are one of the "exceptions" mentioned in
     handle_pred_state.  They are MVE instructions that are not VPT compatible
     and do not accept a VPT code, thus appending such a code is a syntax
     error.  */
  if (inst.cond > COND_ALWAYS)
    first_error (BAD_SYNTAX);
  /* If we append a scalar condition code we can set this to
     MVE_OUTSIDE_PRED_INSN as it will also lead to a syntax error.  */
  else if (inst.cond < COND_ALWAYS)
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
  else
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= inst.operands[1].writeback << 21;
  inst.instruction |= inst.operands[1].reg << 16;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= neon_logbits (inst.vectype.el[0].size) << 7;
  inst.is_neon = 1;
}
17656
17657static void
17658do_mve_vaddlv (void)
17659{
17660  enum neon_shape rs = neon_select_shape (NS_RRQ, NS_NULL);
17661  struct neon_type_el et
17662    = neon_check_type (3, rs, N_EQK, N_EQK, N_S32 | N_U32 | N_KEY);
17663
17664  if (et.type == NT_invtype)
17665    first_error (BAD_EL_TYPE);
17666
17667  if (inst.cond > COND_ALWAYS)
17668    inst.pred_insn_type = INSIDE_VPT_INSN;
17669  else
17670    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
17671
17672  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
17673
17674  inst.instruction |= (et.type == NT_unsigned) << 28;
17675  inst.instruction |= inst.operands[1].reg << 19;
17676  inst.instruction |= inst.operands[0].reg << 12;
17677  inst.instruction |= inst.operands[2].reg;
17678  inst.is_neon = 1;
17679}
17680
/* Encode dyadic instructions whose type suffix may be signed, unsigned or
   32-bit float (N_SUF_32), such as VMAX/VMIN.  */

static void
do_neon_dyadic_if_su (void)
{
  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SUF_32 | N_KEY);

  /* Floating-point VMAX/VMIN require the Neon v1 FP extension.  */
  constraint ((inst.instruction == ((unsigned) N_MNEM_vmax)
	       || inst.instruction == ((unsigned) N_MNEM_vmin))
	      && et.type == NT_float
	      && !ARM_CPU_HAS_FEATURE (cpu_variant,fpu_neon_ext_v1), BAD_FPU);

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH | NEON_CHECK_CC))
    return;

  neon_dyadic_misc (NT_unsigned, N_SUF_32, 0);
}
17699
/* Encode VADD/VSUB-style instructions taking integer or float types,
   trying the VFP form first and falling back to Neon/MVE.  */

static void
do_neon_addsub_if_i (void)
{
  /* Prefer the VFP encoding when the syntax matches it.  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
      && try_vfp_nsyn (3, do_vfp_nsyn_add_sub) == SUCCESS)
    return;

  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK,
					    N_EQK, N_IF_32 | N_I64 | N_KEY);

  constraint (rs == NS_QQR && et.size == 64, BAD_FPU);
  /* If we are parsing Q registers and the element types match MVE, which NEON
     also supports, then we must check whether this is an instruction that can
     be used by both MVE/NEON.  This distinction can be made based on whether
     they are predicated or not.  */
  if ((rs == NS_QQQ || rs == NS_QQR) && et.size != 64)
    {
      if (!check_simd_pred_availability (et.type == NT_float,
					 NEON_CHECK_ARCH | NEON_CHECK_CC))
	return;
    }
  else
    {
      /* If they are either in a D register or are using an unsupported.  */
      if (rs != NS_QQR
	  && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;
    }

  /* The "untyped" case can't happen. Do this to stop the "U" bit being
     affected if we specify unsigned args.  */
  neon_dyadic_misc (NT_untyped, N_IF_32 | N_I64, 0);
}
17734
17735/* Swaps operands 1 and 2. If operand 1 (optional arg) was omitted, we want the
17736   result to be:
17737     V<op> A,B     (A is operand 0, B is operand 2)
17738   to mean:
17739     V<op> A,B,A
17740   not:
17741     V<op> A,B,B
17742   so handle that case specially.  */
17743
17744static void
17745neon_exchange_operands (void)
17746{
17747  if (inst.operands[1].present)
17748    {
17749      void *scratch = xmalloc (sizeof (inst.operands[0]));
17750
17751      /* Swap operands[1] and operands[2].  */
17752      memcpy (scratch, &inst.operands[1], sizeof (inst.operands[0]));
17753      inst.operands[1] = inst.operands[2];
17754      memcpy (&inst.operands[2], scratch, sizeof (inst.operands[0]));
17755      free (scratch);
17756    }
17757  else
17758    {
17759      inst.operands[1] = inst.operands[2];
17760      inst.operands[2] = inst.operands[0];
17761    }
17762}
17763
/* Encode a Neon compare.  REGTYPES lists the types permitted for the
   register-register form, IMMTYPES those for the compare-against-#0 form.
   If INVERT, swap operands 1 and 2 first so the inverse condition can
   reuse the same encoding.  */

static void
neon_compare (unsigned regtypes, unsigned immtypes, int invert)
{
  if (inst.operands[2].isreg)
    {
      if (invert)
	neon_exchange_operands ();
      neon_dyadic_misc (NT_unsigned, regtypes, N_SIZ);
    }
  else
    {
      /* Compare against #0 immediate form.  */
      enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK | N_SIZ, immtypes | N_KEY);

      NEON_ENCODE (IMMED, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= (et.type == NT_float) << 10;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
}
17791
/* VCGE/VCGT-style compares: operands in source order.  */

static void
do_neon_cmp (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, FALSE);
}
17797
/* VCLE/VCLT-style compares: encoded by swapping the source operands.  */

static void
do_neon_cmp_inv (void)
{
  neon_compare (N_SUF_32, N_S_32 | N_F_16_32, TRUE);
}
17803
/* VCEQ: equality compare, same types for register and #0 forms.  */

static void
do_neon_ceq (void)
{
  neon_compare (N_IF_32, N_IF_32, FALSE);
}
17809
17810/* For multiply instructions, we have the possibility of 16-bit or 32-bit
17811   scalars, which are encoded in 5 bits, M : Rm.
17812   For 16-bit scalars, the register is encoded in Rm[2:0] and the index in
17813   M:Rm[3], and for 32-bit scalars, the register is encoded in Rm[3:0] and the
17814   index in M.
17815
17816   Dot Product instructions are similar to multiply instructions except elsize
17817   should always be 32.
17818
17819   This function translates SCALAR, which is GAS's internal encoding of indexed
17820   scalar register, to raw encoding.  There is also register and index range
17821   check based on ELSIZE.  */
17822
static unsigned
neon_scalar_for_mul (unsigned scalar, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (scalar);
  unsigned elno = NEON_SCALAR_INDEX (scalar);

  switch (elsize)
    {
    case 16:
      /* 16-bit scalar: register in bits [2:0], index in bits [4:3].  */
      if (regno > 7 || elno > 3)
	goto bad_scalar;
      return regno | (elno << 3);

    case 32:
      /* 32-bit scalar: register in bits [3:0], index in bit 4.  */
      if (regno > 15 || elno > 1)
	goto bad_scalar;
      return regno | (elno << 4);

    default:
    bad_scalar:
      first_error (_("scalar out of range for multiply instruction"));
    }

  /* Unreachable on valid input; keeps the compiler happy after the
     error path.  */
  return 0;
}
17848
17849/* Encode multiply / multiply-accumulate scalar instructions.  */
17850
static void
neon_mul_mac (struct neon_type_el et, int ubit)
{
  unsigned scalar;

  /* Give a more helpful error message if we have an invalid type.  */
  if (et.type == NT_invtype)
    return;

  /* Translate the indexed scalar operand to its raw M:Rm encoding.  */
  scalar = neon_scalar_for_mul (inst.operands[2].reg, et.size);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
  inst.instruction |= LOW4 (scalar);
  inst.instruction |= HI1 (scalar) << 5;
  inst.instruction |= (et.type == NT_float) << 8;
  inst.instruction |= neon_logbits (et.size) << 20;
  inst.instruction |= (ubit != 0) << 24;

  neon_dp_fixup (&inst);
}
17873
/* Encode VMLA/VMLS-style multiply-accumulate, which may take a vector, an
   indexed scalar (Neon), or a GPR scalar (MVE) as the third operand.  */

static void
do_neon_mac_maybe_scalar (void)
{
  /* Try the VFP form first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mla_mls) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Neon-only indexed-scalar form.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_I16 | N_I32 | N_F_16_32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else if (!inst.operands[2].isvec)
    {
      /* MVE-only GPR-scalar (QQR) form.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);

      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);

      neon_dyadic_misc (NT_unsigned, N_SU_MVE, 0);
    }
  else
    {
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      /* The "untyped" case can't happen.  Do this to stop the "U" bit being
	 affected if we specify unsigned args.  */
      neon_dyadic_misc (NT_untyped, N_IF_32, 0);
    }
}
17909
/* Encode the BFloat16 VFMAB/VFMAT instructions, in either the indexed-scalar
   or the vector form.  Requires Armv8 Neon plus the BF16 extension.  */

static void
do_bfloat_vfma (void)
{
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
  enum neon_shape rs;
  int t_bit = 0;

  /* Anything that is not VFMAB is encoded as VFMAT (T bit set).  */
  if (inst.instruction != B_MNEM_vfmab)
  {
      t_bit = 1;
      inst.instruction = B_MNEM_vfmat;
  }

  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* operand 2 packs register and index; low nibble is the index.  */
      int index = inst.operands[2].reg & 0xf;
      constraint (!(index < 4), _("index must be in the range 0 to 3"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 8),
		  _("indexed register must be less than 8"));
      neon_three_args (t_bit);
      /* Scatter the two index bits into their encoding positions.  */
      inst.instruction |= ((index & 1) << 3);
      inst.instruction |= ((index & 2) << 4);
    }
  else
    {
      rs = neon_select_shape (NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (t_bit);
    }

}
17947
/* Encode VFMA/VFMS, trying VFP first, then the MVE QQR scalar form, then
   the generic Neon/MVE vector form.  */

static void
do_neon_fmac (void)
{
  if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_fma)
      && try_vfp_nsyn (3, do_vfp_nsyn_fma_fms) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
    {
      enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs, N_F_MVE | N_KEY, N_EQK,
						N_EQK);

      if (rs == NS_QQR)
	{
	  /* MVE GPR-scalar form; SP/PC scalars warn rather than error.  */

	  if (inst.operands[2].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[2].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);

	  /* Fixed base opcode for the QQR form; bit 28 selects FP16.  */
	  inst.instruction = 0xee310e40;
	  inst.instruction |= (et.size == 16) << 28;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[1].reg) << 6;
	  inst.instruction |= inst.operands[2].reg;
	  inst.is_neon = 1;
	  return;
	}
    }
  else
    {
      constraint (!inst.operands[2].isvec, BAD_FPU);
    }

  neon_dyadic_misc (NT_untyped, N_IF_32, 0);
}
17990
17991static void
17992do_mve_vfma (void)
17993{
17994  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_bf16) &&
17995      inst.cond == COND_ALWAYS)
17996    {
17997      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
17998      inst.instruction = N_MNEM_vfma;
17999      inst.pred_insn_type = INSIDE_VPT_INSN;
18000      inst.cond = 0xf;
18001      return do_neon_fmac();
18002    }
18003  else
18004    {
18005      do_bfloat_vfma();
18006    }
18007}
18008
18009static void
18010do_neon_tst (void)
18011{
18012  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18013  struct neon_type_el et = neon_check_type (3, rs,
18014    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18015  neon_three_same (neon_quad (rs), 0, et.size);
18016}
18017
18018/* VMUL with 3 registers allows the P8 type. The scalar version supports the
18019   same types as the MAC equivalents. The polynomial type for this instruction
18020   is encoded the same as the integer type.  */
18021
static void
do_neon_mul (void)
{
  /* Try the VFP encoding first.  */
  if (try_vfp_nsyn (3, do_vfp_nsyn_mul) == SUCCESS)
    return;

  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon-only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      do_neon_mac_maybe_scalar ();
    }
  else
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  enum neon_shape rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  struct neon_type_el et
	    = neon_check_type (3, rs, N_EQK, N_EQK, N_I_MVE | N_F_MVE | N_KEY);
	  /* Float element types additionally require the MVE FP extension.  */
	  if (et.type == NT_float)
	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext),
			BAD_FPU);

	  neon_dyadic_misc (NT_float, N_I_MVE | N_F_MVE, 0);
	}
      else
	{
	  constraint (!inst.operands[2].isvec, BAD_FPU);
	  /* Neon also allows the P8 polynomial type here.  */
	  neon_dyadic_misc (NT_poly,
			    N_I8 | N_I16 | N_I32 | N_F16 | N_F32 | N_P8, 0);
	}
    }
}
18057
/* Encode VQDMULH/VQRDMULH in scalar, Neon vector, or MVE vector/QQR form.  */

static void
do_neon_qdmulh (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed-scalar form is Neon-only.  */
      constraint (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      struct neon_type_el et = neon_check_type (3, rs,
	N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, neon_quad (rs));
    }
  else
    {
      enum neon_shape rs;
      struct neon_type_el et;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* MVE additionally accepts S8 and a GPR-scalar (QQR) shape.  */
	  rs = neon_select_shape (NS_QQR, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
	}
      else
	{
	  rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	  et = neon_check_type (3, rs,
	    N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	}

      NEON_ENCODE (INTEGER, inst);
      if (rs == NS_QQR)
	mve_encode_qqr (et.size, 0, 0);
      else
	/* The U bit (rounding) comes from bit mask.  */
	neon_three_same (neon_quad (rs), 0, et.size);
    }
}
18098
18099static void
18100do_mve_vaddv (void)
18101{
18102  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
18103  struct neon_type_el et
18104    = neon_check_type (2, rs, N_EQK,  N_SU_32 | N_KEY);
18105
18106  if (et.type == NT_invtype)
18107    first_error (BAD_EL_TYPE);
18108
18109  if (inst.cond > COND_ALWAYS)
18110    inst.pred_insn_type = INSIDE_VPT_INSN;
18111  else
18112    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18113
18114  constraint (inst.operands[1].reg > 14, MVE_BAD_QREG);
18115
18116  mve_encode_rq (et.type == NT_unsigned, et.size);
18117}
18118
/* Encode MVE VHCADD.  The rotation immediate must be 90 or 270; it is
   encoded as a single bit.  */

static void
do_mve_vhcadd (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQI, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));

  if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
    as_tsktsk (_("Warning: 32-bit element size and same first and third "
		 "operand makes instruction UNPREDICTABLE"));

  mve_encode_qqq (0, et.size);
  /* Bit 12 distinguishes the 270-degree rotation from 90.  */
  inst.instruction |= (rot == 270) << 12;
  inst.is_neon = 1;
}
18142
/* Encode MVE VQDMULL in either the QQQ vector or QQR GPR-scalar form.  */

static void
do_mve_vqdmull (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_QQR, NS_NULL);
  struct neon_type_el et
    = neon_check_type (3, rs, N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);

  /* With 32-bit elements, destination overlapping a source is flagged.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || (rs == NS_QQQ && inst.operands[0].reg == inst.operands[2].reg)))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (rs == NS_QQQ)
    {
      mve_encode_qqq (et.size == 32, 64);
      inst.instruction |= 1;
    }
  else
    {
      mve_encode_qqr (64, et.size == 32, 0);
      inst.instruction |= 0x3 << 5;
    }
}
18171
18172static void
18173do_mve_vadc (void)
18174{
18175  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18176  struct neon_type_el et
18177    = neon_check_type (3, rs, N_KEY | N_I32, N_EQK, N_EQK);
18178
18179  if (et.type == NT_invtype)
18180    first_error (BAD_EL_TYPE);
18181
18182  if (inst.cond > COND_ALWAYS)
18183    inst.pred_insn_type = INSIDE_VPT_INSN;
18184  else
18185    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18186
18187  mve_encode_qqq (0, 64);
18188}
18189
18190static void
18191do_mve_vbrsr (void)
18192{
18193  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18194  struct neon_type_el et
18195    = neon_check_type (3, rs, N_EQK, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18196
18197  if (inst.cond > COND_ALWAYS)
18198    inst.pred_insn_type = INSIDE_VPT_INSN;
18199  else
18200    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18201
18202  mve_encode_qqr (et.size, 0, 0);
18203}
18204
18205static void
18206do_mve_vsbc (void)
18207{
18208  neon_check_type (3, NS_QQQ, N_EQK, N_EQK, N_I32 | N_KEY);
18209
18210  if (inst.cond > COND_ALWAYS)
18211    inst.pred_insn_type = INSIDE_VPT_INSN;
18212  else
18213    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18214
18215  mve_encode_qqq (1, 64);
18216}
18217
18218static void
18219do_mve_vmulh (void)
18220{
18221  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18222  struct neon_type_el et
18223    = neon_check_type (3, rs, N_EQK, N_EQK, N_SU_MVE | N_KEY);
18224
18225  if (inst.cond > COND_ALWAYS)
18226    inst.pred_insn_type = INSIDE_VPT_INSN;
18227  else
18228    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18229
18230  mve_encode_qqq (et.type == NT_unsigned, et.size);
18231}
18232
18233static void
18234do_mve_vqdmlah (void)
18235{
18236  enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
18237  struct neon_type_el et
18238    = neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);
18239
18240  if (inst.cond > COND_ALWAYS)
18241    inst.pred_insn_type = INSIDE_VPT_INSN;
18242  else
18243    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18244
18245  mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
18246}
18247
18248static void
18249do_mve_vqdmladh (void)
18250{
18251  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
18252  struct neon_type_el et
18253    = neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18254
18255  if (inst.cond > COND_ALWAYS)
18256    inst.pred_insn_type = INSIDE_VPT_INSN;
18257  else
18258    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18259
18260  mve_encode_qqq (0, et.size);
18261}
18262
18263
/* Encode VMULLT/VMULLB.  When the syntax matches the Neon VMUL forms and
   MVE is not available (or the shape is not QQQ), fall back to encoding a
   conditional Neon VMUL instead.  */

static void
do_mve_vmull (void)
{

  enum neon_shape rs = neon_select_shape (NS_HHH, NS_FFF, NS_DDD, NS_DDS,
					  NS_QQS, NS_QQQ, NS_QQR, NS_NULL);
  if (inst.cond == COND_ALWAYS
      && ((unsigned)inst.instruction) == M_MNEM_vmullt)
    {

      if (rs == NS_QQQ)
	{
	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	    goto neon_vmul;
	}
      else
	goto neon_vmul;
    }

  constraint (rs != NS_QQQ, BAD_FPU);
  struct neon_type_el et = neon_check_type (3, rs, N_EQK , N_EQK,
					    N_SU_32 | N_P8 | N_P16 | N_KEY);

  /* We are dealing with MVE's vmullt.  */
  if (et.size == 32
      && (inst.operands[0].reg == inst.operands[1].reg
	  || inst.operands[0].reg == inst.operands[2].reg))
    as_tsktsk (BAD_MVE_SRCDEST);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  if (et.type == NT_poly)
    mve_encode_qqq (neon_logbits (et.size), 64);
  else
    mve_encode_qqq (et.type == NT_unsigned, et.size);

  return;

 neon_vmul:
  /* Re-dispatch as a conditional ("lt"-suffixed) Neon VMUL.  */
  inst.instruction = N_MNEM_vmul;
  inst.cond = 0xb;
  if (thumb_mode)
    inst.pred_insn_type = INSIDE_IT_INSN;
  do_neon_mul ();
}
18312
/* Encode MVE VABAV: absolute-difference-and-accumulate into a GPR.  */

static void
do_mve_vabav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);

  if (rs == NS_NULL)
    return;

  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  struct neon_type_el et = neon_check_type (2, NS_NULL, N_EQK, N_KEY | N_S8
					    | N_S16 | N_S32 | N_U8 | N_U16
					    | N_U32);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rqq (et.type == NT_unsigned, et.size);
}
18335
/* Encode the MVE VMLADAV/VMLSDAV family (multiply-accumulate across a
   vector into a GPR).  The exchange (x) and subtracting (vmlsdav*) variants
   only allow signed element types.  */

static void
do_mve_vmladav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RQQ, NS_NULL);
  struct neon_type_el et = neon_check_type (3, rs,
					    N_EQK, N_EQK, N_SU_MVE | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmladavx
	  || inst.instruction == M_MNEM_vmladavax
	  || inst.instruction == M_MNEM_vmlsdav
	  || inst.instruction == M_MNEM_vmlsdava
	  || inst.instruction == M_MNEM_vmlsdavx
	  || inst.instruction == M_MNEM_vmlsdavax))
    first_error (BAD_SIMD_TYPE);

  constraint (inst.operands[2].reg > 14,
	      _("MVE vector register in the range [Q0..Q7] expected"));

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  /* The 8-bit-element flag sits in a different bit position for the
     subtracting variants.  */
  if (inst.instruction == M_MNEM_vmlsdav
      || inst.instruction == M_MNEM_vmlsdava
      || inst.instruction == M_MNEM_vmlsdavx
      || inst.instruction == M_MNEM_vmlsdavax)
    inst.instruction |= (et.size == 8) << 28;
  else
    inst.instruction |= (et.size == 8) << 8;

  mve_encode_rqq (et.type == NT_unsigned, 64);
  inst.instruction |= (et.size == 32) << 16;
}
18371
/* Encode the MVE VMLALDAV/VMLSLDAV family (long multiply-accumulate across
   a vector into a GPR pair).  The subtracting variants only allow signed
   element types.  */

static void
do_mve_vmlaldav (void)
{
  enum neon_shape rs = neon_select_shape (NS_RRQQ, NS_NULL);
  struct neon_type_el et
    = neon_check_type (4, rs, N_EQK, N_EQK, N_EQK,
		       N_S16 | N_S32 | N_U16 | N_U32 | N_KEY);

  if (et.type == NT_unsigned
      && (inst.instruction == M_MNEM_vmlsldav
	  || inst.instruction == M_MNEM_vmlsldava
	  || inst.instruction == M_MNEM_vmlsldavx
	  || inst.instruction == M_MNEM_vmlsldavax))
    first_error (BAD_SIMD_TYPE);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, et.size);
}
18394
/* Encode the MVE VRMLALDAVH/VRMLSLDAVH family.  The subtracting and
   exchange variants only allow S32; the plain variant allows S32/U32.  */

static void
do_mve_vrmlaldavh (void)
{
  struct neon_type_el et;
  if (inst.instruction == M_MNEM_vrmlsldavh
     || inst.instruction == M_MNEM_vrmlsldavha
     || inst.instruction == M_MNEM_vrmlsldavhx
     || inst.instruction == M_MNEM_vrmlsldavhax)
    {
      et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      if (inst.operands[1].reg == REG_SP)
	as_tsktsk (MVE_BAD_SP);
    }
  else
    {
      if (inst.instruction == M_MNEM_vrmlaldavhx
	  || inst.instruction == M_MNEM_vrmlaldavhax)
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK, N_S32 | N_KEY);
      else
	et = neon_check_type (4, NS_RRQQ, N_EQK, N_EQK, N_EQK,
			      N_U32 | N_S32 | N_KEY);
      /* vrmlaldavh's encoding with SP as the second, odd, GPR operand may alias
	 with vmax/min instructions, making the use of SP in assembly really
	 nonsensical, so instead of issuing a warning like we do for other uses
	 of SP for the odd register operand we error out.  */
      constraint (inst.operands[1].reg == REG_SP, BAD_SP);
    }

  /* Make sure we still check the second operand is an odd one and that PC is
     disallowed.  This because we are parsing for any GPR operand, to be able
     to distinguish between giving a warning or an error for SP as described
     above.  */
  constraint ((inst.operands[1].reg % 2) != 1, BAD_EVEN);
  constraint (inst.operands[1].reg == REG_PC, BAD_PC);

  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  mve_encode_rrqq (et.type == NT_unsigned, 0);
}
18437
18438
18439static void
18440do_mve_vmaxnmv (void)
18441{
18442  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
18443  struct neon_type_el et
18444    = neon_check_type (2, rs, N_EQK, N_F_MVE | N_KEY);
18445
18446  if (inst.cond > COND_ALWAYS)
18447    inst.pred_insn_type = INSIDE_VPT_INSN;
18448  else
18449    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18450
18451  if (inst.operands[0].reg == REG_SP)
18452    as_tsktsk (MVE_BAD_SP);
18453  else if (inst.operands[0].reg == REG_PC)
18454    as_tsktsk (MVE_BAD_PC);
18455
18456  mve_encode_rq (et.size == 16, 64);
18457}
18458
18459static void
18460do_mve_vmaxv (void)
18461{
18462  enum neon_shape rs = neon_select_shape (NS_RQ, NS_NULL);
18463  struct neon_type_el et;
18464
18465  if (inst.instruction == M_MNEM_vmaxv || inst.instruction == M_MNEM_vminv)
18466    et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
18467  else
18468    et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18469
18470  if (inst.cond > COND_ALWAYS)
18471    inst.pred_insn_type = INSIDE_VPT_INSN;
18472  else
18473    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
18474
18475  if (inst.operands[0].reg == REG_SP)
18476    as_tsktsk (MVE_BAD_SP);
18477  else if (inst.operands[0].reg == REG_PC)
18478    as_tsktsk (MVE_BAD_PC);
18479
18480  mve_encode_rq (et.type == NT_unsigned, et.size);
18481}
18482
18483
/* Encode VQRDMLAH/VQRDMLSH: Armv8.1 AdvSIMD on Neon, or the QQR scalar
   form on MVE.  */

static void
do_neon_qrdmlah (void)
{
  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
   return;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Check we're on the correct architecture.  */
      if (!mark_feature_used (&fpu_neon_ext_armv8))
	inst.error
	  = _("instruction form not available on this architecture.");
      else if (!mark_feature_used (&fpu_neon_ext_v8_1))
	{
	  as_warn (_("this instruction implies use of ARMv8.1 AdvSIMD."));
	  record_feature_use (&fpu_neon_ext_v8_1);
	}
	if (inst.operands[2].isscalar)
	  {
	    /* Indexed-scalar form.  */
	    enum neon_shape rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (SCALAR, inst);
	    neon_mul_mac (et, neon_quad (rs));
	  }
	else
	  {
	    enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
	    struct neon_type_el et = neon_check_type (3, rs,
	      N_EQK, N_EQK, N_S16 | N_S32 | N_KEY);
	    NEON_ENCODE (INTEGER, inst);
	    /* The U bit (rounding) comes from bit mask.  */
	    neon_three_same (neon_quad (rs), 0, et.size);
	  }
    }
  else
    {
      /* MVE GPR-scalar (QQR) form.  */
      enum neon_shape rs = neon_select_shape (NS_QQR, NS_NULL);
      struct neon_type_el et
	= neon_check_type (3, rs, N_EQK, N_EQK, N_S_32 | N_KEY);

      NEON_ENCODE (INTEGER, inst);
      mve_encode_qqr (et.size, et.type == NT_unsigned, 0);
    }
}
18528
18529static void
18530do_neon_fcmp_absolute (void)
18531{
18532  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18533  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18534					    N_F_16_32 | N_KEY);
18535  /* Size field comes from bit mask.  */
18536  neon_three_same (neon_quad (rs), 1, et.size == 16 ? (int) et.size : -1);
18537}
18538
/* Inverted absolute compare: swap the operands, then emit the ordinary
   absolute-compare encoding.  */
static void
do_neon_fcmp_absolute_inv (void)
{
  neon_exchange_operands ();
  do_neon_fcmp_absolute ();
}
18545
18546static void
18547do_neon_step (void)
18548{
18549  enum neon_shape rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
18550  struct neon_type_el et = neon_check_type (3, rs, N_EQK, N_EQK,
18551					    N_F_16_32 | N_KEY);
18552  neon_three_same (neon_quad (rs), 0, et.size == 16 ? (int) et.size : -1);
18553}
18554
18555static void
18556do_neon_abs_neg (void)
18557{
18558  enum neon_shape rs;
18559  struct neon_type_el et;
18560
18561  if (try_vfp_nsyn (2, do_vfp_nsyn_abs_neg) == SUCCESS)
18562    return;
18563
18564  rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
18565  et = neon_check_type (2, rs, N_EQK, N_S_32 | N_F_16_32 | N_KEY);
18566
18567  if (!check_simd_pred_availability (et.type == NT_float,
18568				     NEON_CHECK_ARCH | NEON_CHECK_CC))
18569    return;
18570
18571  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
18572  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
18573  inst.instruction |= LOW4 (inst.operands[1].reg);
18574  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
18575  inst.instruction |= neon_quad (rs) << 6;
18576  inst.instruction |= (et.type == NT_float) << 10;
18577  inst.instruction |= neon_logbits (et.size) << 18;
18578
18579  neon_dp_fixup (&inst);
18580}
18581
18582static void
18583do_neon_sli (void)
18584{
18585  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18586    return;
18587
18588  enum neon_shape rs;
18589  struct neon_type_el et;
18590  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18591    {
18592      rs = neon_select_shape (NS_QQI, NS_NULL);
18593      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18594    }
18595  else
18596    {
18597      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18598      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18599    }
18600
18601
18602  int imm = inst.operands[2].imm;
18603  constraint (imm < 0 || (unsigned)imm >= et.size,
18604	      _("immediate out of range for insert"));
18605  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
18606}
18607
18608static void
18609do_neon_sri (void)
18610{
18611  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18612    return;
18613
18614  enum neon_shape rs;
18615  struct neon_type_el et;
18616  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18617    {
18618      rs = neon_select_shape (NS_QQI, NS_NULL);
18619      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_KEY);
18620    }
18621  else
18622    {
18623      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18624      et = neon_check_type (2, rs, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
18625    }
18626
18627  int imm = inst.operands[2].imm;
18628  constraint (imm < 1 || (unsigned)imm > et.size,
18629	      _("immediate out of range for insert"));
18630  neon_imm_shift (FALSE, 0, neon_quad (rs), et, et.size - imm);
18631}
18632
18633static void
18634do_neon_qshlu_imm (void)
18635{
18636  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
18637    return;
18638
18639  enum neon_shape rs;
18640  struct neon_type_el et;
18641  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
18642    {
18643      rs = neon_select_shape (NS_QQI, NS_NULL);
18644      et = neon_check_type (2, rs, N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
18645    }
18646  else
18647    {
18648      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
18649      et = neon_check_type (2, rs, N_EQK | N_UNS,
18650			    N_S8 | N_S16 | N_S32 | N_S64 | N_KEY);
18651    }
18652
18653  int imm = inst.operands[2].imm;
18654  constraint (imm < 0 || (unsigned)imm >= et.size,
18655	      _("immediate out of range for shift"));
18656  /* Only encodes the 'U present' variant of the instruction.
18657     In this case, signed types have OP (bit 8) set to 0.
18658     Unsigned types have OP set to 1.  */
18659  inst.instruction |= (et.type == NT_unsigned) << 8;
18660  /* The rest of the bits are the same as other immediate shifts.  */
18661  neon_imm_shift (FALSE, 0, neon_quad (rs), et, imm);
18662}
18663
18664static void
18665do_neon_qmovn (void)
18666{
18667  struct neon_type_el et = neon_check_type (2, NS_DQ,
18668    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
18669  /* Saturating move where operands can be signed or unsigned, and the
18670     destination has the same signedness.  */
18671  NEON_ENCODE (INTEGER, inst);
18672  if (et.type == NT_unsigned)
18673    inst.instruction |= 0xc0;
18674  else
18675    inst.instruction |= 0x80;
18676  neon_two_same (0, 1, et.size / 2);
18677}
18678
/* Encode VQMOVUN: saturating narrowing move producing an unsigned result
   from signed operands.  */
static void
do_neon_qmovun (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQ,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  /* Saturating move with unsigned results. Operands must be signed.  */
  NEON_ENCODE (INTEGER, inst);
  neon_two_same (0, 1, et.size / 2);
}
18688
/* Encode VQSHRN/VQRSHRN (saturating shift right and narrow).  A zero
   shift count is re-dispatched as the equivalent VQMOVN.  */
static void
do_neon_rshift_sat_narrow (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_SU_16_64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQ{R}SHRN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovn;
      do_neon_qmovn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* The immediate field holds et.size - imm.  */
  neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, et.size - imm);
}
18715
/* Encode VQSHRUN/VQRSHRUN (saturating shift right and narrow, unsigned
   result from signed operands).  A zero shift count is re-dispatched as
   the equivalent VQMOVUN.  */
static void
do_neon_rshift_sat_narrow_u (void)
{
  /* FIXME: Types for narrowing. If operands are signed, results can be signed
     or unsigned. If operands are unsigned, results must also be unsigned.  */
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF | N_UNS, N_S16 | N_S32 | N_S64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* VQSHRUN.I<size> <Dd>, <Qm>, #0 is a synonym for
     VQMOVUN.I<size> <Dd>, <Qm>.  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vqmovun;
      do_neon_qmovun ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range"));
  /* FIXME: The manual is kind of unclear about what value U should have in
     VQ{R}SHRUN instructions, but U=0, op=0 definitely encodes VRSHR, so it
     must be 1.  */
  neon_imm_shift (TRUE, 1, 0, et, et.size - imm);
}
18745
18746static void
18747do_neon_movn (void)
18748{
18749  struct neon_type_el et = neon_check_type (2, NS_DQ,
18750    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
18751  NEON_ENCODE (INTEGER, inst);
18752  neon_two_same (0, 1, et.size / 2);
18753}
18754
/* Encode VSHRN/VRSHRN (shift right and narrow).  A zero shift count is
   re-dispatched as the equivalent VMOVN.  */
static void
do_neon_rshift_narrow (void)
{
  struct neon_type_el et = neon_check_type (2, NS_DQI,
    N_EQK | N_HLF, N_I16 | N_I32 | N_I64 | N_KEY);
  int imm = inst.operands[2].imm;
  /* This gets the bounds check, size encoding and immediate bits calculation
     right.  */
  et.size /= 2;

  /* If immediate is zero then we are a pseudo-instruction for
     VMOVN.I<size> <Dd>, <Qm>  */
  if (imm == 0)
    {
      inst.operands[2].present = 0;
      inst.instruction = N_MNEM_vmovn;
      do_neon_movn ();
      return;
    }

  constraint (imm < 1 || (unsigned)imm > et.size,
	      _("immediate out of range for narrowing operation"));
  /* The immediate field holds et.size - imm.  */
  neon_imm_shift (FALSE, 0, 0, et, et.size - imm);
}
18779
/* Encode VSHLL (shift left long).  A shift equal to the element size has
   its own dedicated encoding; other counts use the ordinary immediate
   shift encoding with a narrower accepted type set.  */
static void
do_neon_shll (void)
{
  /* FIXME: Type checking when lengthening.  */
  struct neon_type_el et = neon_check_type (2, NS_QDI,
    N_EQK | N_DBL, N_I8 | N_I16 | N_I32 | N_KEY);
  unsigned imm = inst.operands[2].imm;

  if (imm == et.size)
    {
      /* Maximum shift variant.  */
      NEON_ENCODE (INTEGER, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= neon_logbits (et.size) << 18;

      neon_dp_fixup (&inst);
    }
  else
    {
      /* A more-specific type check for non-max versions.  */
      et = neon_check_type (2, NS_QDI,
	N_EQK | N_DBL, N_SU_32 | N_KEY);
      NEON_ENCODE (IMMED, inst);
      neon_imm_shift (TRUE, et.type == NT_unsigned, 0, et, imm);
    }
}
18809
18810/* Check the various types for the VCVT instruction, and return which version
18811   the current instruction is.  */
18812
/* Each CVT_VAR entry is: flavour suffix, "to" type bits, "from" type bits,
   extra register-class bits, then the VFP opcode names for the bitshift
   (fixed-point), plain, and round-to-zero forms — NULL where no such VFP
   form exists.  The 'whole_reg' and 'key' fields are expanded in the
   context of get_neon_cvt_flavour, which defines them as locals.  */
#define CVT_FLAVOUR_VAR							      \
  CVT_VAR (s32_f32, N_S32, N_F32, whole_reg,   "ftosls", "ftosis", "ftosizs") \
  CVT_VAR (u32_f32, N_U32, N_F32, whole_reg,   "ftouls", "ftouis", "ftouizs") \
  CVT_VAR (f32_s32, N_F32, N_S32, whole_reg,   "fsltos", "fsitos", NULL)      \
  CVT_VAR (f32_u32, N_F32, N_U32, whole_reg,   "fultos", "fuitos", NULL)      \
  /* Half-precision conversions.  */					      \
  CVT_VAR (s16_f16, N_S16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (u16_f16, N_U16, N_F16 | N_KEY, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_s16, N_F16 | N_KEY, N_S16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f16_u16, N_F16 | N_KEY, N_U16, whole_reg, NULL, NULL, NULL)	      \
  CVT_VAR (f32_f16, N_F32, N_F16, whole_reg,   NULL,     NULL,     NULL)      \
  CVT_VAR (f16_f32, N_F16, N_F32, whole_reg,   NULL,     NULL,     NULL)      \
  /* New VCVT instructions introduced by ARMv8.2 fp16 extension.	      \
     Compared with single/double precision variants, only the co-processor    \
     field is different, so the encoding flow is reused here.  */	      \
  CVT_VAR (f16_s32, N_F16 | N_KEY, N_S32, N_VFP, "fsltos", "fsitos", NULL)    \
  CVT_VAR (f16_u32, N_F16 | N_KEY, N_U32, N_VFP, "fultos", "fuitos", NULL)    \
  CVT_VAR (u32_f16, N_U32, N_F16 | N_KEY, N_VFP, "ftouls", "ftouis", "ftouizs")\
  CVT_VAR (s32_f16, N_S32, N_F16 | N_KEY, N_VFP, "ftosls", "ftosis", "ftosizs")\
  CVT_VAR (bf16_f32, N_BF16, N_F32, whole_reg,   NULL, NULL, NULL)	      \
  /* VFP instructions.  */						      \
  CVT_VAR (f32_f64, N_F32, N_F64, N_VFP,       NULL,     "fcvtsd", NULL)      \
  CVT_VAR (f64_f32, N_F64, N_F32, N_VFP,       NULL,     "fcvtds", NULL)      \
  CVT_VAR (s32_f64, N_S32, N_F64 | key, N_VFP, "ftosld", "ftosid", "ftosizd") \
  CVT_VAR (u32_f64, N_U32, N_F64 | key, N_VFP, "ftould", "ftouid", "ftouizd") \
  CVT_VAR (f64_s32, N_F64 | key, N_S32, N_VFP, "fsltod", "fsitod", NULL)      \
  CVT_VAR (f64_u32, N_F64 | key, N_U32, N_VFP, "fultod", "fuitod", NULL)      \
  /* VFP instructions with bitshift.  */				      \
  CVT_VAR (f32_s16, N_F32 | key, N_S16, N_VFP, "fshtos", NULL,     NULL)      \
  CVT_VAR (f32_u16, N_F32 | key, N_U16, N_VFP, "fuhtos", NULL,     NULL)      \
  CVT_VAR (f64_s16, N_F64 | key, N_S16, N_VFP, "fshtod", NULL,     NULL)      \
  CVT_VAR (f64_u16, N_F64 | key, N_U16, N_VFP, "fuhtod", NULL,     NULL)      \
  CVT_VAR (s16_f32, N_S16, N_F32 | key, N_VFP, "ftoshs", NULL,     NULL)      \
  CVT_VAR (u16_f32, N_U16, N_F32 | key, N_VFP, "ftouhs", NULL,     NULL)      \
  CVT_VAR (s16_f64, N_S16, N_F64 | key, N_VFP, "ftoshd", NULL,     NULL)      \
  CVT_VAR (u16_f64, N_U16, N_F64 | key, N_VFP, "ftouhd", NULL,     NULL)

#define CVT_VAR(C, X, Y, R, BSN, CN, ZN) \
  neon_cvt_flavour_##C,

/* The different types of conversions we can do.  */
/* NB: the enumerator order must match CVT_FLAVOUR_VAR — encoding tables
   elsewhere are indexed directly by this value, and the >= first_fp test
   relies on the VFP entries coming last.  */
enum neon_cvt_flavour
{
  CVT_FLAVOUR_VAR
  neon_cvt_flavour_invalid,
  neon_cvt_flavour_first_fp = neon_cvt_flavour_f32_f64
};

#undef CVT_VAR
18862
/* Classify the conversion requested by the current instruction's types.
   CVT_FLAVOUR_VAR expands (via this local CVT_VAR definition) to a cascade
   of trial type checks, returning the first flavour that matches; any
   error left by failed trials is cleared on success.  */
static enum neon_cvt_flavour
get_neon_cvt_flavour (enum neon_shape rs)
{
#define CVT_VAR(C,X,Y,R,BSN,CN,ZN)			\
  et = neon_check_type (2, rs, (R) | (X), (R) | (Y));	\
  if (et.type != NT_invtype)				\
    {							\
      inst.error = NULL;				\
      return (neon_cvt_flavour_##C);			\
    }

  struct neon_type_el et;
  unsigned whole_reg = (rs == NS_FFI || rs == NS_FD || rs == NS_DF
			|| rs == NS_FF) ? N_VFP : 0;
  /* The instruction versions which take an immediate take one register
     argument, which is extended to the width of the full register. Thus the
     "source" and "destination" registers must have the same width.  Hack that
     here by making the size equal to the key (wider, in this case) operand.  */
  unsigned key = (rs == NS_QQI || rs == NS_DDI || rs == NS_FFI) ? N_KEY : 0;

  CVT_FLAVOUR_VAR;

  return neon_cvt_flavour_invalid;
#undef CVT_VAR
}
18888
/* Rounding/conversion behaviour selected by the mnemonic suffix.  The
   enumerator value is encoded directly into some instructions (see
   do_neon_cvt_1), so the order matters.  */
enum neon_cvt_mode
{
  neon_cvt_mode_a,	/* Round to nearest, ties away from zero (VCVTA).  */
  neon_cvt_mode_n,	/* Round to nearest, ties to even (VCVTN).  */
  neon_cvt_mode_p,	/* Round towards plus infinity (VCVTP).  */
  neon_cvt_mode_m,	/* Round towards minus infinity (VCVTM).  */
  neon_cvt_mode_z,	/* Round towards zero (plain VCVT float->int).  */
  neon_cvt_mode_x,	/* Use the current FPSCR rounding mode (VCVTR).  */
  neon_cvt_mode_r	/* 'r' variant; semantics per callers elsewhere.  */
};
18899
18900/* Neon-syntax VFP conversions.  */
18901
/* Assemble a Neon-syntax VCVT through the VFP encodings.  FLAVOUR indexes
   opcode-name tables generated from CVT_FLAVOUR_VAR: shapes carrying an
   immediate use the fixed-point ("bitshift") opcode names, the rest use
   the plain names.  A NULL table entry means no VFP form exists and
   nothing is emitted.  */
static void
do_vfp_nsyn_cvt (enum neon_shape rs, enum neon_cvt_flavour flavour)
{
  const char *opname = 0;

  if (rs == NS_DDI || rs == NS_QQI || rs == NS_FFI
      || rs == NS_FHI || rs == NS_HFI)
    {
      /* Conversions with immediate bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) BSN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	{
	  opname = enc[flavour];
	  /* The fixed-point form is Xd, Xd, #imm: fold operand 2 down into
	     operand 1 for the generic opcode path.  */
	  constraint (inst.operands[0].reg != inst.operands[1].reg,
		      _("operands 0 and 1 must be the same register"));
	  inst.operands[1] = inst.operands[2];
	  memset (&inst.operands[2], '\0', sizeof (inst.operands[2]));
	}
    }
  else
    {
      /* Conversions without bitshift.  */
      const char *enc[] =
	{
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) CN,
	  CVT_FLAVOUR_VAR
	  NULL
#undef CVT_VAR
	};

      if (flavour < (int) ARRAY_SIZE (enc))
	opname = enc[flavour];
    }

  if (opname)
    do_vfp_nsyn_opcode (opname);

  /* ARMv8.2 fp16 VCVT instruction.  */
  if (flavour == neon_cvt_flavour_s32_f16
      || flavour == neon_cvt_flavour_u32_f16
      || flavour == neon_cvt_flavour_f16_u32
      || flavour == neon_cvt_flavour_f16_s32)
    do_scalar_fp16_v82_encode ();
}
18953
/* Assemble the round-towards-zero VFP form of VCVT, using the "Z" opcode
   names from CVT_FLAVOUR_VAR.  Flavours with a NULL entry have no such
   form and nothing is emitted.  */
static void
do_vfp_nsyn_cvtz (void)
{
  enum neon_shape rs = neon_select_shape (NS_FH, NS_FF, NS_FD, NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);
  const char *enc[] =
    {
#define CVT_VAR(C,A,B,R,BSN,CN,ZN) ZN,
      CVT_FLAVOUR_VAR
      NULL
#undef CVT_VAR
    };

  if (flavour < (int) ARRAY_SIZE (enc) && enc[flavour])
    do_vfp_nsyn_opcode (enc[flavour]);
}
18970
18971static void
18972do_vfp_nsyn_cvt_fpv8 (enum neon_cvt_flavour flavour,
18973		      enum neon_cvt_mode mode)
18974{
18975  int sz, op;
18976  int rm;
18977
18978  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
18979     D register operands.  */
18980  if (flavour == neon_cvt_flavour_s32_f64
18981      || flavour == neon_cvt_flavour_u32_f64)
18982    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
18983		_(BAD_FPU));
18984
18985  if (flavour == neon_cvt_flavour_s32_f16
18986      || flavour == neon_cvt_flavour_u32_f16)
18987    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16),
18988		_(BAD_FP16));
18989
18990  set_pred_insn_type (OUTSIDE_PRED_INSN);
18991
18992  switch (flavour)
18993    {
18994    case neon_cvt_flavour_s32_f64:
18995      sz = 1;
18996      op = 1;
18997      break;
18998    case neon_cvt_flavour_s32_f32:
18999      sz = 0;
19000      op = 1;
19001      break;
19002    case neon_cvt_flavour_s32_f16:
19003      sz = 0;
19004      op = 1;
19005      break;
19006    case neon_cvt_flavour_u32_f64:
19007      sz = 1;
19008      op = 0;
19009      break;
19010    case neon_cvt_flavour_u32_f32:
19011      sz = 0;
19012      op = 0;
19013      break;
19014    case neon_cvt_flavour_u32_f16:
19015      sz = 0;
19016      op = 0;
19017      break;
19018    default:
19019      first_error (_("invalid instruction shape"));
19020      return;
19021    }
19022
19023  switch (mode)
19024    {
19025    case neon_cvt_mode_a: rm = 0; break;
19026    case neon_cvt_mode_n: rm = 1; break;
19027    case neon_cvt_mode_p: rm = 2; break;
19028    case neon_cvt_mode_m: rm = 3; break;
19029    default: first_error (_("invalid rounding mode")); return;
19030    }
19031
19032  NEON_ENCODE (FPV8, inst);
19033  encode_arm_vfp_reg (inst.operands[0].reg, VFP_REG_Sd);
19034  encode_arm_vfp_reg (inst.operands[1].reg, sz == 1 ? VFP_REG_Dm : VFP_REG_Sm);
19035  inst.instruction |= sz << 8;
19036
19037  /* ARMv8.2 fp16 VCVT instruction.  */
19038  if (flavour == neon_cvt_flavour_s32_f16
19039      ||flavour == neon_cvt_flavour_u32_f16)
19040    do_scalar_fp16_v82_encode ();
19041  inst.instruction |= op << 7;
19042  inst.instruction |= rm << 16;
19043  inst.instruction |= 0xf0000000;
19044  inst.is_neon = TRUE;
19045}
19046
/* Worker for the whole VCVT family.  Picks the operand shape, classifies
   the conversion (see enum neon_cvt_flavour) and dispatches to the VFP,
   Advanced SIMD or MVE encoding as appropriate.  MODE carries the
   rounding behaviour requested by the mnemonic suffix.  */

static void
do_neon_cvt_1 (enum neon_cvt_mode mode)
{
  enum neon_shape rs = neon_select_shape (NS_DDI, NS_QQI, NS_FFI, NS_DD, NS_QQ,
					  NS_FD, NS_DF, NS_FF, NS_QD, NS_DQ,
					  NS_FH, NS_HF, NS_FHI, NS_HFI,
					  NS_NULL);
  enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

  if (flavour == neon_cvt_flavour_invalid)
    return;

  /* PR11109: Handle round-to-zero for VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_vfp_v2)
      && (flavour == neon_cvt_flavour_s16_f16
	  || flavour == neon_cvt_flavour_u16_f16
	  || flavour == neon_cvt_flavour_s32_f32
	  || flavour == neon_cvt_flavour_u32_f32
	  || flavour == neon_cvt_flavour_s32_f64
	  || flavour == neon_cvt_flavour_u32_f64)
      && (rs == NS_FD || rs == NS_FF))
    {
      do_vfp_nsyn_cvtz ();
      return;
    }

  /* ARMv8.2 fp16 VCVT conversions.  */
  if (mode == neon_cvt_mode_z
      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16)
      && (flavour == neon_cvt_flavour_s32_f16
	  || flavour == neon_cvt_flavour_u32_f16)
      && (rs == NS_FH))
    {
      do_vfp_nsyn_cvtz ();
      do_scalar_fp16_v82_encode ();
      return;
    }

  if ((rs == NS_FD || rs == NS_QQI) && mode == neon_cvt_mode_n
      && ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* We are dealing with vcvt with the 'ne' condition.  */
      inst.cond = 0x1;
      inst.instruction = N_MNEM_vcvt;
      do_neon_cvt_1 (neon_cvt_mode_z);
      return;
    }

  /* VFP rather than Neon conversions.  */
  if (flavour >= neon_cvt_flavour_first_fp)
    {
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);

      return;
    }

  switch (rs)
    {
    case NS_QQI:
      if (mode == neon_cvt_mode_z
	  && (flavour == neon_cvt_flavour_f16_s16
	      || flavour == neon_cvt_flavour_f16_u16
	      || flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_f32_u32
	      || flavour == neon_cvt_flavour_f32_s32
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DDI:
      {
	/* Fixed-point conversion: a shift immediate is present.  enctab is
	   indexed directly by flavour.  */
	unsigned immbits;
	unsigned enctab[] = {0x0000100, 0x1000100, 0x0, 0x1000000,
			     0x0000100, 0x1000100, 0x0, 0x1000000};

	if ((rs != NS_QQI || !ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    && vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	    return;

	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	  {
	    constraint (inst.operands[2].present && inst.operands[2].imm == 0,
			_("immediate value out of range"));
	    switch (flavour)
	      {
		case neon_cvt_flavour_f16_s16:
		case neon_cvt_flavour_f16_u16:
		case neon_cvt_flavour_s16_f16:
		case neon_cvt_flavour_u16_f16:
		  constraint (inst.operands[2].imm > 16,
			      _("immediate value out of range"));
		  break;
		case neon_cvt_flavour_f32_u32:
		case neon_cvt_flavour_f32_s32:
		case neon_cvt_flavour_s32_f32:
		case neon_cvt_flavour_u32_f32:
		  constraint (inst.operands[2].imm > 32,
			      _("immediate value out of range"));
		  break;
		default:
		  inst.error = BAD_FPU;
		  return;
	      }
	  }

	/* Fixed-point conversion with #0 immediate is encoded as an
	   integer conversion.  */
	if (inst.operands[2].present && inst.operands[2].imm == 0)
	  goto int_encode;
	NEON_ENCODE (IMMED, inst);
	if (flavour != neon_cvt_flavour_invalid)
	  inst.instruction |= enctab[flavour];
	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	inst.instruction |= LOW4 (inst.operands[1].reg);
	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	inst.instruction |= neon_quad (rs) << 6;
	/* NOTE(review): bit 21 is also set by both branches below (directly,
	   or as part of the 3 << 20 mask), so this OR looks redundant —
	   confirm against the architecture manual before removing.  */
	inst.instruction |= 1 << 21;
	if (flavour < neon_cvt_flavour_s16_f16)
	  {
	    inst.instruction |= 1 << 21;
	    immbits = 32 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	  }
	else
	  {
	    inst.instruction |= 3 << 20;
	    immbits = 16 - inst.operands[2].imm;
	    inst.instruction |= immbits << 16;
	    inst.instruction &= ~(1 << 9);
	  }

	neon_dp_fixup (&inst);
      }
      break;

    case NS_QQ:
      if ((mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
	   || mode == neon_cvt_mode_m || mode == neon_cvt_mode_p)
	  && (flavour == neon_cvt_flavour_s16_f16
	      || flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s32_f32
	      || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;
	}
      else if (mode == neon_cvt_mode_z
	       && (flavour == neon_cvt_flavour_f16_s16
		   || flavour == neon_cvt_flavour_f16_u16
		   || flavour == neon_cvt_flavour_s16_f16
		   || flavour == neon_cvt_flavour_u16_f16
		   || flavour == neon_cvt_flavour_f32_u32
		   || flavour == neon_cvt_flavour_f32_s32
		   || flavour == neon_cvt_flavour_s32_f32
		   || flavour == neon_cvt_flavour_u32_f32))
	{
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH))
	    return;
	}
      /* fall through.  */
    case NS_DD:
      if (mode != neon_cvt_mode_x && mode != neon_cvt_mode_z)
	{
	  /* Rounding-mode variants: the mode enumerator is encoded
	     directly into bits 8-10.  */
	  NEON_ENCODE (FLOAT, inst);
	  if (!check_simd_pred_availability (TRUE,
					     NEON_CHECK_CC | NEON_CHECK_ARCH8))
	    return;

	  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	  inst.instruction |= LOW4 (inst.operands[1].reg);
	  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	  inst.instruction |= neon_quad (rs) << 6;
	  inst.instruction |= (flavour == neon_cvt_flavour_u16_f16
			       || flavour == neon_cvt_flavour_u32_f32) << 7;
	  inst.instruction |= mode << 8;
	  if (flavour == neon_cvt_flavour_u16_f16
	      || flavour == neon_cvt_flavour_s16_f16)
	    /* Mask off the original size bits and reencode them.  */
	    inst.instruction = ((inst.instruction & 0xfff3ffff) | (1 << 18));

	  if (thumb_mode)
	    inst.instruction |= 0xfc000000;
	  else
	    inst.instruction |= 0xf0000000;
	}
      else
	{
    int_encode:
	  {
	    /* Integer conversion; enctab is indexed directly by flavour.  */
	    unsigned enctab[] = { 0x100, 0x180, 0x0, 0x080,
				  0x100, 0x180, 0x0, 0x080};

	    NEON_ENCODE (INTEGER, inst);

	  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
		return;
	    }

	    if (flavour != neon_cvt_flavour_invalid)
	      inst.instruction |= enctab[flavour];

	    inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
	    inst.instruction |= HI1 (inst.operands[0].reg) << 22;
	    inst.instruction |= LOW4 (inst.operands[1].reg);
	    inst.instruction |= HI1 (inst.operands[1].reg) << 5;
	    inst.instruction |= neon_quad (rs) << 6;
	    if (flavour >= neon_cvt_flavour_s16_f16
		&& flavour <= neon_cvt_flavour_f16_u16)
	      /* Half precision.  */
	      inst.instruction |= 1 << 18;
	    else
	      inst.instruction |= 2 << 18;

	    neon_dp_fixup (&inst);
	  }
	}
      break;

    /* Half-precision conversions for Advanced SIMD -- neon.  */
    case NS_QD:
    case NS_DQ:
      if (vfp_or_neon_is_neon (NEON_CHECK_CC | NEON_CHECK_ARCH) == FAIL)
	return;

      if ((rs == NS_DQ)
	  && (inst.vectype.el[0].size != 16 || inst.vectype.el[1].size != 32))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if ((rs == NS_QD)
	  && ((inst.vectype.el[0].size != 32 || inst.vectype.el[1].size != 16)))
	  {
	    as_bad (_("operand size must match register width"));
	    break;
	  }

      if (rs == NS_DQ)
	{
	  if (flavour == neon_cvt_flavour_bf16_f32)
	    {
	      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH8) == FAIL)
		return;
	      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
	      /* VCVT.bf16.f32.  */
	      inst.instruction = 0x11b60640;
	    }
	  else
	    /* VCVT.f16.f32.  */
	    inst.instruction = 0x3b60600;
	}
      else
	/* VCVT.f32.f16.  */
	inst.instruction = 0x3b60700;

      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg);
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      neon_dp_fixup (&inst);
      break;

    default:
      /* Some VFP conversions go here (s32 <-> f32, u32 <-> f32).  */
      if (mode == neon_cvt_mode_x || mode == neon_cvt_mode_z)
	do_vfp_nsyn_cvt (rs, flavour);
      else
	do_vfp_nsyn_cvt_fpv8 (flavour, mode);
    }
}
19334
/* VCVTR: convert using the rounding mode currently in the FPSCR.  */
static void
do_neon_cvtr (void)
{
  do_neon_cvt_1 (neon_cvt_mode_x);
}

/* Plain VCVT: round towards zero for float-to-integer conversions.  */
static void
do_neon_cvt (void)
{
  do_neon_cvt_1 (neon_cvt_mode_z);
}

/* VCVTA: round to nearest, ties away from zero.  */
static void
do_neon_cvta (void)
{
  do_neon_cvt_1 (neon_cvt_mode_a);
}

/* VCVTN: round to nearest, ties to even.  */
static void
do_neon_cvtn (void)
{
  do_neon_cvt_1 (neon_cvt_mode_n);
}

/* VCVTP: round towards plus infinity.  */
static void
do_neon_cvtp (void)
{
  do_neon_cvt_1 (neon_cvt_mode_p);
}

/* VCVTM: round towards minus infinity.  */
static void
do_neon_cvtm (void)
{
  do_neon_cvt_1 (neon_cvt_mode_m);
}
19370
19371static void
19372do_neon_cvttb_2 (bfd_boolean t, bfd_boolean to, bfd_boolean is_double)
19373{
19374  if (is_double)
19375    mark_feature_used (&fpu_vfp_ext_armv8);
19376
19377  encode_arm_vfp_reg (inst.operands[0].reg,
19378		      (is_double && !to) ? VFP_REG_Dd : VFP_REG_Sd);
19379  encode_arm_vfp_reg (inst.operands[1].reg,
19380		      (is_double && to) ? VFP_REG_Dm : VFP_REG_Sm);
19381  inst.instruction |= to ? 0x10000 : 0;
19382  inst.instruction |= t ? 0x80 : 0;
19383  inst.instruction |= is_double ? 0x100 : 0;
19384  do_vfp_cond_or_thumb ();
19385}
19386
/* Common encoder for VCVTB and VCVTT (T is TRUE for the top-half form).
   Dispatches between the VFP half<->single and half<->double forms, the
   vector (MVE) forms, and the BFloat16 VCVT{B,T}.BF16.F32 form.  */

static void
do_neon_cvttb_1 (bfd_boolean t)
{
  enum neon_shape rs = neon_select_shape (NS_HF, NS_HD, NS_FH, NS_FF, NS_FD,
					  NS_DF, NS_DH, NS_QQ, NS_QQI, NS_NULL);

  if (rs == NS_NULL)
    return;
  else if (rs == NS_QQ || rs == NS_QQI)
    {
      /* Vector forms.  */
      int single_to_half = 0;
      if (!check_simd_pred_availability (TRUE, NEON_CHECK_ARCH))
	return;

      enum neon_cvt_flavour flavour = get_neon_cvt_flavour (rs);

      /* For MVE, an integer <-> float VCVTB/VCVTT is re-encoded as a
	 predicated VCVT with round-towards-zero.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	  && (flavour ==  neon_cvt_flavour_u16_f16
	      || flavour ==  neon_cvt_flavour_s16_f16
	      || flavour ==  neon_cvt_flavour_f16_s16
	      || flavour ==  neon_cvt_flavour_f16_u16
	      || flavour ==  neon_cvt_flavour_u32_f32
	      || flavour ==  neon_cvt_flavour_s32_f32
	      || flavour ==  neon_cvt_flavour_f32_s32
	      || flavour ==  neon_cvt_flavour_f32_u32))
	{
	  inst.cond = 0xf;
	  inst.instruction = N_MNEM_vcvt;
	  set_pred_insn_type (INSIDE_VPT_INSN);
	  do_neon_cvt_1 (neon_cvt_mode_z);
	  return;
	}
      else if (rs == NS_QQ && flavour == neon_cvt_flavour_f32_f16)
	single_to_half = 1;
      else if (rs == NS_QQ && flavour != neon_cvt_flavour_f16_f32)
	{
	  first_error (BAD_FPU);
	  return;
	}

      /* Vector half <-> single conversion.  Bit 28 distinguishes the two
	 directions; bit 12 carries the top/bottom-half selector.  */
      inst.instruction = 0xee3f0e01;
      inst.instruction |= single_to_half << 28;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 13;
      inst.instruction |= t << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 1;
      inst.is_neon = 1;
    }
  else if (neon_check_type (2, rs, N_F16, N_F32 | N_VFP).type != NT_invtype)
    {
      /* VFP single -> half.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F32 | N_VFP, N_F16).type != NT_invtype)
    {
      /* VFP half -> single.  */
      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/FALSE);
    }
  else if (neon_check_type (2, rs, N_F16, N_F64 | N_VFP).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_F64 | N_VFP, N_F16).type != NT_invtype)
    {
      /* The VCVTB and VCVTT instructions with D-register operands
         don't work for SP only targets.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
		  _(BAD_FPU));

      inst.error = NULL;
      do_neon_cvttb_2 (t, /*to=*/FALSE, /*is_double=*/TRUE);
    }
  else if (neon_check_type (2, rs, N_BF16 | N_VFP, N_F32).type != NT_invtype)
    {
      /* BFloat16 form: VCVT{B,T}.BF16.F32.  */
      constraint (!mark_feature_used (&arm_ext_bf16), _(BAD_BF16));
      inst.error = NULL;
      inst.instruction |= (1 << 8);
      inst.instruction &= ~(1 << 9);
      do_neon_cvttb_2 (t, /*to=*/TRUE, /*is_double=*/FALSE);
    }
  else
    return;
}
19477
/* Encode VCVTB: convert using the bottom half of the half-precision
   register.  */

static void
do_neon_cvtb (void)
{
  do_neon_cvttb_1 (FALSE);
}
19483
19484
/* Encode VCVTT: convert using the top half of the half-precision
   register.  */

static void
do_neon_cvtt (void)
{
  do_neon_cvttb_1 (TRUE);
}
19490
/* Encode the immediate forms of VMOV/VMVN.  Searches for a cmode/op
   encoding of the (possibly 64-bit) immediate; if the direct encoding
   fails, retries with the bitwise-inverted immediate and the opposite
   instruction (VMOV <-> VMVN).  */

static void
neon_move_immediate (void)
{
  enum neon_shape rs = neon_select_shape (NS_DI, NS_QI, NS_NULL);
  struct neon_type_el et = neon_check_type (2, rs,
    N_I8 | N_I16 | N_I32 | N_I64 | N_F32 | N_KEY, N_EQK);
  unsigned immlo, immhi = 0, immbits;
  int op, cmode, float_p;

  constraint (et.type == NT_invtype,
	      _("operand size must be specified for immediate VMOV"));

  /* We start out as an MVN instruction if OP = 1, MOV otherwise.  */
  op = (inst.instruction & (1 << 5)) != 0;

  /* When regisimm is set the operand parser has stored the high word of a
     64-bit immediate in the reg field.  */
  immlo = inst.operands[1].imm;
  if (inst.operands[1].regisimm)
    immhi = inst.operands[1].reg;

  constraint (et.size < 32 && (immlo & ~((1 << et.size) - 1)) != 0,
	      _("immediate has bits set outside the operand size"));

  float_p = inst.operands[1].immisfloat;

  if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits, &op,
					et.size, et.type)) == FAIL)
    {
      /* Invert relevant bits only.  */
      neon_invert_size (&immlo, &immhi, et.size);
      /* Flip from VMOV/VMVN to VMVN/VMOV. Some immediate types are unavailable
	 with one or the other; those cases are caught by
	 neon_cmode_for_move_imm.  */
      op = !op;
      if ((cmode = neon_cmode_for_move_imm (immlo, immhi, float_p, &immbits,
					    &op, et.size, et.type)) == FAIL)
	{
	  first_error (_("immediate out of range"));
	  return;
	}
    }

  /* Write back the (possibly flipped) op bit and the remaining fields.  */
  inst.instruction &= ~(1 << 5);
  inst.instruction |= op << 5;

  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= neon_quad (rs) << 6;
  inst.instruction |= cmode << 8;

  neon_write_immbits (immbits);
}
19542
19543static void
19544do_neon_mvn (void)
19545{
19546  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
19547    return;
19548
19549  if (inst.operands[1].isreg)
19550    {
19551      enum neon_shape rs;
19552      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
19553	rs = neon_select_shape (NS_QQ, NS_NULL);
19554      else
19555	rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
19556
19557      if (rs == NS_NULL)
19558	return;
19559
19560      NEON_ENCODE (INTEGER, inst);
19561      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19562      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19563      inst.instruction |= LOW4 (inst.operands[1].reg);
19564      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
19565      inst.instruction |= neon_quad (rs) << 6;
19566    }
19567  else
19568    {
19569      NEON_ENCODE (IMMED, inst);
19570      neon_move_immediate ();
19571    }
19572
19573  neon_dp_fixup (&inst);
19574
19575  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
19576    {
19577      constraint (!inst.operands[1].isreg && !inst.operands[0].isquad, BAD_FPU);
19578    }
19579}
19580
19581/* Encode instructions of form:
19582
19583  |28/24|23|22|21 20|19 16|15 12|11    8|7|6|5|4|3  0|
19584  |  U  |x |D |size | Rn  | Rd  |x x x x|N|x|M|x| Rm |  */
19585
19586static void
19587neon_mixed_length (struct neon_type_el et, unsigned size)
19588{
19589  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19590  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19591  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
19592  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
19593  inst.instruction |= LOW4 (inst.operands[2].reg);
19594  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
19595  inst.instruction |= (et.type == NT_unsigned) << 24;
19596  inst.instruction |= neon_logbits (size) << 20;
19597
19598  neon_dp_fixup (&inst);
19599}
19600
/* Encode VADDL/VSUBL/VABDL.  For Neon these are true long operations
   (Q = D op D); for MVE they only exist as spellings of VADD/VSUB/VABD
   inside an IT block with LE/LT condition codes.  */

static void
do_neon_dyadic_long (void)
{
  enum neon_shape rs = neon_select_shape (NS_QDD, NS_HHH, NS_FFF, NS_DDD, NS_NULL);
  if (rs == NS_QDD)
    {
      if (vfp_or_neon_is_neon (NEON_CHECK_ARCH | NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (INTEGER, inst);
      /* FIXME: Type checking for lengthening op.  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, N_SU_32 | N_KEY);
      neon_mixed_length (et, et.size);
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	   && (inst.cond == 0xf || inst.cond == 0x10))
    {
      /* If parsing for MVE, vaddl/vsubl/vabdl{e,t} can only be vadd/vsub/vabd
	 in an IT block with le/lt conditions.  */

      /* Convert the parsed placeholder conditions to LT (0xb) / LE (0xd).  */
      if (inst.cond == 0xf)
	inst.cond = 0xb;
      else if (inst.cond == 0x10)
	inst.cond = 0xd;

      inst.pred_insn_type = INSIDE_IT_INSN;

      /* Re-dispatch through the corresponding non-long mnemonic's
	 encoder.  */
      if (inst.instruction == N_MNEM_vaddl)
	{
	  inst.instruction = N_MNEM_vadd;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vsubl)
	{
	  inst.instruction = N_MNEM_vsub;
	  do_neon_addsub_if_i ();
	}
      else if (inst.instruction == N_MNEM_vabdl)
	{
	  inst.instruction = N_MNEM_vabd;
	  do_neon_dyadic_if_su ();
	}
    }
  else
    first_error (BAD_FPU);
}
19648
/* Encode VABAL (absolute difference and accumulate, long form).  */

static void
do_neon_abal (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_INT | N_DBL, N_EQK, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19656
/* Encode a long multiply(-accumulate) whose third operand may be either a
   scalar or a register.  NOTE(review): despite the parameter names,
   REGTYPES constrains the scalar variant and SCALARTYPES the register
   variant -- callers pass their type masks in that order.  */

static void
neon_mac_reg_scalar_long (unsigned regtypes, unsigned scalartypes)
{
  if (inst.operands[2].isscalar)
    {
      /* Scalar form (Q, D, D[x] shape).  */
      struct neon_type_el et = neon_check_type (3, NS_QDS,
	N_EQK | N_DBL, N_EQK, regtypes | N_KEY);
      NEON_ENCODE (SCALAR, inst);
      neon_mul_mac (et, et.type == NT_unsigned);
    }
  else
    {
      /* Register form (Q, D, D shape).  */
      struct neon_type_el et = neon_check_type (3, NS_QDD,
	N_EQK | N_DBL, N_EQK, scalartypes | N_KEY);
      NEON_ENCODE (INTEGER, inst);
      neon_mixed_length (et, et.size);
    }
}
19675
/* Encode long multiply-accumulate instructions: the scalar form is
   restricted to 16/32-bit S/U types, the register form takes N_SU_32.  */

static void
do_neon_mac_maybe_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32 | N_U16 | N_U32, N_SU_32);
}
19681
/* Like neon_scalar_for_mul, this function generates the Rm encoding from
   GAS's internal SCALAR.  QUAD_P is 1 if it's for Q format, otherwise it's 0.  */
19684
static unsigned
neon_scalar_for_fmac_fp16_long (unsigned scalar, unsigned quad_p)
{
  unsigned reg = NEON_SCALAR_REG (scalar);
  unsigned el = NEON_SCALAR_INDEX (scalar);

  if (quad_p)
    {
      /* Q form: 3-bit register, 2-bit element index split across two
	 fields.  */
      if (reg <= 7 && el <= 3)
	return ((reg & 0x7)
		| ((el & 0x1) << 3)
		| (((el >> 1) & 0x1) << 5));
    }
  else
    {
      /* D form: 4-bit register split across two fields, 1-bit element
	 index.  */
      if (reg <= 15 && el <= 1)
	return (((reg & 0x1) << 5)
		| ((reg >> 1) & 0x7)
		| ((el & 0x1) << 3));
    }

  first_error (_("scalar out of range for multiply instruction"));
  return 0;
}
19714
/* Encode VFMAL/VFMSL (half-precision multiply-accumulate long).  SUBTYPE
   is nonzero for VFMSL.  Handles both the three-same register forms and
   the scalar-indexed forms, which use different high bytes and different
   Vn/Vm field layouts.  */

static void
do_neon_fmac_maybe_scalar_long (int subtype)
{
  enum neon_shape rs;
  int high8;
  /* NOTE: vfmal/vfmsl use slightly different NEON three-same encoding.  'size'
     field (bits[21:20]) has different meaning.  For scalar index variant, it's
     used to differentiate add and subtract, otherwise it's with fixed value
     0x2.  */
  int size = -1;

  /* vfmal/vfmsl are in three-same D/Q register format or the third operand can
     be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      high8 = 0xfe000000;
      if (subtype)
	size = 16;
      rs = neon_select_shape (NS_DHS, NS_QDS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      size = 32;
      if (subtype)
	inst.instruction |= (0x1 << 23);
      rs = neon_select_shape (NS_DHH, NS_QDD, NS_NULL);
    }


  if (inst.cond != COND_ALWAYS)
    as_warn (_("vfmal/vfmsl with FP16 type cannot be conditional, the "
	       "behaviour is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_fp16_fml),
	      _(BAD_FP16));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* "opcode" from template has included "ubit", so simply pass 0 here.  Also,
     the "S" bit in size field has been reused to differentiate vfmal and vfmsl,
     so we simply pass -1 as size.  */
  unsigned quad_p = (rs == NS_QDD || rs == NS_QDS);
  neon_three_same (quad_p, 0, size);

  /* Undo neon_dp_fixup.  Redo the high eight bits.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;

  /* Unlike the usual NEON three-same, encoding for Vn and Vm will depend on
     whether the instruction is in Q form and whether Vm is a scalar indexed
     operand.  */
  if (inst.operands[2].isscalar)
    {
      unsigned rm
	= neon_scalar_for_fmac_fp16_long (inst.operands[2].reg, quad_p);
      inst.instruction &= 0xffffffd0;
      inst.instruction |= rm;

      if (!quad_p)
	{
	  /* Redo Rn as well.  */
	  inst.instruction &= 0xfff0ff7f;
	  inst.instruction |= HI4 (inst.operands[1].reg) << 16;
	  inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
	}
    }
  else if (!quad_p)
    {
      /* Redo Rn and Rm.  */
      inst.instruction &= 0xfff0ff50;
      inst.instruction |= HI4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI4 (inst.operands[2].reg);
      inst.instruction |= LOW1 (inst.operands[2].reg) << 5;
    }
}
19793
/* Encode VFMAL (floating-point multiply-add long).  */

static void
do_neon_vfmal (void)
{
  /* ISO C (C11 6.8.6.4) forbids `return expr;' in a void function, so
     call the helper as a plain statement.  */
  do_neon_fmac_maybe_scalar_long (0);
}
19799
/* Encode VFMSL (floating-point multiply-subtract long).  */

static void
do_neon_vfmsl (void)
{
  /* ISO C (C11 6.8.6.4) forbids `return expr;' in a void function, so
     call the helper as a plain statement.  */
  do_neon_fmac_maybe_scalar_long (1);
}
19805
/* Encode wide dyadic operations (Q = Q op D shape).  */

static void
do_neon_dyadic_wide (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QQD,
    N_EQK | N_DBL, N_EQK | N_DBL, N_SU_32 | N_KEY);
  neon_mixed_length (et, et.size);
}
19813
/* Encode narrowing dyadic operations; the encoded element size is half
   the size given in the type suffix.  */

static void
do_neon_dyadic_narrow (void)
{
  struct neon_type_el et = neon_check_type (3, NS_QDD,
    N_EQK | N_DBL, N_EQK, N_I16 | N_I32 | N_I64 | N_KEY);
  /* Operand sign is unimportant, and the U bit is part of the opcode,
     so force the operand type to integer.  */
  et.type = NT_integer;
  neon_mixed_length (et, et.size / 2);
}
19824
/* Encode signed-only saturating long multiplies (16/32-bit element types,
   scalar or register third operand).  */

static void
do_neon_mul_sat_scalar_long (void)
{
  neon_mac_reg_scalar_long (N_S16 | N_S32, N_S16 | N_S32);
}
19830
19831static void
19832do_neon_vmull (void)
19833{
19834  if (inst.operands[2].isscalar)
19835    do_neon_mac_maybe_scalar_long ();
19836  else
19837    {
19838      struct neon_type_el et = neon_check_type (3, NS_QDD,
19839	N_EQK | N_DBL, N_EQK, N_SU_32 | N_P8 | N_P64 | N_KEY);
19840
19841      if (et.type == NT_poly)
19842	NEON_ENCODE (POLY, inst);
19843      else
19844	NEON_ENCODE (INTEGER, inst);
19845
19846      /* For polynomial encoding the U bit must be zero, and the size must
19847	 be 8 (encoded as 0b00) or, on ARMv8 or later 64 (encoded, non
19848	 obviously, as 0b10).  */
19849      if (et.size == 64)
19850	{
19851	  /* Check we're on the correct architecture.  */
19852	  if (!mark_feature_used (&fpu_crypto_ext_armv8))
19853	    inst.error =
19854	      _("Instruction form not available on this architecture.");
19855
19856	  et.size = 32;
19857	}
19858
19859      neon_mixed_length (et, et.size);
19860    }
19861}
19862
19863static void
19864do_neon_ext (void)
19865{
19866  enum neon_shape rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
19867  struct neon_type_el et = neon_check_type (3, rs,
19868    N_EQK, N_EQK, N_8 | N_16 | N_32 | N_64 | N_KEY);
19869  unsigned imm = (inst.operands[3].imm * et.size) / 8;
19870
19871  constraint (imm >= (unsigned) (neon_quad (rs) ? 16 : 8),
19872	      _("shift out of range"));
19873  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
19874  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
19875  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
19876  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
19877  inst.instruction |= LOW4 (inst.operands[2].reg);
19878  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
19879  inst.instruction |= neon_quad (rs) << 6;
19880  inst.instruction |= imm << 8;
19881
19882  neon_dp_fixup (&inst);
19883}
19884
19885static void
19886do_neon_rev (void)
19887{
19888  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
19889   return;
19890
19891  enum neon_shape rs;
19892  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
19893    rs = neon_select_shape (NS_QQ, NS_NULL);
19894  else
19895    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
19896
19897  struct neon_type_el et = neon_check_type (2, rs,
19898    N_EQK, N_8 | N_16 | N_32 | N_KEY);
19899
19900  unsigned op = (inst.instruction >> 7) & 3;
19901  /* N (width of reversed regions) is encoded as part of the bitmask. We
19902     extract it here to check the elements to be reversed are smaller.
19903     Otherwise we'd get a reserved instruction.  */
19904  unsigned elsize = (op == 2) ? 16 : (op == 1) ? 32 : (op == 0) ? 64 : 0;
19905
19906  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext) && elsize == 64
19907      && inst.operands[0].reg == inst.operands[1].reg)
19908    as_tsktsk (_("Warning: 64-bit element size and same destination and source"
19909		 " operands makes instruction UNPREDICTABLE"));
19910
19911  gas_assert (elsize != 0);
19912  constraint (et.size >= elsize,
19913	      _("elements must be smaller than reversal region"));
19914  neon_two_same (neon_quad (rs), 1, et.size);
19915}
19916
/* Encode VDUP, either from a Neon scalar (VDUP <Qd/Dd>, <Dm[x]>) or from
   an ARM core register (VDUP <Qd/Dd>, <Rm>).  */

static void
do_neon_dup (void)
{
  if (inst.operands[1].isscalar)
    {
      /* Scalar source: Neon only.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		  BAD_FPU);
      enum neon_shape rs = neon_select_shape (NS_DS, NS_QS, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_EQK, N_8 | N_16 | N_32 | N_KEY);
      unsigned sizebits = et.size >> 3;
      unsigned dm = NEON_SCALAR_REG (inst.operands[1].reg);
      int logsize = neon_logbits (et.size);
      /* Lane index, pre-shifted so it lands above the size bits.  */
      unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg) << logsize;

      if (vfp_or_neon_is_neon (NEON_CHECK_CC) == FAIL)
	return;

      NEON_ENCODE (SCALAR, inst);
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (dm);
      inst.instruction |= HI1 (dm) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= x << 17;
      inst.instruction |= sizebits << 16;

      neon_dp_fixup (&inst);
    }
  else
    {
      enum neon_shape rs = neon_select_shape (NS_DR, NS_QR, NS_NULL);
      struct neon_type_el et = neon_check_type (2, rs,
	N_8 | N_16 | N_32 | N_KEY, N_EQK);
      /* The Q-register form also exists in MVE.  */
      if (rs == NS_QR)
	{
	  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH))
	    return;
	}
      else
	constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_v1),
		    BAD_FPU);

      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
	{
	  /* SP/PC sources are diagnosed but still encoded.  */
	  if (inst.operands[1].reg == REG_SP)
	    as_tsktsk (MVE_BAD_SP);
	  else if (inst.operands[1].reg == REG_PC)
	    as_tsktsk (MVE_BAD_PC);
	}

      /* Duplicate ARM register to lanes of vector.  */
      NEON_ENCODE (ARMREG, inst);
      /* Element size is spread over bits 22 and 5.  */
      switch (et.size)
	{
	case 8:  inst.instruction |= 0x400000; break;
	case 16: inst.instruction |= 0x000020; break;
	case 32: inst.instruction |= 0x000000; break;
	default: break;
	}
      inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 16;
      inst.instruction |= HI1 (inst.operands[0].reg) << 7;
      inst.instruction |= neon_quad (rs) << 21;
      /* The encoding for this instruction is identical for the ARM and Thumb
	 variants, except for the condition field.  */
      do_vfp_cond_or_thumb ();
    }
}
19986
/* Encode the MVE VMOV forms that transfer two GPRs to or from two vector
   lanes (cases 16/17 in the VMOV comment below).  TOQ is nonzero for the
   GPR -> vector direction; it selects which parsed operands are the GPRs
   and which the Q-register lanes, and sets bit 20.  */

static void
do_mve_mov (int toQ)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = MVE_UNPREDICABLE_INSN;

  /* Operand slots: vector -> GPR parses the GPRs first, GPR -> vector
     parses the lane operands first.  */
  unsigned Rt = 0, Rt2 = 1, Q0 = 2, Q1 = 3;
  if (toQ)
    {
      Q0 = 0;
      Q1 = 1;
      Rt = 2;
      Rt2 = 3;
    }

  constraint (inst.operands[Q0].reg != inst.operands[Q1].reg + 2,
	      _("Index one must be [2,3] and index two must be two less than"
		" index one."));
  constraint (inst.operands[Rt].reg == inst.operands[Rt2].reg,
	      _("General purpose registers may not be the same"));
  constraint (inst.operands[Rt].reg == REG_SP
	      || inst.operands[Rt2].reg == REG_SP,
	      BAD_SP);
  constraint (inst.operands[Rt].reg == REG_PC
	      || inst.operands[Rt2].reg == REG_PC,
	      BAD_PC);

  /* NOTE(review): the lane operands appear to pack the Q register number
     and the lane index into one value (reg / 32 and reg % 4 below);
     confirm against the operand parser.  */
  inst.instruction = 0xec000f00;
  inst.instruction |= HI1 (inst.operands[Q1].reg / 32) << 23;
  inst.instruction |= !!toQ << 20;
  inst.instruction |= inst.operands[Rt2].reg << 16;
  inst.instruction |= LOW4 (inst.operands[Q1].reg / 32) << 13;
  inst.instruction |= (inst.operands[Q1].reg % 4) << 4;
  inst.instruction |= inst.operands[Rt].reg;
}
20024
/* Encode MVE VMOVN (vector move and narrow).  */

static void
do_mve_movn (void)
{
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    return;

  /* Predicated inside a VPT block, otherwise an unpredicated MVE
     instruction.  */
  if (inst.cond > COND_ALWAYS)
    inst.pred_insn_type = INSIDE_VPT_INSN;
  else
    inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;

  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_I16 | N_I32
					    | N_KEY);

  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  /* Size field at bit 18, derived from the element width.  */
  inst.instruction |= (neon_logbits (et.size) - 1) << 18;
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.is_neon = 1;

}
20047
20048/* VMOV has particularly many variations. It can be one of:
20049     0. VMOV<c><q> <Qd>, <Qm>
20050     1. VMOV<c><q> <Dd>, <Dm>
20051   (Register operations, which are VORR with Rm = Rn.)
20052     2. VMOV<c><q>.<dt> <Qd>, #<imm>
20053     3. VMOV<c><q>.<dt> <Dd>, #<imm>
20054   (Immediate loads.)
20055     4. VMOV<c><q>.<size> <Dn[x]>, <Rd>
20056   (ARM register to scalar.)
20057     5. VMOV<c><q> <Dm>, <Rd>, <Rn>
20058   (Two ARM registers to vector.)
20059     6. VMOV<c><q>.<dt> <Rd>, <Dn[x]>
20060   (Scalar to ARM register.)
20061     7. VMOV<c><q> <Rd>, <Rn>, <Dm>
20062   (Vector to two ARM registers.)
20063     8. VMOV.F32 <Sd>, <Sm>
20064     9. VMOV.F64 <Dd>, <Dm>
20065   (VFP register moves.)
20066    10. VMOV.F32 <Sd>, #imm
20067    11. VMOV.F64 <Dd>, #imm
20068   (VFP float immediate load.)
20069    12. VMOV <Rd>, <Sm>
20070   (VFP single to ARM reg.)
20071    13. VMOV <Sd>, <Rm>
20072   (ARM reg to VFP single.)
20073    14. VMOV <Rd>, <Re>, <Sn>, <Sm>
20074   (Two ARM regs to two VFP singles.)
20075    15. VMOV <Sd>, <Se>, <Rn>, <Rm>
20076   (Two VFP singles to two ARM regs.)
20077   16. VMOV<c> <Rt>, <Rt2>, <Qd[idx]>, <Qd[idx2]>
20078   17. VMOV<c> <Qd[idx]>, <Qd[idx2]>, <Rt>, <Rt2>
20079   18. VMOV<c>.<dt> <Rt>, <Qn[idx]>
20080   19. VMOV<c>.<dt> <Qd[idx]>, <Rt>
20081
20082   These cases can be disambiguated using neon_select_shape, except cases 1/9
20083   and 3/11 which depend on the operand type too.
20084
20085   All the encoded bits are hardcoded by this function.
20086
20087   Cases 4, 6 may be used with VFPv1 and above (only 32-bit transfers!).
20088   Cases 5, 7 may be used with VFPv2 and above.
20089
20090   FIXME: Some of the checking may be a bit sloppy (in a couple of cases you
20091   can specify a type where it doesn't make sense to, and is ignored).  */
20092
20093static void
20094do_neon_mov (void)
20095{
20096  enum neon_shape rs = neon_select_shape (NS_RRSS, NS_SSRR, NS_RRFF, NS_FFRR,
20097					  NS_DRR, NS_RRD, NS_QQ, NS_DD, NS_QI,
20098					  NS_DI, NS_SR, NS_RS, NS_FF, NS_FI,
20099					  NS_RF, NS_FR, NS_HR, NS_RH, NS_HI,
20100					  NS_NULL);
20101  struct neon_type_el et;
20102  const char *ldconst = 0;
20103
20104  switch (rs)
20105    {
20106    case NS_DD:  /* case 1/9.  */
20107      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
20108      /* It is not an error here if no type is given.  */
20109      inst.error = NULL;
20110
20111      /* In MVE we interpret the following instructions as same, so ignoring
20112	 the following type (float) and size (64) checks.
20113	 a: VMOV<c><q> <Dd>, <Dm>
20114	 b: VMOV<c><q>.F64 <Dd>, <Dm>.  */
20115      if ((et.type == NT_float && et.size == 64)
20116	  || (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)))
20117	{
20118	  do_vfp_nsyn_opcode ("fcpyd");
20119	  break;
20120	}
20121      /* fall through.  */
20122
20123    case NS_QQ:  /* case 0/1.  */
20124      {
20125	if (!check_simd_pred_availability (FALSE,
20126					   NEON_CHECK_CC | NEON_CHECK_ARCH))
20127	  return;
20128	/* The architecture manual I have doesn't explicitly state which
20129	   value the U bit should have for register->register moves, but
20130	   the equivalent VORR instruction has U = 0, so do that.  */
20131	inst.instruction = 0x0200110;
20132	inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20133	inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20134	inst.instruction |= LOW4 (inst.operands[1].reg);
20135	inst.instruction |= HI1 (inst.operands[1].reg) << 5;
20136	inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
20137	inst.instruction |= HI1 (inst.operands[1].reg) << 7;
20138	inst.instruction |= neon_quad (rs) << 6;
20139
20140	neon_dp_fixup (&inst);
20141      }
20142      break;
20143
20144    case NS_DI:  /* case 3/11.  */
20145      et = neon_check_type (2, rs, N_EQK, N_F64 | N_KEY);
20146      inst.error = NULL;
20147      if (et.type == NT_float && et.size == 64)
20148	{
20149	  /* case 11 (fconstd).  */
20150	  ldconst = "fconstd";
20151	  goto encode_fconstd;
20152	}
20153      /* fall through.  */
20154
20155    case NS_QI:  /* case 2/3.  */
20156      if (!check_simd_pred_availability (FALSE,
20157					 NEON_CHECK_CC | NEON_CHECK_ARCH))
20158	return;
20159      inst.instruction = 0x0800010;
20160      neon_move_immediate ();
20161      neon_dp_fixup (&inst);
20162      break;
20163
20164    case NS_SR:  /* case 4.  */
20165      {
20166	unsigned bcdebits = 0;
20167	int logsize;
20168	unsigned dn = NEON_SCALAR_REG (inst.operands[0].reg);
20169	unsigned x = NEON_SCALAR_INDEX (inst.operands[0].reg);
20170
20171	/* .<size> is optional here, defaulting to .32. */
20172	if (inst.vectype.elems == 0
20173	    && inst.operands[0].vectype.type == NT_invtype
20174	    && inst.operands[1].vectype.type == NT_invtype)
20175	  {
20176	    inst.vectype.el[0].type = NT_untyped;
20177	    inst.vectype.el[0].size = 32;
20178	    inst.vectype.elems = 1;
20179	  }
20180
20181	et = neon_check_type (2, NS_NULL, N_8 | N_16 | N_32 | N_KEY, N_EQK);
20182	logsize = neon_logbits (et.size);
20183
20184	if (et.size != 32)
20185	  {
20186	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
20187		&& vfp_or_neon_is_neon (NEON_CHECK_ARCH) == FAIL)
20188	      return;
20189	  }
20190	else
20191	  {
20192	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
20193			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20194			_(BAD_FPU));
20195	  }
20196
20197	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20198	  {
20199	    if (inst.operands[1].reg == REG_SP)
20200	      as_tsktsk (MVE_BAD_SP);
20201	    else if (inst.operands[1].reg == REG_PC)
20202	      as_tsktsk (MVE_BAD_PC);
20203	  }
20204	unsigned size = inst.operands[0].isscalar == 1 ? 64 : 128;
20205
20206	constraint (et.type == NT_invtype, _("bad type for scalar"));
20207	constraint (x >= size / et.size, _("scalar index out of range"));
20208
20209
20210	switch (et.size)
20211	  {
20212	  case 8:  bcdebits = 0x8; break;
20213	  case 16: bcdebits = 0x1; break;
20214	  case 32: bcdebits = 0x0; break;
20215	  default: ;
20216	  }
20217
20218	bcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
20219
20220	inst.instruction = 0xe000b10;
20221	do_vfp_cond_or_thumb ();
20222	inst.instruction |= LOW4 (dn) << 16;
20223	inst.instruction |= HI1 (dn) << 7;
20224	inst.instruction |= inst.operands[1].reg << 12;
20225	inst.instruction |= (bcdebits & 3) << 5;
20226	inst.instruction |= ((bcdebits >> 2) & 3) << 21;
20227	inst.instruction |= (x >> (3-logsize)) << 16;
20228      }
20229      break;
20230
20231    case NS_DRR:  /* case 5 (fmdrr).  */
20232      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20233		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20234		  _(BAD_FPU));
20235
20236      inst.instruction = 0xc400b10;
20237      do_vfp_cond_or_thumb ();
20238      inst.instruction |= LOW4 (inst.operands[0].reg);
20239      inst.instruction |= HI1 (inst.operands[0].reg) << 5;
20240      inst.instruction |= inst.operands[1].reg << 12;
20241      inst.instruction |= inst.operands[2].reg << 16;
20242      break;
20243
20244    case NS_RS:  /* case 6.  */
20245      {
20246	unsigned logsize;
20247	unsigned dn = NEON_SCALAR_REG (inst.operands[1].reg);
20248	unsigned x = NEON_SCALAR_INDEX (inst.operands[1].reg);
20249	unsigned abcdebits = 0;
20250
20251	/* .<dt> is optional here, defaulting to .32. */
20252	if (inst.vectype.elems == 0
20253	    && inst.operands[0].vectype.type == NT_invtype
20254	    && inst.operands[1].vectype.type == NT_invtype)
20255	  {
20256	    inst.vectype.el[0].type = NT_untyped;
20257	    inst.vectype.el[0].size = 32;
20258	    inst.vectype.elems = 1;
20259	  }
20260
20261	et = neon_check_type (2, NS_NULL,
20262			      N_EQK, N_S8 | N_S16 | N_U8 | N_U16 | N_32 | N_KEY);
20263	logsize = neon_logbits (et.size);
20264
20265	if (et.size != 32)
20266	  {
20267	    if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
20268		&& vfp_or_neon_is_neon (NEON_CHECK_CC
20269					| NEON_CHECK_ARCH) == FAIL)
20270	      return;
20271	  }
20272	else
20273	  {
20274	    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1)
20275			&& !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20276			_(BAD_FPU));
20277	  }
20278
20279	if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20280	  {
20281	    if (inst.operands[0].reg == REG_SP)
20282	      as_tsktsk (MVE_BAD_SP);
20283	    else if (inst.operands[0].reg == REG_PC)
20284	      as_tsktsk (MVE_BAD_PC);
20285	  }
20286
20287	unsigned size = inst.operands[1].isscalar == 1 ? 64 : 128;
20288
20289	constraint (et.type == NT_invtype, _("bad type for scalar"));
20290	constraint (x >= size / et.size, _("scalar index out of range"));
20291
20292	switch (et.size)
20293	  {
20294	  case 8:  abcdebits = (et.type == NT_signed) ? 0x08 : 0x18; break;
20295	  case 16: abcdebits = (et.type == NT_signed) ? 0x01 : 0x11; break;
20296	  case 32: abcdebits = 0x00; break;
20297	  default: ;
20298	  }
20299
20300	abcdebits |= (x & ((1 << (3-logsize)) - 1)) << logsize;
20301	inst.instruction = 0xe100b10;
20302	do_vfp_cond_or_thumb ();
20303	inst.instruction |= LOW4 (dn) << 16;
20304	inst.instruction |= HI1 (dn) << 7;
20305	inst.instruction |= inst.operands[0].reg << 12;
20306	inst.instruction |= (abcdebits & 3) << 5;
20307	inst.instruction |= (abcdebits >> 2) << 21;
20308	inst.instruction |= (x >> (3-logsize)) << 16;
20309      }
20310      break;
20311
20312    case NS_RRD:  /* case 7 (fmrrd).  */
20313      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20314		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20315		  _(BAD_FPU));
20316
20317      inst.instruction = 0xc500b10;
20318      do_vfp_cond_or_thumb ();
20319      inst.instruction |= inst.operands[0].reg << 12;
20320      inst.instruction |= inst.operands[1].reg << 16;
20321      inst.instruction |= LOW4 (inst.operands[2].reg);
20322      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
20323      break;
20324
20325    case NS_FF:  /* case 8 (fcpys).  */
20326      do_vfp_nsyn_opcode ("fcpys");
20327      break;
20328
20329    case NS_HI:
20330    case NS_FI:  /* case 10 (fconsts).  */
20331      ldconst = "fconsts";
20332    encode_fconstd:
20333      if (!inst.operands[1].immisfloat)
20334	{
20335	  unsigned new_imm;
20336	  /* Immediate has to fit in 8 bits so float is enough.  */
20337	  float imm = (float) inst.operands[1].imm;
20338	  memcpy (&new_imm, &imm, sizeof (float));
20339	  /* But the assembly may have been written to provide an integer
20340	     bit pattern that equates to a float, so check that the
20341	     conversion has worked.  */
20342	  if (is_quarter_float (new_imm))
20343	    {
20344	      if (is_quarter_float (inst.operands[1].imm))
20345		as_warn (_("immediate constant is valid both as a bit-pattern and a floating point value (using the fp value)"));
20346
20347	      inst.operands[1].imm = new_imm;
20348	      inst.operands[1].immisfloat = 1;
20349	    }
20350	}
20351
20352      if (is_quarter_float (inst.operands[1].imm))
20353	{
20354	  inst.operands[1].imm = neon_qfloat_bits (inst.operands[1].imm);
20355	  do_vfp_nsyn_opcode (ldconst);
20356
20357	  /* ARMv8.2 fp16 vmov.f16 instruction.  */
20358	  if (rs == NS_HI)
20359	    do_scalar_fp16_v82_encode ();
20360	}
20361      else
20362	first_error (_("immediate out of range"));
20363      break;
20364
20365    case NS_RH:
20366    case NS_RF:  /* case 12 (fmrs).  */
20367      do_vfp_nsyn_opcode ("fmrs");
20368      /* ARMv8.2 fp16 vmov.f16 instruction.  */
20369      if (rs == NS_RH)
20370	do_scalar_fp16_v82_encode ();
20371      break;
20372
20373    case NS_HR:
20374    case NS_FR:  /* case 13 (fmsr).  */
20375      do_vfp_nsyn_opcode ("fmsr");
20376      /* ARMv8.2 fp16 vmov.f16 instruction.  */
20377      if (rs == NS_HR)
20378	do_scalar_fp16_v82_encode ();
20379      break;
20380
20381    case NS_RRSS:
20382      do_mve_mov (0);
20383      break;
20384    case NS_SSRR:
20385      do_mve_mov (1);
20386      break;
20387
20388    /* The encoders for the fmrrs and fmsrr instructions expect three operands
20389       (one of which is a list), but we have parsed four.  Do some fiddling to
20390       make the operands what do_vfp_reg2_from_sp2 and do_vfp_sp2_from_reg2
20391       expect.  */
20392    case NS_RRFF:  /* case 14 (fmrrs).  */
20393      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20394		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20395		  _(BAD_FPU));
20396      constraint (inst.operands[3].reg != inst.operands[2].reg + 1,
20397		  _("VFP registers must be adjacent"));
20398      inst.operands[2].imm = 2;
20399      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
20400      do_vfp_nsyn_opcode ("fmrrs");
20401      break;
20402
20403    case NS_FFRR:  /* case 15 (fmsrr).  */
20404      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v2)
20405		  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20406		  _(BAD_FPU));
20407      constraint (inst.operands[1].reg != inst.operands[0].reg + 1,
20408		  _("VFP registers must be adjacent"));
20409      inst.operands[1] = inst.operands[2];
20410      inst.operands[2] = inst.operands[3];
20411      inst.operands[0].imm = 2;
20412      memset (&inst.operands[3], '\0', sizeof (inst.operands[3]));
20413      do_vfp_nsyn_opcode ("fmsrr");
20414      break;
20415
20416    case NS_NULL:
20417      /* neon_select_shape has determined that the instruction
20418	 shape is wrong and has already set the error message.  */
20419      break;
20420
20421    default:
20422      abort ();
20423    }
20424}
20425
20426static void
20427do_mve_movl (void)
20428{
20429  if (!(inst.operands[0].present && inst.operands[0].isquad
20430      && inst.operands[1].present && inst.operands[1].isquad
20431      && !inst.operands[2].present))
20432    {
20433      inst.instruction = 0;
20434      inst.cond = 0xb;
20435      if (thumb_mode)
20436	set_pred_insn_type (INSIDE_IT_INSN);
20437      do_neon_mov ();
20438      return;
20439    }
20440
20441  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20442    return;
20443
20444  if (inst.cond != COND_ALWAYS)
20445    inst.pred_insn_type = INSIDE_VPT_INSN;
20446
20447  struct neon_type_el et = neon_check_type (2, NS_QQ, N_EQK, N_S8 | N_U8
20448					    | N_S16 | N_U16 | N_KEY);
20449
20450  inst.instruction |= (et.type == NT_unsigned) << 28;
20451  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20452  inst.instruction |= (neon_logbits (et.size) + 1) << 19;
20453  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20454  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
20455  inst.instruction |= LOW4 (inst.operands[1].reg);
20456  inst.is_neon = 1;
20457}
20458
20459static void
20460do_neon_rshift_round_imm (void)
20461{
20462  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20463   return;
20464
20465  enum neon_shape rs;
20466  struct neon_type_el et;
20467
20468  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20469    {
20470      rs = neon_select_shape (NS_QQI, NS_NULL);
20471      et = neon_check_type (2, rs, N_EQK, N_SU_MVE | N_KEY);
20472    }
20473  else
20474    {
20475      rs = neon_select_shape (NS_DDI, NS_QQI, NS_NULL);
20476      et = neon_check_type (2, rs, N_EQK, N_SU_ALL | N_KEY);
20477    }
20478  int imm = inst.operands[2].imm;
20479
20480  /* imm == 0 case is encoded as VMOV for V{R}SHR.  */
20481  if (imm == 0)
20482    {
20483      inst.operands[2].present = 0;
20484      do_neon_mov ();
20485      return;
20486    }
20487
20488  constraint (imm < 1 || (unsigned)imm > et.size,
20489	      _("immediate out of range for shift"));
20490  neon_imm_shift (TRUE, et.type == NT_unsigned, neon_quad (rs), et,
20491		  et.size - imm);
20492}
20493
20494static void
20495do_neon_movhf (void)
20496{
20497  enum neon_shape rs = neon_select_shape (NS_HH, NS_NULL);
20498  constraint (rs != NS_HH, _("invalid suffix"));
20499
20500  if (inst.cond != COND_ALWAYS)
20501    {
20502      if (thumb_mode)
20503	{
20504	  as_warn (_("ARMv8.2 scalar fp16 instruction cannot be conditional,"
20505		     " the behaviour is UNPREDICTABLE"));
20506	}
20507      else
20508	{
20509	  inst.error = BAD_COND;
20510	  return;
20511	}
20512    }
20513
20514  do_vfp_sp_monadic ();
20515
20516  inst.is_neon = 1;
20517  inst.instruction |= 0xf0000000;
20518}
20519
20520static void
20521do_neon_movl (void)
20522{
20523  struct neon_type_el et = neon_check_type (2, NS_QD,
20524    N_EQK | N_DBL, N_SU_32 | N_KEY);
20525  unsigned sizebits = et.size >> 3;
20526  inst.instruction |= sizebits << 19;
20527  neon_two_same (0, et.type == NT_unsigned, -1);
20528}
20529
20530static void
20531do_neon_trn (void)
20532{
20533  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20534  struct neon_type_el et = neon_check_type (2, rs,
20535    N_EQK, N_8 | N_16 | N_32 | N_KEY);
20536  NEON_ENCODE (INTEGER, inst);
20537  neon_two_same (neon_quad (rs), 1, et.size);
20538}
20539
20540static void
20541do_neon_zip_uzp (void)
20542{
20543  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20544  struct neon_type_el et = neon_check_type (2, rs,
20545    N_EQK, N_8 | N_16 | N_32 | N_KEY);
20546  if (rs == NS_DD && et.size == 32)
20547    {
20548      /* Special case: encode as VTRN.32 <Dd>, <Dm>.  */
20549      inst.instruction = N_MNEM_vtrn;
20550      do_neon_trn ();
20551      return;
20552    }
20553  neon_two_same (neon_quad (rs), 1, et.size);
20554}
20555
20556static void
20557do_neon_sat_abs_neg (void)
20558{
20559  if (!check_simd_pred_availability (FALSE, NEON_CHECK_CC | NEON_CHECK_ARCH))
20560    return;
20561
20562  enum neon_shape rs;
20563  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20564    rs = neon_select_shape (NS_QQ, NS_NULL);
20565  else
20566    rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20567  struct neon_type_el et = neon_check_type (2, rs,
20568    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20569  neon_two_same (neon_quad (rs), 1, et.size);
20570}
20571
20572static void
20573do_neon_pair_long (void)
20574{
20575  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20576  struct neon_type_el et = neon_check_type (2, rs, N_EQK, N_SU_32 | N_KEY);
20577  /* Unsigned is encoded in OP field (bit 7) for these instruction.  */
20578  inst.instruction |= (et.type == NT_unsigned) << 7;
20579  neon_two_same (neon_quad (rs), 1, et.size);
20580}
20581
20582static void
20583do_neon_recip_est (void)
20584{
20585  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20586  struct neon_type_el et = neon_check_type (2, rs,
20587    N_EQK | N_FLT, N_F_16_32 | N_U32 | N_KEY);
20588  inst.instruction |= (et.type == NT_float) << 8;
20589  neon_two_same (neon_quad (rs), 1, et.size);
20590}
20591
20592static void
20593do_neon_cls (void)
20594{
20595  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20596    return;
20597
20598  enum neon_shape rs;
20599  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20600   rs = neon_select_shape (NS_QQ, NS_NULL);
20601  else
20602   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20603
20604  struct neon_type_el et = neon_check_type (2, rs,
20605    N_EQK, N_S8 | N_S16 | N_S32 | N_KEY);
20606  neon_two_same (neon_quad (rs), 1, et.size);
20607}
20608
20609static void
20610do_neon_clz (void)
20611{
20612  if (!check_simd_pred_availability (FALSE, NEON_CHECK_ARCH | NEON_CHECK_CC))
20613    return;
20614
20615  enum neon_shape rs;
20616  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20617   rs = neon_select_shape (NS_QQ, NS_NULL);
20618  else
20619   rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20620
20621  struct neon_type_el et = neon_check_type (2, rs,
20622    N_EQK, N_I8 | N_I16 | N_I32 | N_KEY);
20623  neon_two_same (neon_quad (rs), 1, et.size);
20624}
20625
20626static void
20627do_neon_cnt (void)
20628{
20629  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20630  struct neon_type_el et = neon_check_type (2, rs,
20631    N_EQK | N_INT, N_8 | N_KEY);
20632  neon_two_same (neon_quad (rs), 1, et.size);
20633}
20634
20635static void
20636do_neon_swp (void)
20637{
20638  enum neon_shape rs = neon_select_shape (NS_DD, NS_QQ, NS_NULL);
20639  if (rs == NS_NULL)
20640    return;
20641  neon_two_same (neon_quad (rs), 1, -1);
20642}
20643
20644static void
20645do_neon_tbl_tbx (void)
20646{
20647  unsigned listlenbits;
20648  neon_check_type (3, NS_DLD, N_EQK, N_EQK, N_8 | N_KEY);
20649
20650  if (inst.operands[1].imm < 1 || inst.operands[1].imm > 4)
20651    {
20652      first_error (_("bad list length for table lookup"));
20653      return;
20654    }
20655
20656  listlenbits = inst.operands[1].imm - 1;
20657  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
20658  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
20659  inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
20660  inst.instruction |= HI1 (inst.operands[1].reg) << 7;
20661  inst.instruction |= LOW4 (inst.operands[2].reg);
20662  inst.instruction |= HI1 (inst.operands[2].reg) << 5;
20663  inst.instruction |= listlenbits << 8;
20664
20665  neon_dp_fixup (&inst);
20666}
20667
20668static void
20669do_neon_ldm_stm (void)
20670{
20671  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd)
20672	      && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext),
20673	      _(BAD_FPU));
20674  /* P, U and L bits are part of bitmask.  */
20675  int is_dbmode = (inst.instruction & (1 << 24)) != 0;
20676  unsigned offsetbits = inst.operands[1].imm * 2;
20677
20678  if (inst.operands[1].issingle)
20679    {
20680      do_vfp_nsyn_ldm_stm (is_dbmode);
20681      return;
20682    }
20683
20684  constraint (is_dbmode && !inst.operands[0].writeback,
20685	      _("writeback (!) must be used for VLDMDB and VSTMDB"));
20686
20687  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20688	      _("register list must contain at least 1 and at most 16 "
20689		"registers"));
20690
20691  inst.instruction |= inst.operands[0].reg << 16;
20692  inst.instruction |= inst.operands[0].writeback << 21;
20693  inst.instruction |= LOW4 (inst.operands[1].reg) << 12;
20694  inst.instruction |= HI1 (inst.operands[1].reg) << 22;
20695
20696  inst.instruction |= offsetbits;
20697
20698  do_vfp_cond_or_thumb ();
20699}
20700
20701static void
20702do_vfp_nsyn_pop (void)
20703{
20704  nsyn_insert_sp ();
20705  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20706    return do_vfp_nsyn_opcode ("vldm");
20707  }
20708
20709  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20710	      _(BAD_FPU));
20711
20712  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20713	      _("register list must contain at least 1 and at most 16 "
20714		"registers"));
20715
20716  if (inst.operands[1].issingle)
20717    do_vfp_nsyn_opcode ("fldmias");
20718  else
20719    do_vfp_nsyn_opcode ("fldmiad");
20720}
20721
20722static void
20723do_vfp_nsyn_push (void)
20724{
20725  nsyn_insert_sp ();
20726  if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)) {
20727    return do_vfp_nsyn_opcode ("vstmdb");
20728  }
20729
20730  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_v1xd),
20731	      _(BAD_FPU));
20732
20733  constraint (inst.operands[1].imm < 1 || inst.operands[1].imm > 16,
20734	      _("register list must contain at least 1 and at most 16 "
20735		"registers"));
20736
20737  if (inst.operands[1].issingle)
20738    do_vfp_nsyn_opcode ("fstmdbs");
20739  else
20740    do_vfp_nsyn_opcode ("fstmdbd");
20741}
20742
20743
20744static void
20745do_neon_ldr_str (void)
20746{
20747  int is_ldr = (inst.instruction & (1 << 20)) != 0;
20748
20749  /* Use of PC in vstr in ARM mode is deprecated in ARMv7.
20750     And is UNPREDICTABLE in thumb mode.  */
20751  if (!is_ldr
20752      && inst.operands[1].reg == REG_PC
20753      && (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v7) || thumb_mode))
20754    {
20755      if (thumb_mode)
20756	inst.error = _("Use of PC here is UNPREDICTABLE");
20757      else if (warn_on_deprecated)
20758	as_tsktsk (_("Use of PC here is deprecated"));
20759    }
20760
20761  if (inst.operands[0].issingle)
20762    {
20763      if (is_ldr)
20764	do_vfp_nsyn_opcode ("flds");
20765      else
20766	do_vfp_nsyn_opcode ("fsts");
20767
20768      /* ARMv8.2 vldr.16/vstr.16 instruction.  */
20769      if (inst.vectype.el[0].size == 16)
20770	do_scalar_fp16_v82_encode ();
20771    }
20772  else
20773    {
20774      if (is_ldr)
20775	do_vfp_nsyn_opcode ("fldd");
20776      else
20777	do_vfp_nsyn_opcode ("fstd");
20778    }
20779}
20780
20781static void
20782do_t_vldr_vstr_sysreg (void)
20783{
20784  int fp_vldr_bitno = 20, sysreg_vldr_bitno = 20;
20785  bfd_boolean is_vldr = ((inst.instruction & (1 << fp_vldr_bitno)) != 0);
20786
20787  /* Use of PC is UNPREDICTABLE.  */
20788  if (inst.operands[1].reg == REG_PC)
20789    inst.error = _("Use of PC here is UNPREDICTABLE");
20790
20791  if (inst.operands[1].immisreg)
20792    inst.error = _("instruction does not accept register index");
20793
20794  if (!inst.operands[1].isreg)
20795    inst.error = _("instruction does not accept PC-relative addressing");
20796
20797  if (abs (inst.operands[1].imm) >= (1 << 7))
20798    inst.error = _("immediate value out of range");
20799
20800  inst.instruction = 0xec000f80;
20801  if (is_vldr)
20802    inst.instruction |= 1 << sysreg_vldr_bitno;
20803  encode_arm_cp_address (1, TRUE, FALSE, BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM);
20804  inst.instruction |= (inst.operands[0].imm & 0x7) << 13;
20805  inst.instruction |= (inst.operands[0].imm & 0x8) << 19;
20806}
20807
20808static void
20809do_vldr_vstr (void)
20810{
20811  bfd_boolean sysreg_op = !inst.operands[0].isreg;
20812
20813  /* VLDR/VSTR (System Register).  */
20814  if (sysreg_op)
20815    {
20816      if (!mark_feature_used (&arm_ext_v8_1m_main))
20817	as_bad (_("Instruction not permitted on this architecture"));
20818
20819      do_t_vldr_vstr_sysreg ();
20820    }
20821  /* VLDR/VSTR.  */
20822  else
20823    {
20824      if (!mark_feature_used (&fpu_vfp_ext_v1xd)
20825	  && !ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
20826	as_bad (_("Instruction not permitted on this architecture"));
20827      do_neon_ldr_str ();
20828    }
20829}
20830
20831/* "interleave" version also handles non-interleaving register VLD1/VST1
20832   instructions.  */
20833
static void
do_neon_ld_st_interleave (void)
{
  /* Encode the element size, alignment and list "type" field of a
     VLD<n>/VST<n> (multiple structures).  Registers and the L bit are
     filled in later by do_neon_ldx_stx.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL,
					    N_8 | N_16 | N_32 | N_64);
  unsigned alignbits = 0;
  unsigned idx;
  /* The bits in this table go:
     0: register stride of one (0) or two (1)
     1,2: register list length, minus one (1, 2, 3, 4).
     3,4: <n> in instruction type, minus one (VLD<n> / VST<n>).
     We use -1 for invalid entries.  */
  const int typetable[] =
    {
      0x7,  -1, 0xa,  -1, 0x6,  -1, 0x2,  -1, /* VLD1 / VST1.  */
       -1,  -1, 0x8, 0x9,  -1,  -1, 0x3,  -1, /* VLD2 / VST2.  */
       -1,  -1,  -1,  -1, 0x4, 0x5,  -1,  -1, /* VLD3 / VST3.  */
       -1,  -1,  -1,  -1,  -1,  -1, 0x0, 0x1  /* VLD4 / VST4.  */
    };
  int typebits;

  if (et.type == NT_invtype)
    return;

  /* Translate an alignment suffix (value in bits, stored in the high part
     of the imm field) into the 2-bit align encoding, rejecting alignments
     that are illegal for the list length in use.  */
  if (inst.operands[1].immisalign)
    switch (inst.operands[1].imm >> 8)
      {
      case 64: alignbits = 1; break;
      case 128:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2
	    && NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 2;
	break;
      case 256:
	if (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4)
	  goto bad_alignment;
	alignbits = 3;
	break;
      default:
      bad_alignment:
	first_error (_("bad alignment"));
	return;
      }

  inst.instruction |= alignbits << 4;
  inst.instruction |= neon_logbits (et.size) << 6;

  /* Bits [4:6] of the immediate in a list specifier encode register stride
     (minus 1) in bit 4, and list length in bits [5:6]. We put the <n> of
     VLD<n>/VST<n> in bits [9:8] of the initial bitmask. Suck it out here, look
     up the right value for "type" in a table based on this value and the given
     list style, then stick it back.  */
  idx = ((inst.operands[0].imm >> 4) & 7)
	| (((inst.instruction >> 8) & 3) << 3);

  typebits = typetable[idx];

  constraint (typebits == -1, _("bad list type for instruction"));
  /* 64-bit elements are only valid for VLD1/VST1 (bits [9:8] == 0).  */
  constraint (((inst.instruction >> 8) & 3) && et.size == 64,
	      BAD_EL_TYPE);

  inst.instruction &= ~0xf00;
  inst.instruction |= typebits << 8;
}
20899
20900/* Check alignment is valid for do_neon_ld_st_lane and do_neon_ld_dup.
20901   *DO_ALIGN is set to 1 if the relevant alignment bit should be set, 0
20902   otherwise. The variable arguments are a list of pairs of legal (size, align)
20903   values, terminated with -1.  */
20904
20905static int
20906neon_alignment_bit (int size, int align, int *do_alignment, ...)
20907{
20908  va_list ap;
20909  int result = FAIL, thissize, thisalign;
20910
20911  if (!inst.operands[1].immisalign)
20912    {
20913      *do_alignment = 0;
20914      return SUCCESS;
20915    }
20916
20917  va_start (ap, do_alignment);
20918
20919  do
20920    {
20921      thissize = va_arg (ap, int);
20922      if (thissize == -1)
20923	break;
20924      thisalign = va_arg (ap, int);
20925
20926      if (size == thissize && align == thisalign)
20927	result = SUCCESS;
20928    }
20929  while (result != SUCCESS);
20930
20931  va_end (ap);
20932
20933  if (result == SUCCESS)
20934    *do_alignment = 1;
20935  else
20936    first_error (_("unsupported alignment for instruction"));
20937
20938  return result;
20939}
20940
static void
do_neon_ld_st_lane (void)
{
  /* Encode a VLD<n>/VST<n> (single structure to one lane): list length,
     lane number, element size and alignment bits.  Registers and the L
     bit are filled in later by do_neon_ldx_stx.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;
  int logsize = neon_logbits (et.size);
  int align = inst.operands[1].imm >> 8;	/* Alignment in bits.  */
  int n = (inst.instruction >> 8) & 3;		/* <n> minus one (0 = VLD1).  */
  int max_el = 64 / et.size;			/* Lanes per D register.  */

  if (et.type == NT_invtype)
    return;

  constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != n + 1,
	      _("bad list length"));
  constraint (NEON_LANE (inst.operands[0].imm) >= max_el,
	      _("scalar index out of range"));
  constraint (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2
	      && et.size == 8,
	      _("stride of 2 unavailable when element size is 8"));

  /* Check the alignment is legal for this <n>/size combination and
     compute the index_align bits (field at bits [7:4]).  */
  switch (n)
    {
    case 0:  /* VLD1 / VST1.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 16, 16,
				       32, 32, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = 0x3; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    case 1:  /* VLD2 / VST2.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 16,
		      16, 32, 32, 64, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	inst.instruction |= 1 << 4;
      break;

    case 2:  /* VLD3 / VST3: no alignment allowed.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      break;

    case 3:  /* VLD4 / VST4.  */
      align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
				       16, 64, 32, 64, 32, 128, -1);
      if (align_good == FAIL)
	return;
      if (do_alignment)
	{
	  unsigned alignbits = 0;
	  switch (et.size)
	    {
	    case 8:  alignbits = 0x1; break;
	    case 16: alignbits = 0x1; break;
	    case 32: alignbits = (align == 64) ? 0x1 : 0x2; break;
	    default: ;
	    }
	  inst.instruction |= alignbits << 4;
	}
      break;

    default: ;
    }

  /* Reg stride of 2 is encoded in bit 5 when size==16, bit 6 when size==32.  */
  if (n != 0 && NEON_REG_STRIDE (inst.operands[0].imm) == 2)
    inst.instruction |= 1 << (4 + logsize);

  inst.instruction |= NEON_LANE (inst.operands[0].imm) << (logsize + 5);
  inst.instruction |= logsize << 10;
}
21025
21026/* Encode single n-element structure to all lanes VLD<n> instructions.  */
21027
static void
do_neon_ld_dup (void)
{
  /* Encode a VLD<n> (single structure to all lanes): list length checks,
     register stride, element size and the alignment bit.  Registers and
     the L bit are filled in later by do_neon_ldx_stx.  */
  struct neon_type_el et = neon_check_type (1, NS_NULL, N_8 | N_16 | N_32);
  int align_good, do_alignment = 0;

  if (et.type == NT_invtype)
    return;

  /* <n> minus one is held in bits [9:8] of the initial bitmask.  */
  switch ((inst.instruction >> 8) & 3)
    {
    case 0:  /* VLD1.  */
      gas_assert (NEON_REG_STRIDE (inst.operands[0].imm) != 2);
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 16, 16, 32, 32, -1);
      if (align_good == FAIL)
	return;
      /* List length of 2 is encoded in bit 5 ("T" variant).  */
      switch (NEON_REGLIST_LENGTH (inst.operands[0].imm))
	{
	case 1: break;
	case 2: inst.instruction |= 1 << 5; break;
	default: first_error (_("bad list length")); return;
	}
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 1:  /* VLD2.  */
      align_good = neon_alignment_bit (et.size, inst.operands[1].imm >> 8,
				       &do_alignment, 8, 16, 16, 32, 32, 64,
				       -1);
      if (align_good == FAIL)
	return;
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 2,
		  _("bad list length"));
      /* Register stride of 2 is encoded in bit 5.  */
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 2:  /* VLD3: no alignment allowed.  */
      constraint (inst.operands[1].immisalign,
		  _("can't use alignment with this instruction"));
      constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 3,
		  _("bad list length"));
      if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	inst.instruction |= 1 << 5;
      inst.instruction |= neon_logbits (et.size) << 6;
      break;

    case 3:  /* VLD4.  */
      {
	int align = inst.operands[1].imm >> 8;
	align_good = neon_alignment_bit (et.size, align, &do_alignment, 8, 32,
					 16, 64, 32, 64, 32, 128, -1);
	if (align_good == FAIL)
	  return;
	constraint (NEON_REGLIST_LENGTH (inst.operands[0].imm) != 4,
		    _("bad list length"));
	if (NEON_REG_STRIDE (inst.operands[0].imm) == 2)
	  inst.instruction |= 1 << 5;
	/* 32-bit elements with 128-bit alignment use a special size
	   encoding.  */
	if (et.size == 32 && align == 128)
	  inst.instruction |= 0x3 << 6;
	else
	  inst.instruction |= neon_logbits (et.size) << 6;
      }
      break;

    default: ;
    }

  /* The "a" (alignment present) bit is bit 4.  */
  inst.instruction |= do_alignment << 4;
}
21100
21101/* Disambiguate VLD<n> and VST<n> instructions, and fill in common bits (those
21102   apart from bits [11:4].  */
21103
21104static void
21105do_neon_ldx_stx (void)
21106{
21107  if (inst.operands[1].isreg)
21108    constraint (inst.operands[1].reg == REG_PC, BAD_PC);
21109
21110  switch (NEON_LANE (inst.operands[0].imm))
21111    {
21112    case NEON_INTERLEAVE_LANES:
21113      NEON_ENCODE (INTERLV, inst);
21114      do_neon_ld_st_interleave ();
21115      break;
21116
21117    case NEON_ALL_LANES:
21118      NEON_ENCODE (DUP, inst);
21119      if (inst.instruction == N_INV)
21120	{
21121	  first_error ("only loads support such operands");
21122	  break;
21123	}
21124      do_neon_ld_dup ();
21125      break;
21126
21127    default:
21128      NEON_ENCODE (LANE, inst);
21129      do_neon_ld_st_lane ();
21130    }
21131
21132  /* L bit comes from bit mask.  */
21133  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21134  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21135  inst.instruction |= inst.operands[1].reg << 16;
21136
21137  if (inst.operands[1].postind)
21138    {
21139      int postreg = inst.operands[1].imm & 0xf;
21140      constraint (!inst.operands[1].immisreg,
21141		  _("post-index must be a register"));
21142      constraint (postreg == 0xd || postreg == 0xf,
21143		  _("bad register for post-index"));
21144      inst.instruction |= postreg;
21145    }
21146  else
21147    {
21148      constraint (inst.operands[1].immisreg, BAD_ADDR_MODE);
21149      constraint (inst.relocs[0].exp.X_op != O_constant
21150		  || inst.relocs[0].exp.X_add_number != 0,
21151		  BAD_ADDR_MODE);
21152
21153      if (inst.operands[1].writeback)
21154	{
21155	  inst.instruction |= 0xd;
21156	}
21157      else
21158	inst.instruction |= 0xf;
21159    }
21160
21161  if (thumb_mode)
21162    inst.instruction |= 0xf9000000;
21163  else
21164    inst.instruction |= 0xf4000000;
21165}
21166
21167/* FP v8.  */
21168static void
21169do_vfp_nsyn_fpv8 (enum neon_shape rs)
21170{
21171  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
21172     D register operands.  */
21173  if (neon_shape_class[rs] == SC_DOUBLE)
21174    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
21175		_(BAD_FPU));
21176
21177  NEON_ENCODE (FPV8, inst);
21178
21179  if (rs == NS_FFF || rs == NS_HHH)
21180    {
21181      do_vfp_sp_dyadic ();
21182
21183      /* ARMv8.2 fp16 instruction.  */
21184      if (rs == NS_HHH)
21185	do_scalar_fp16_v82_encode ();
21186    }
21187  else
21188    do_vfp_dp_rd_rn_rm ();
21189
21190  if (rs == NS_DDD)
21191    inst.instruction |= 0x100;
21192
21193  inst.instruction |= 0xf0000000;
21194}
21195
static void
do_vsel (void)
{
  /* VSEL (FP v8 conditional select): must not appear inside an IT block.  */
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) != SUCCESS)
    first_error (_("invalid instruction shape"));
}
21204
static void
do_vmaxnm (void)
{
  /* VMAXNM/VMINNM: try the VFP scalar (FP v8) encoding first, then fall
     back to the Neon/MVE vector encoding.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (try_vfp_nsyn (3, do_vfp_nsyn_fpv8) == SUCCESS)
    return;

  if (!check_simd_pred_availability (TRUE, NEON_CHECK_CC | NEON_CHECK_ARCH8))
    return;

  neon_dyadic_misc (NT_untyped, N_F_16_32, 0);
}
21219
21220static void
21221do_vrint_1 (enum neon_cvt_mode mode)
21222{
21223  enum neon_shape rs = neon_select_shape (NS_HH, NS_FF, NS_DD, NS_QQ, NS_NULL);
21224  struct neon_type_el et;
21225
21226  if (rs == NS_NULL)
21227    return;
21228
21229  /* Targets like FPv5-SP-D16 don't support FP v8 instructions with
21230     D register operands.  */
21231  if (neon_shape_class[rs] == SC_DOUBLE)
21232    constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
21233		_(BAD_FPU));
21234
21235  et = neon_check_type (2, rs, N_EQK | N_VFP, N_F_ALL | N_KEY
21236			| N_VFP);
21237  if (et.type != NT_invtype)
21238    {
21239      /* VFP encodings.  */
21240      if (mode == neon_cvt_mode_a || mode == neon_cvt_mode_n
21241	  || mode == neon_cvt_mode_p || mode == neon_cvt_mode_m)
21242	set_pred_insn_type (OUTSIDE_PRED_INSN);
21243
21244      NEON_ENCODE (FPV8, inst);
21245      if (rs == NS_FF || rs == NS_HH)
21246	do_vfp_sp_monadic ();
21247      else
21248	do_vfp_dp_rd_rm ();
21249
21250      switch (mode)
21251	{
21252	case neon_cvt_mode_r: inst.instruction |= 0x00000000; break;
21253	case neon_cvt_mode_z: inst.instruction |= 0x00000080; break;
21254	case neon_cvt_mode_x: inst.instruction |= 0x00010000; break;
21255	case neon_cvt_mode_a: inst.instruction |= 0xf0000000; break;
21256	case neon_cvt_mode_n: inst.instruction |= 0xf0010000; break;
21257	case neon_cvt_mode_p: inst.instruction |= 0xf0020000; break;
21258	case neon_cvt_mode_m: inst.instruction |= 0xf0030000; break;
21259	default: abort ();
21260	}
21261
21262      inst.instruction |= (rs == NS_DD) << 8;
21263      do_vfp_cond_or_thumb ();
21264
21265      /* ARMv8.2 fp16 vrint instruction.  */
21266      if (rs == NS_HH)
21267      do_scalar_fp16_v82_encode ();
21268    }
21269  else
21270    {
21271      /* Neon encodings (or something broken...).  */
21272      inst.error = NULL;
21273      et = neon_check_type (2, rs, N_EQK, N_F_16_32 | N_KEY);
21274
21275      if (et.type == NT_invtype)
21276	return;
21277
21278      if (!check_simd_pred_availability (TRUE,
21279					 NEON_CHECK_CC | NEON_CHECK_ARCH8))
21280	return;
21281
21282      NEON_ENCODE (FLOAT, inst);
21283
21284      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
21285      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
21286      inst.instruction |= LOW4 (inst.operands[1].reg);
21287      inst.instruction |= HI1 (inst.operands[1].reg) << 5;
21288      inst.instruction |= neon_quad (rs) << 6;
21289      /* Mask off the original size bits and reencode them.  */
21290      inst.instruction = ((inst.instruction & 0xfff3ffff)
21291			  | neon_logbits (et.size) << 18);
21292
21293      switch (mode)
21294	{
21295	case neon_cvt_mode_z: inst.instruction |= 3 << 7; break;
21296	case neon_cvt_mode_x: inst.instruction |= 1 << 7; break;
21297	case neon_cvt_mode_a: inst.instruction |= 2 << 7; break;
21298	case neon_cvt_mode_n: inst.instruction |= 0 << 7; break;
21299	case neon_cvt_mode_p: inst.instruction |= 7 << 7; break;
21300	case neon_cvt_mode_m: inst.instruction |= 5 << 7; break;
21301	case neon_cvt_mode_r: inst.error = _("invalid rounding mode"); break;
21302	default: abort ();
21303	}
21304
21305      if (thumb_mode)
21306	inst.instruction |= 0xfc000000;
21307      else
21308	inst.instruction |= 0xf0000000;
21309    }
21310}
21311
/* VRINTX: round with the 'x' mode.  */

static void
do_vrintx (void)
{
  do_vrint_1 (neon_cvt_mode_x);
}
21317
/* VRINTZ: round with the 'z' (towards-zero) mode.  */

static void
do_vrintz (void)
{
  do_vrint_1 (neon_cvt_mode_z);
}
21323
/* VRINTR: round with the 'r' mode.  */

static void
do_vrintr (void)
{
  do_vrint_1 (neon_cvt_mode_r);
}
21329
/* VRINTA: round with the 'a' (ties-away) mode.  */

static void
do_vrinta (void)
{
  do_vrint_1 (neon_cvt_mode_a);
}
21335
/* VRINTN: round with the 'n' (to-nearest) mode.  */

static void
do_vrintn (void)
{
  do_vrint_1 (neon_cvt_mode_n);
}
21341
/* VRINTP: round with the 'p' (towards +infinity) mode.  */

static void
do_vrintp (void)
{
  do_vrint_1 (neon_cvt_mode_p);
}
21347
/* VRINTM: round with the 'm' (towards -infinity) mode.  */

static void
do_vrintm (void)
{
  do_vrint_1 (neon_cvt_mode_m);
}
21353
static unsigned
neon_scalar_for_vcmla (unsigned opnd, unsigned elsize)
{
  unsigned regno = NEON_SCALAR_REG (opnd);
  unsigned elno = NEON_SCALAR_INDEX (opnd);

  /* fp16 scalars: registers 0-15, index 0-1 packed into bit 4.
     fp32 scalars: index must be 0; register number is used as-is.  */
  if (elsize == 16)
    {
      if (regno < 16 && elno < 2)
	return regno | (elno << 4);
    }
  else if (elsize == 32)
    {
      if (elno == 0)
	return regno;
    }

  first_error (_("scalar out of range"));
  return 0;
}
21368
/* Encode VCMLA - complex multiply-accumulate with rotation (Armv8.3-A
   Advanced SIMD or MVE-FP).  The rotation immediate must be 0, 90, 180
   or 270 degrees and is encoded as a two-bit value 0-3.  */
static void
do_vcmla (void)
{
  /* Requires MVE with FP, or Neon plus the Armv8.3 extension.  */
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));
  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 0 && rot != 90 && rot != 180 && rot != 270,
	      _("immediate out of range"));
  /* Convert degrees to the two-bit field value.  */
  rot /= 90;

  if (!check_simd_pred_availability (TRUE,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (inst.operands[2].isscalar)
    {
      /* Indexed (by-scalar) form; not available in MVE.  */
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	first_error (_("invalid instruction shape"));
      enum neon_shape rs = neon_select_shape (NS_DDSI, NS_QQSI, NS_NULL);
      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      unsigned m = neon_scalar_for_vcmla (inst.operands[2].reg, size);
      inst.is_neon = 1;
      inst.instruction = 0xfe000800;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= LOW4 (m);
      inst.instruction |= HI1 (m) << 5;
      inst.instruction |= neon_quad (rs) << 6;
      inst.instruction |= rot << 20;
      inst.instruction |= (size == 32) << 23;
    }
  else
    {
      /* Three-register form; MVE only accepts Q-register shapes.  */
      enum neon_shape rs;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext))
	rs = neon_select_shape (NS_QQQI, NS_NULL);
      else
	rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);

      unsigned size = neon_check_type (3, rs, N_EQK, N_EQK,
				       N_KEY | N_F16 | N_F32).size;
      if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_fp_ext) && size == 32
	  && (inst.operands[0].reg == inst.operands[1].reg
	      || inst.operands[0].reg == inst.operands[2].reg))
	as_tsktsk (BAD_MVE_SRCDEST);

      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc200800;
      inst.instruction |= rot << 23;
      inst.instruction |= (size == 32) << 20;
    }
}
21428
/* Encode VCADD - complex add with rotation (Armv8.3-A Advanced SIMD or
   MVE).  Only rotations of 90 and 270 degrees exist.  */
static void
do_vcadd (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext)
	      && (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8)
		  || !mark_feature_used (&arm_ext_v8_3)), (BAD_FPU));
  constraint (inst.relocs[0].exp.X_op != O_constant,
	      _("expression too complex"));

  unsigned rot = inst.relocs[0].exp.X_add_number;
  constraint (rot != 90 && rot != 270, _("immediate out of range"));
  enum neon_shape rs;
  struct neon_type_el et;
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
    {
      /* Neon form: float element types only.  */
      rs = neon_select_shape (NS_DDDI, NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32);
    }
  else
    {
      /* MVE also supports integer element types.  */
      rs = neon_select_shape (NS_QQQI, NS_NULL);
      et = neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_F16 | N_F32 | N_I8
			    | N_I16 | N_I32);
      if (et.size == 32 && inst.operands[0].reg == inst.operands[2].reg)
	as_tsktsk (_("Warning: 32-bit element size and same first and third "
		     "operand makes instruction UNPREDICTABLE"));
    }

  if (et.type == NT_invtype)
    return;

  if (!check_simd_pred_availability (et.type == NT_float,
				     NEON_CHECK_ARCH8 | NEON_CHECK_CC))
    return;

  if (et.type == NT_float)
    {
      neon_three_same (neon_quad (rs), 0, -1);
      inst.instruction &= 0x00ffffff; /* Undo neon_dp_fixup.  */
      inst.instruction |= 0xfc800800;
      inst.instruction |= (rot == 270) << 24;
      inst.instruction |= (et.size == 32) << 20;
    }
  else
    {
      /* Integer VCADD is MVE-only; build the encoding from scratch.  */
      constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext), BAD_FPU);
      inst.instruction = 0xfe000f00;
      inst.instruction |= HI1 (inst.operands[0].reg) << 22;
      inst.instruction |= neon_logbits (et.size) << 20;
      inst.instruction |= LOW4 (inst.operands[1].reg) << 16;
      inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
      inst.instruction |= (rot == 270) << 12;
      inst.instruction |= HI1 (inst.operands[1].reg) << 7;
      inst.instruction |= HI1 (inst.operands[2].reg) << 5;
      inst.instruction |= LOW4 (inst.operands[2].reg);
      inst.is_neon = 1;
    }
}
21487
21488/* Dot Product instructions encoding support.  */
21489
/* Encode VSDOT/VUDOT (Armv8.2-A dot product).  UNSIGNED_P non-zero
   selects the unsigned (VUDOT) variant.  */
static void
do_neon_dotproduct (int unsigned_p)
{
  enum neon_shape rs;
  unsigned scalar_oprd2 = 0;
  int high8;

  if (inst.cond != COND_ALWAYS)
    as_warn (_("Dot Product instructions cannot be conditional,  the behaviour "
	       "is UNPREDICTABLE"));

  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_neon_ext_armv8),
	      _(BAD_FPU));

  /* Dot Product instructions are in three-same D/Q register format or the third
     operand can be a scalar index register.  */
  if (inst.operands[2].isscalar)
    {
      scalar_oprd2 = neon_scalar_for_mul (inst.operands[2].reg, 32);
      high8 = 0xfe000000;
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
    }
  else
    {
      high8 = 0xfc000000;
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
    }

  if (unsigned_p)
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_U8);
  else
    neon_check_type (3, rs, N_EQK, N_EQK, N_KEY | N_S8);

  /* The "U" bit in traditional Three Same encoding is fixed to 0 for Dot
     Product instruction, so we pass 0 as the "ubit" parameter.  And the
     "Size" field are fixed to 0x2, so we pass 32 as the "size" parameter.  */
  neon_three_same (neon_quad (rs), 0, 32);

  /* Undo neon_dp_fixup.  Dot Product instructions are using a slightly
     different NEON three-same encoding.  */
  inst.instruction &= 0x00ffffff;
  inst.instruction |= high8;
  /* Encode 'U' bit which indicates signedness.  */
  inst.instruction |= (unsigned_p ? 1 : 0) << 4;
  /* Re-encode operand2 if it's indexed scalar operand.  What has been encoded
     from inst.operand[2].reg in neon_three_same is GAS's internal encoding, not
     the instruction encoding.  */
  if (inst.operands[2].isscalar)
    {
      inst.instruction &= 0xffffffd0;
      inst.instruction |= LOW4 (scalar_oprd2);
      inst.instruction |= HI1 (scalar_oprd2) << 5;
    }
}
21544
21545/* Dot Product instructions for signed integer.  */
21546
static void
do_neon_dotproduct_s (void)
{
  /* VSDOT: signed variant.  */
  return do_neon_dotproduct (0);
}
21552
21553/* Dot Product instructions for unsigned integer.  */
21554
static void
do_neon_dotproduct_u (void)
{
  /* VUDOT: unsigned variant.  */
  return do_neon_dotproduct (1);
}
21560
/* Encode VUSDOT - mixed-sign dot product (Armv8.6-A I8MM).  Handles
   both the indexed (by-scalar) and the three-register vector forms.  */
static void
do_vusdot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand packs the index in its low nibble.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      inst.instruction |= (1 << 21);
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
21588
/* Encode VSUDOT (Armv8.6-A I8MM).  Only the indexed (by-scalar) form
   exists for this mnemonic.  */
static void
do_vsudot (void)
{
  enum neon_shape rs;
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand packs the index in its low nibble.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
}
21609
/* Encode VSMMLA - signed 8-bit integer matrix multiply-accumulate
   (Armv8.6-A I8MM), Q registers only.  */
static void
do_vsmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_S8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21621
/* Encode VUMMLA - unsigned 8-bit integer matrix multiply-accumulate
   (Armv8.6-A I8MM), Q registers only.  */
static void
do_vummla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_U8 | N_KEY);

  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);

}
21633
21634static void
21635check_cde_operand (size_t index, int is_dual)
21636{
21637  unsigned Rx = inst.operands[index].reg;
21638  bfd_boolean isvec = inst.operands[index].isvec;
21639  if (is_dual == 0 && thumb_mode)
21640    constraint (
21641		!((Rx <= 14 && Rx != 13) || (Rx == REG_PC && isvec)),
21642		_("Register must be r0-r14 except r13, or APSR_nzcv."));
21643  else
21644    constraint ( !((Rx <= 10 && Rx % 2 == 0 )),
21645      _("Register must be an even register between r0-r10."));
21646}
21647
21648static bfd_boolean
21649cde_coproc_enabled (unsigned coproc)
21650{
21651  switch (coproc)
21652  {
21653    case 0: return mark_feature_used (&arm_ext_cde0);
21654    case 1: return mark_feature_used (&arm_ext_cde1);
21655    case 2: return mark_feature_used (&arm_ext_cde2);
21656    case 3: return mark_feature_used (&arm_ext_cde3);
21657    case 4: return mark_feature_used (&arm_ext_cde4);
21658    case 5: return mark_feature_used (&arm_ext_cde5);
21659    case 6: return mark_feature_used (&arm_ext_cde6);
21660    case 7: return mark_feature_used (&arm_ext_cde7);
21661    default: return FALSE;
21662  }
21663}
21664
21665#define cde_coproc_pos 8
21666static void
21667cde_handle_coproc (void)
21668{
21669  unsigned coproc = inst.operands[0].reg;
21670  constraint (coproc > 7, _("CDE Coprocessor must be in range 0-7"));
21671  constraint (!(cde_coproc_enabled (coproc)), BAD_CDE_COPROC);
21672  inst.instruction |= coproc << cde_coproc_pos;
21673}
21674#undef cde_coproc_pos
21675
21676static void
21677cxn_handle_predication (bfd_boolean is_accum)
21678{
21679  if (is_accum && conditional_insn ())
21680    set_pred_insn_type (INSIDE_IT_INSN);
21681  else if (conditional_insn ())
21682  /* conditional_insn essentially checks for a suffix, not whether the
21683     instruction is inside an IT block or not.
21684     The non-accumulator versions should not have suffixes.  */
21685    inst.error = BAD_SYNTAX;
21686  else
21687    set_pred_insn_type (OUTSIDE_PRED_INSN);
21688}
21689
/* Encode the CX1 family of CDE custom instructions.  IS_DUAL is non-zero
   for the dual destination (CX1D/CX1DA) forms, IS_ACCUM for the
   accumulator (CX1A/CX1DA) forms.  Operand 0 is the coprocessor,
   operand 1 the destination register.  */
static void
do_custom_instruction_1 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd;

  Rd = inst.operands[1].reg;
  check_cde_operand (1, is_dual);

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx1d requires consecutive destination registers."));
      imm = inst.operands[3].imm;
    }
  else if (is_dual == 0)
    imm = inst.operands[2].imm;
  else
    abort ();

  inst.instruction |= Rd << 12;
  /* Split the immediate: bits <12:7> -> 16-21, bit <6> -> 7,
     bits <5:0> -> 0-5.  */
  inst.instruction |= (imm & 0x1F80) << 9;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21720
/* Encode the CX2 family of CDE custom instructions.  As
   do_custom_instruction_1, but with an additional source register Rn.  */
static void
do_custom_instruction_2 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx2d requires consecutive destination registers."));
      imm = inst.operands[4].imm;
      Rn = inst.operands[3].reg;
    }
  else if (is_dual == 0)
  {
    imm = inst.operands[3].imm;
    Rn = inst.operands[2].reg;
  }
  else
    abort ();

  /* Rn uses the single-register rules regardless of the dual form.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (1, is_dual);

  inst.instruction |= Rd << 12;
  inst.instruction |= Rn << 16;

  /* Split the immediate: bits <9:7> -> 20-22, bit <6> -> 7,
     bits <5:0> -> 0-5.  */
  inst.instruction |= (imm & 0x0380) << 13;
  inst.instruction |= (imm & 0x0040) << 1;
  inst.instruction |= (imm & 0x003f);

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21759
/* Encode the CX3 family of CDE custom instructions.  As
   do_custom_instruction_2, but with two source registers Rn and Rm.  */
static void
do_custom_instruction_3 (int is_dual, bfd_boolean is_accum)
{

  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));

  unsigned imm, Rd, Rn, Rm;

  Rd = inst.operands[1].reg;

  if (is_dual == 1)
    {
      constraint (inst.operands[2].reg != Rd + 1,
		  _("cx3d requires consecutive destination registers."));
      imm = inst.operands[5].imm;
      Rn = inst.operands[3].reg;
      Rm = inst.operands[4].reg;
    }
  else if (is_dual == 0)
  {
    imm = inst.operands[4].imm;
    Rn = inst.operands[2].reg;
    Rm = inst.operands[3].reg;
  }
  else
    abort ();

  check_cde_operand (1, is_dual);
  /* The source registers use the single-register rules.  */
  check_cde_operand (2 + is_dual, /* is_dual = */0);
  check_cde_operand (3 + is_dual, /* is_dual = */0);

  inst.instruction |= Rd;
  inst.instruction |= Rn << 16;
  inst.instruction |= Rm << 12;

  /* Split the immediate: bits <5:3> -> 20-22, bit <2> -> 7,
     bits <1:0> -> 4-5.  */
  inst.instruction |= (imm & 0x0038) << 17;
  inst.instruction |= (imm & 0x0004) << 5;
  inst.instruction |= (imm & 0x0003) << 4;

  cde_handle_coproc ();
  cxn_handle_predication (is_accum);
}
21802
static void
do_cx1 (void)
{
  /* CX1: single destination, no accumulate.  */
  return do_custom_instruction_1 (0, 0);
}
21808
static void
do_cx1a (void)
{
  /* CX1A: single destination, accumulate.  */
  return do_custom_instruction_1 (0, 1);
}
21814
static void
do_cx1d (void)
{
  /* CX1D: dual destination, no accumulate.  */
  return do_custom_instruction_1 (1, 0);
}
21820
static void
do_cx1da (void)
{
  /* CX1DA: dual destination, accumulate.  */
  return do_custom_instruction_1 (1, 1);
}
21826
static void
do_cx2 (void)
{
  /* CX2: single destination, no accumulate.  */
  return do_custom_instruction_2 (0, 0);
}
21832
static void
do_cx2a (void)
{
  /* CX2A: single destination, accumulate.  */
  return do_custom_instruction_2 (0, 1);
}
21838
static void
do_cx2d (void)
{
  /* CX2D: dual destination, no accumulate.  */
  return do_custom_instruction_2 (1, 0);
}
21844
static void
do_cx2da (void)
{
  /* CX2DA: dual destination, accumulate.  */
  return do_custom_instruction_2 (1, 1);
}
21850
static void
do_cx3 (void)
{
  /* CX3: single destination, no accumulate.  */
  return do_custom_instruction_3 (0, 0);
}
21856
static void
do_cx3a (void)
{
  /* CX3A: single destination, accumulate.  */
  return do_custom_instruction_3 (0, 1);
}
21862
static void
do_cx3d (void)
{
  /* CX3D: dual destination, no accumulate.  */
  return do_custom_instruction_3 (1, 0);
}
21868
static void
do_cx3da (void)
{
  /* CX3DA: dual destination, accumulate.  */
  return do_custom_instruction_3 (1, 1);
}
21874
/* Encode vector register REGNUM into the destination (Vd:D) fields:
   top four bits at 12-15, low bit at 22.  */
static void
vcx_assign_vec_d (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 12;
  inst.instruction |= LOW1 (regnum) << 22;
}
21881
/* Encode vector register REGNUM into the Vm:M fields: top four bits at
   0-3, low bit at 5.  */
static void
vcx_assign_vec_m (unsigned regnum)
{
  inst.instruction |= HI4 (regnum);
  inst.instruction |= LOW1 (regnum) << 5;
}
21888
/* Encode vector register REGNUM into the Vn:N fields: top four bits at
   16-19, low bit at 7.  */
static void
vcx_assign_vec_n (unsigned regnum)
{
  inst.instruction |= HI4 (regnum) << 16;
  inst.instruction |= LOW1 (regnum) << 7;
}
21895
/* Which vector register file a VCX instruction operates on: MVE Q
   registers, or VFP D/S registers.  */
enum vcx_reg_type {
    q_reg,
    d_reg,
    s_reg
};
21901
21902static enum vcx_reg_type
21903vcx_get_reg_type (enum neon_shape ns)
21904{
21905  gas_assert (ns == NS_PQI
21906	      || ns == NS_PDI
21907	      || ns == NS_PFI
21908	      || ns == NS_PQQI
21909	      || ns == NS_PDDI
21910	      || ns == NS_PFFI
21911	      || ns == NS_PQQQI
21912	      || ns == NS_PDDDI
21913	      || ns == NS_PFFFI);
21914  if (ns == NS_PQI || ns == NS_PQQI || ns == NS_PQQQI)
21915    return q_reg;
21916  if (ns == NS_PDI || ns == NS_PDDI || ns == NS_PDDDI)
21917    return d_reg;
21918  return s_reg;
21919}
21920
#define vcx_size_pos 24
#define vcx_vec_pos 6
/* Set the size/vec bits of the instruction according to REG_TYPE and
   return the multiplier to apply to register numbers before encoding
   them into the Vd:D-style fields (see the note below).  */
static unsigned
vcx_handle_shape (enum vcx_reg_type reg_type)
{
  unsigned mult = 2;
  if (reg_type == q_reg)
    inst.instruction |= 1 << vcx_vec_pos;
  else if (reg_type == d_reg)
    inst.instruction |= 1 << vcx_size_pos;
  else
    mult = 1;
  /* NOTE:
     The documentation says that the Q registers are encoded as 2*N in the D:Vd
     bits (or equivalent for N and M registers).
     Similarly the D registers are encoded as N in D:Vd bits.
     While the S registers are encoded as N in the Vd:D bits.

     Taking into account the maximum values of these registers we can see a
     nicer pattern for calculation:
       Q -> 7, D -> 15, S -> 31

     If we say that everything is encoded in the Vd:D bits, then we can say
     that Q is encoded as 4*N, and D is encoded as 2*N.
     This way the bits will end up the same, and calculation is simpler.
     (calculation is now:
	1. Multiply by a number determined by the register letter.
	2. Encode resulting number in Vd:D bits.)

      This is made a little more complicated by automatic handling of 'Q'
      registers elsewhere, which means the register number is already 2*N where
      N is the number the user wrote after the register letter.
     */
  return mult;
}
#undef vcx_vec_pos
#undef vcx_size_pos
21958
21959static void
21960vcx_ensure_register_in_range (unsigned R, enum vcx_reg_type reg_type)
21961{
21962  if (reg_type == q_reg)
21963    {
21964      gas_assert (R % 2 == 0);
21965      constraint (R >= 16, _("'q' register must be in range 0-7"));
21966    }
21967  else if (reg_type == d_reg)
21968    constraint (R >= 16, _("'d' register must be in range 0-15"));
21969  else
21970    constraint (R >= 32, _("'s' register must be in range 0-31"));
21971}
21972
/* Field encoders indexed by operand position for the one- and
   two-register forms: 0 = destination (d), 1 = m, 2 = n.  */
static void (*vcx_assign_vec[3]) (unsigned) = {
    vcx_assign_vec_d,
    vcx_assign_vec_m,
    vcx_assign_vec_n
};
21978
/* Range-check and encode the NUM_REGISTERS vector operands (operands
   1..NUM_REGISTERS) of a VCX instruction.  In the three-register form
   the second source goes in the n fields and the third in the m
   fields.  */
static void
vcx_handle_register_arguments (unsigned num_registers,
			       enum vcx_reg_type reg_type)
{
  unsigned R, i;
  unsigned reg_mult = vcx_handle_shape (reg_type);
  for (i = 0; i < num_registers; i++)
    {
      R = inst.operands[i+1].reg;
      vcx_ensure_register_in_range (R, reg_type);
      if (num_registers == 3 && i > 0)
	{
	  if (i == 2)
	    vcx_assign_vec[1] (R * reg_mult);	/* Third operand -> m.  */
	  else
	    vcx_assign_vec[2] (R * reg_mult);	/* Second operand -> n.  */
	  continue;
	}
      vcx_assign_vec[i](R * reg_mult);
    }
}
22000
22001static void
22002vcx_handle_insn_block (enum vcx_reg_type reg_type)
22003{
22004  if (reg_type == q_reg)
22005    if (inst.cond > COND_ALWAYS)
22006      inst.pred_insn_type = INSIDE_VPT_INSN;
22007    else
22008      inst.pred_insn_type = MVE_OUTSIDE_PRED_INSN;
22009  else if (inst.cond == COND_ALWAYS)
22010    inst.pred_insn_type = OUTSIDE_PRED_INSN;
22011  else
22012    inst.error = BAD_NOT_IT;
22013}
22014
/* Checks shared by all VCX1/VCX2/VCX3 forms: CDE availability, the
   coprocessor operand, the NUM_ARGS vector operands of shape RS, the
   predication state, and the FP/MVE feature requirements.  */
static void
vcx_handle_common_checks (unsigned num_args, enum neon_shape rs)
{
  constraint (!mark_feature_used (&arm_ext_cde), _(BAD_CDE));
  cde_handle_coproc ();
  enum vcx_reg_type reg_type = vcx_get_reg_type (rs);
  vcx_handle_register_arguments (num_args, reg_type);
  vcx_handle_insn_block (reg_type);
  if (reg_type == q_reg)
    constraint (!mark_feature_used (&mve_ext),
		_("vcx instructions with Q registers require MVE"));
  else
    constraint (!(ARM_FSET_CPU_SUBSET (armv8m_fp, cpu_variant)
		  && mark_feature_used (&armv8m_fp))
		&& !mark_feature_used (&mve_ext),
		_("vcx instructions with S or D registers require either MVE"
		  " or Armv8-M floating point extension."));
}
22033
/* Encode VCX1{A}: one vector register plus an immediate.  The
   immediate is 12 bits for the Q form, 11 bits for S/D forms.  */
static void
do_vcx1 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQI, NS_PDI, NS_PFI, NS_NULL);
  vcx_handle_common_checks (1, rs);

  /* Split the immediate: bits <5:0> -> 0-5, bit <6> -> 7,
     bits <10:7> -> 16-19, bit <11> -> 24.  */
  unsigned imm = inst.operands[2].imm;
  inst.instruction |= (imm & 0x03f);
  inst.instruction |= (imm & 0x040) << 1;
  inst.instruction |= (imm & 0x780) << 9;
  if (rs != NS_PQI)
    constraint (imm >= 2048,
		_("vcx1 with S or D registers takes immediate within 0-2047"));
  inst.instruction |= (imm & 0x800) << 13;
}
22049
/* Encode VCX2{A}: two vector registers plus an immediate.  The
   immediate is 7 bits for the Q form, 6 bits for S/D forms.  */
static void
do_vcx2 (void)
{
  enum neon_shape rs = neon_select_shape (NS_PQQI, NS_PDDI, NS_PFFI, NS_NULL);
  vcx_handle_common_checks (2, rs);

  /* Split the immediate: bit <0> -> 4, bit <1> -> 7,
     bits <5:2> -> 16-19, bit <6> -> 24.  */
  unsigned imm = inst.operands[3].imm;
  inst.instruction |= (imm & 0x01) << 4;
  inst.instruction |= (imm & 0x02) << 6;
  inst.instruction |= (imm & 0x3c) << 14;
  if (rs != NS_PQQI)
    constraint (imm >= 64,
		_("vcx2 with S or D registers takes immediate within 0-63"));
  inst.instruction |= (imm & 0x40) << 18;
}
22065
22066static void
22067do_vcx3 (void)
22068{
22069  enum neon_shape rs = neon_select_shape (NS_PQQQI, NS_PDDDI, NS_PFFFI, NS_NULL);
22070  vcx_handle_common_checks (3, rs);
22071
22072  unsigned imm = inst.operands[4].imm;
22073  inst.instruction |= (imm & 0x1) << 4;
22074  inst.instruction |= (imm & 0x6) << 19;
22075  if (rs != NS_PQQQI)
22076    constraint (imm >= 8,
22077		_("vcx2 with S or D registers takes immediate within 0-7"));
22078  inst.instruction |= (imm & 0x8) << 21;
22079}
22080
22081/* Crypto v1 instructions.  */
/* Encode a two-register crypto instruction (AES*, SHA1H, SHA1SU1,
   SHA256SU0).  ELTTYPE is the required element type of the operands;
   OP is the value for the op field at bit 6, or -1 when the opcode
   carries no op field.  */
static void
do_crypto_2op_1 (unsigned elttype, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (2, NS_QQ, N_EQK | N_UNT, elttype | N_UNT | N_KEY).type
      == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  inst.instruction |= LOW4 (inst.operands[0].reg) << 12;
  inst.instruction |= HI1 (inst.operands[0].reg) << 22;
  inst.instruction |= LOW4 (inst.operands[1].reg);
  inst.instruction |= HI1 (inst.operands[1].reg) << 5;
  if (op != -1)
    inst.instruction |= op << 6;

  /* Select the Thumb or ARM top byte of the unconditional encoding.  */
  if (thumb_mode)
    inst.instruction |= 0xfc000000;
  else
    inst.instruction |= 0xf0000000;
}
22106
/* Encode a three-register crypto instruction (SHA1*, SHA256*).  U and
   OP feed the "ubit" and (as 8 << op) "size" fields of the three-same
   encoding.  */
static void
do_crypto_3op_1 (int u, int op)
{
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  if (neon_check_type (3, NS_QQQ, N_EQK | N_UNT, N_EQK | N_UNT,
		       N_32 | N_UNT | N_KEY).type == NT_invtype)
    return;

  inst.error = NULL;

  NEON_ENCODE (INTEGER, inst);
  neon_three_same (1, u, 8 << op);
}
22121
static void
do_aese (void)
{
  /* AESE: AES single round encryption.  */
  do_crypto_2op_1 (N_8, 0);
}
22127
static void
do_aesd (void)
{
  /* AESD: AES single round decryption.  */
  do_crypto_2op_1 (N_8, 1);
}
22133
static void
do_aesmc (void)
{
  /* AESMC: AES mix columns.  */
  do_crypto_2op_1 (N_8, 2);
}
22139
static void
do_aesimc (void)
{
  /* AESIMC: AES inverse mix columns.  */
  do_crypto_2op_1 (N_8, 3);
}
22145
static void
do_sha1c (void)
{
  /* SHA1C: SHA1 hash update (choose).  */
  do_crypto_3op_1 (0, 0);
}
22151
static void
do_sha1p (void)
{
  /* SHA1P: SHA1 hash update (parity).  */
  do_crypto_3op_1 (0, 1);
}
22157
static void
do_sha1m (void)
{
  /* SHA1M: SHA1 hash update (majority).  */
  do_crypto_3op_1 (0, 2);
}
22163
static void
do_sha1su0 (void)
{
  /* SHA1SU0: SHA1 schedule update 0.  */
  do_crypto_3op_1 (0, 3);
}
22169
static void
do_sha256h (void)
{
  /* SHA256H: SHA256 hash update part 1.  */
  do_crypto_3op_1 (1, 0);
}
22175
static void
do_sha256h2 (void)
{
  /* SHA256H2: SHA256 hash update part 2.  */
  do_crypto_3op_1 (1, 1);
}
22181
static void
do_sha256su1 (void)
{
  /* SHA256SU1: SHA256 schedule update 1.  */
  do_crypto_3op_1 (1, 2);
}
22187
static void
do_sha1h (void)
{
  /* SHA1H: SHA1 fixed rotate; no op field.  */
  do_crypto_2op_1 (N_32, -1);
}
22193
static void
do_sha1su1 (void)
{
  /* SHA1SU1: SHA1 schedule update 1.  */
  do_crypto_2op_1 (N_32, 0);
}
22199
static void
do_sha256su0 (void)
{
  /* SHA256SU0: SHA256 schedule update 0.  */
  do_crypto_2op_1 (N_32, 1);
}
22205
/* Encode a CRC32 instruction.  POLY is 1 for the Castagnoli (CRC32C*)
   variants; SZ selects the operand size (0 = byte, 1 = halfword,
   2 = word).  The field positions differ between the ARM and Thumb
   encodings.  */
static void
do_crc32_1 (unsigned int poly, unsigned int sz)
{
  unsigned int Rd = inst.operands[0].reg;
  unsigned int Rn = inst.operands[1].reg;
  unsigned int Rm = inst.operands[2].reg;

  set_pred_insn_type (OUTSIDE_PRED_INSN);
  inst.instruction |= LOW4 (Rd) << (thumb_mode ? 8 : 12);
  inst.instruction |= LOW4 (Rn) << 16;
  inst.instruction |= LOW4 (Rm);
  inst.instruction |= sz << (thumb_mode ? 4 : 21);
  inst.instruction |= poly << (thumb_mode ? 20 : 9);

  /* Using PC here is UNPREDICTABLE, but only a warning.  */
  if (Rd == REG_PC || Rn == REG_PC || Rm == REG_PC)
    as_warn (UNPRED_REG ("r15"));
}
22223
static void
do_crc32b (void)
{
  /* CRC32B: byte.  */
  do_crc32_1 (0, 0);
}
22229
static void
do_crc32h (void)
{
  /* CRC32H: halfword.  */
  do_crc32_1 (0, 1);
}
22235
static void
do_crc32w (void)
{
  /* CRC32W: word.  */
  do_crc32_1 (0, 2);
}
22241
static void
do_crc32cb (void)
{
  /* CRC32CB: Castagnoli polynomial, byte.  */
  do_crc32_1 (1, 0);
}
22247
static void
do_crc32ch (void)
{
  /* CRC32CH: Castagnoli polynomial, halfword.  */
  do_crc32_1 (1, 1);
}
22253
static void
do_crc32cw (void)
{
  /* CRC32CW: Castagnoli polynomial, word.  */
  do_crc32_1 (1, 2);
}
22259
/* Encode VJCVT - Javascript-style convert double to signed 32-bit
   integer (Armv8.3-A).  */
static void
do_vjcvt (void)
{
  constraint (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_vfp_ext_armv8),
	      _(BAD_FPU));
  neon_check_type (2, NS_FD, N_S32, N_F64);
  do_vfp_sp_dp_cvt ();
  do_vfp_cond_or_thumb ();
}
22269
/* Encode VDOT - BFloat16 dot product (Armv8.6-A BF16).  Handles both
   the indexed (by-scalar) and the three-register vector forms.  */
static void
do_vdot (void)
{
  enum neon_shape rs;
  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);
  if (inst.operands[2].isscalar)
    {
      rs = neon_select_shape (NS_DDS, NS_QQS, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

      inst.instruction |= (1 << 25);
      /* The scalar operand packs the index in its low nibble.  */
      int index = inst.operands[2].reg & 0xf;
      constraint ((index != 1 && index != 0), _("index must be 0 or 1"));
      inst.operands[2].reg >>= 4;
      constraint (!(inst.operands[2].reg < 16),
		  _("indexed register must be less than 16"));
      neon_three_args (rs == NS_QQS);
      inst.instruction |= (index << 5);
    }
  else
    {
      rs = neon_select_shape (NS_DDD, NS_QQQ, NS_NULL);
      neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);
      neon_three_args (rs == NS_QQQ);
    }
}
22297
/* Encode VMMLA - BFloat16 matrix multiply-accumulate (Armv8.6-A BF16),
   Q registers only.  */
static void
do_vmmla (void)
{
  enum neon_shape rs = neon_select_shape (NS_QQQ, NS_NULL);
  neon_check_type (3, rs, N_EQK, N_EQK, N_BF16 | N_KEY);

  constraint (!mark_feature_used (&fpu_neon_ext_armv8), _(BAD_FPU));
  set_pred_insn_type (OUTSIDE_PRED_INSN);

  neon_three_args (1);
}
22309
22310
22311/* Overall per-instruction processing.	*/
22312
22313/* We need to be able to fix up arbitrary expressions in some statements.
22314   This is so that we can handle symbols that are an arbitrary distance from
22315   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
22316   which returns part of an address in a form which will be valid for
22317   a data instruction.	We do this by pushing the expression into a symbol
22318   in the expr_section, and creating a fix for that.  */
22319
/* Create a fixup of SIZE bytes at FRAG/WHERE for expression EXP with
   relocation type RELOC; PC_REL is non-zero for PC-relative fixups.
   Non-trivial expressions are wrapped in an expression symbol first.  */
static void
fix_new_arm (fragS *	   frag,
	     int	   where,
	     short int	   size,
	     expressionS * exp,
	     int	   pc_rel,
	     int	   reloc)
{
  fixS *	   new_fix;

  switch (exp->X_op)
    {
    case O_constant:
      if (pc_rel)
	{
	  /* Create an absolute valued symbol, so we have something to
	     refer to in the object file.  Unfortunately for us, gas's
	     generic expression parsing will already have folded out
	     any use of .set foo/.type foo %function that may have
	     been used to set type information of the target location,
	     that's being specified symbolically.  We have to presume
	     the user knows what they are doing.  */
	  char name[16 + 8];
	  symbolS *symbol;

	  sprintf (name, "*ABS*0x%lx", (unsigned long)exp->X_add_number);

	  symbol = symbol_find_or_make (name);
	  S_SET_SEGMENT (symbol, absolute_section);
	  symbol_set_frag (symbol, &zero_address_frag);
	  S_SET_VALUE (symbol, exp->X_add_number);
	  exp->X_op = O_symbol;
	  exp->X_add_symbol = symbol;
	  exp->X_add_number = 0;
	}
      /* FALLTHROUGH */
    case O_symbol:
    case O_add:
    case O_subtract:
      /* These forms can be represented directly by a fixup.  */
      new_fix = fix_new_exp (frag, where, size, exp, pc_rel,
			     (enum bfd_reloc_code_real) reloc);
      break;

    default:
      /* Anything more complex goes through an expression symbol.  */
      new_fix = (fixS *) fix_new (frag, where, size, make_expr_symbol (exp), 0,
				  pc_rel, (enum bfd_reloc_code_real) reloc);
      break;
    }

  /* Mark whether the fix is to a THUMB instruction, or an ARM
     instruction.  */
  new_fix->tc_fix_data = thumb_mode;
}
22373
/* Create a frag for an instruction requiring relaxation.  */
static void
output_relax_insn (void)
{
  char * to;
  symbolS *sym;
  int offset;

  /* The size of the instruction is unknown, so tie the debug info to the
     start of the instruction.  */
  dwarf2_emit_insn (0);

  /* Split the target expression into a symbol plus offset for frag_var.  */
  switch (inst.relocs[0].exp.X_op)
    {
    case O_symbol:
      sym = inst.relocs[0].exp.X_add_symbol;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    case O_constant:
      sym = NULL;
      offset = inst.relocs[0].exp.X_add_number;
      break;
    default:
      sym = make_expr_symbol (&inst.relocs[0].exp);
      offset = 0;
      break;
  }
  to = frag_var (rs_machine_dependent, INSN_SIZE, THUMB_SIZE,
		 inst.relax, sym, offset, NULL/*offset, opcode*/);
  md_number_to_chars (to, inst.instruction, THUMB_SIZE);
}
22405
/* Write a 32-bit thumb instruction to buf.  The most significant
   halfword is emitted first.  */
static void
put_thumb32_insn (char * buf, unsigned long insn)
{
  md_number_to_chars (buf, insn >> 16, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, insn, THUMB_SIZE);
}
22413
/* Emit the assembled instruction in `inst' to the current frag,
   reporting any accumulated error against the source text STR, and
   create fixups for its pending relocations.  */
static void
output_inst (const char * str)
{
  char * to = NULL;

  if (inst.error)
    {
      as_bad ("%s -- `%s'", inst.error, str);
      return;
    }
  if (inst.relax)
    {
      output_relax_insn ();
      return;
    }
  if (inst.size == 0)
    return;

  to = frag_more (inst.size);
  /* PR 9814: Record the thumb mode into the current frag so that we know
     what type of NOP padding to use, if necessary.  We override any previous
     setting so that if the mode has changed then the NOPS that we use will
     match the encoding of the last instruction in the frag.  */
  frag_now->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;

  if (thumb_mode && (inst.size > THUMB_SIZE))
    {
      /* 32-bit Thumb: halfwords are written most-significant first.  */
      gas_assert (inst.size == (2 * THUMB_SIZE));
      put_thumb32_insn (to, inst.instruction);
    }
  else if (inst.size > INSN_SIZE)
    {
      /* Double-size ARM instruction: the same word written twice.  */
      gas_assert (inst.size == (2 * INSN_SIZE));
      md_number_to_chars (to, inst.instruction, INSN_SIZE);
      md_number_to_chars (to + INSN_SIZE, inst.instruction, INSN_SIZE);
    }
  else
    md_number_to_chars (to, inst.instruction, inst.size);

  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    {
      if (inst.relocs[r].type != BFD_RELOC_UNUSED)
	fix_new_arm (frag_now, to - frag_now->fr_literal,
		     inst.size, & inst.relocs[r].exp, inst.relocs[r].pc_rel,
		     inst.relocs[r].type);
    }

  dwarf2_emit_insn (inst.size);
}
22464
22465static char *
22466output_it_inst (int cond, int mask, char * to)
22467{
22468  unsigned long instruction = 0xbf00;
22469
22470  mask &= 0xf;
22471  instruction |= mask;
22472  instruction |= cond << 4;
22473
22474  if (to == NULL)
22475    {
22476      to = frag_more (2);
22477#ifdef OBJ_ELF
22478      dwarf2_emit_insn (2);
22479#endif
22480    }
22481
22482  md_number_to_chars (to, instruction, 2);
22483
22484  return to;
22485}
22486
/* Tag values used in struct asm_opcode's tag field.  They describe how,
   and at which character position, a conditional affix may be attached
   to the mnemonic; opcode_lookup uses them to validate the affix it
   finds.  */
enum opcode_tag
{
  OT_unconditional,	/* Instruction cannot be conditionalized.
			   The ARM condition field is still 0xE.  */
  OT_unconditionalF,	/* Instruction cannot be conditionalized
			   and carries 0xF in its ARM condition field.  */
  OT_csuffix,		/* Instruction takes a conditional suffix.  */
  OT_csuffixF,		/* Some forms of the instruction take a scalar
			   conditional suffix, others place 0xF where the
			   condition field would be, others take a vector
			   conditional suffix.  */
  OT_cinfix3,		/* Instruction takes a conditional infix,
			   beginning at character index 3.  (In
			   unified mode, it becomes a suffix.)  */
  OT_cinfix3_deprecated, /* The same as OT_cinfix3.  This is used for
			    tsts, cmps, cmns, and teqs. */
  OT_cinfix3_legacy,	/* Legacy instruction takes a conditional infix at
			   character index 3, even in unified mode.  Used for
			   legacy instructions where suffix and infix forms
			   may be ambiguous.  */
  OT_csuf_or_in3,	/* Instruction takes either a conditional
			   suffix or an infix at character index 3.  */
  OT_odd_infix_unc,	/* This is the unconditional variant of an
			   instruction that takes a conditional infix
			   at an unusual position.  In unified mode,
			   this variant will accept a suffix.  */
  OT_odd_infix_0	/* Values greater than or equal to OT_odd_infix_0
			   are the conditional variants of instructions that
			   take conditional infixes in unusual positions.
			   The infix appears at character index
			   (tag - OT_odd_infix_0).  These are not accepted
			   in unified mode.  */
};
22521
22522/* Subroutine of md_assemble, responsible for looking up the primary
22523   opcode from the mnemonic the user wrote.  STR points to the
22524   beginning of the mnemonic.
22525
22526   This is not simply a hash table lookup, because of conditional
22527   variants.  Most instructions have conditional variants, which are
22528   expressed with a _conditional affix_ to the mnemonic.  If we were
22529   to encode each conditional variant as a literal string in the opcode
22530   table, it would have approximately 20,000 entries.
22531
22532   Most mnemonics take this affix as a suffix, and in unified syntax,
22533   'most' is upgraded to 'all'.  However, in the divided syntax, some
22534   instructions take the affix as an infix, notably the s-variants of
22535   the arithmetic instructions.  Of those instructions, all but six
22536   have the infix appear after the third character of the mnemonic.
22537
22538   Accordingly, the algorithm for looking up primary opcodes given
22539   an identifier is:
22540
22541   1. Look up the identifier in the opcode table.
22542      If we find a match, go to step U.
22543
22544   2. Look up the last two characters of the identifier in the
22545      conditions table.  If we find a match, look up the first N-2
22546      characters of the identifier in the opcode table.  If we
22547      find a match, go to step CE.
22548
22549   3. Look up the fourth and fifth characters of the identifier in
22550      the conditions table.  If we find a match, extract those
22551      characters from the identifier, and look up the remaining
22552      characters in the opcode table.  If we find a match, go
22553      to step CM.
22554
22555   4. Fail.
22556
22557   U. Examine the tag field of the opcode structure, in case this is
22558      one of the six instructions with its conditional infix in an
22559      unusual place.  If it is, the tag tells us where to find the
22560      infix; look it up in the conditions table and set inst.cond
22561      accordingly.  Otherwise, this is an unconditional instruction.
22562      Again set inst.cond accordingly.  Return the opcode structure.
22563
22564  CE. Examine the tag field to make sure this is an instruction that
22565      should receive a conditional suffix.  If it is not, fail.
22566      Otherwise, set inst.cond from the suffix we already looked up,
22567      and return the opcode structure.
22568
22569  CM. Examine the tag field to make sure this is an instruction that
22570      should receive a conditional infix after the third character.
22571      If it is not, fail.  Otherwise, undo the edits to the current
22572      line of input and proceed as for case CE.  */
22573
static const struct asm_opcode *
opcode_lookup (char **str)
{
  char *end, *base;
  char *affix;
  const struct asm_opcode *opcode;
  const struct asm_cond *cond;
  char save[2];

  /* Scan up to the end of the mnemonic, which must end in white space,
     '.' (in unified mode, or for Neon/VFP instructions), or end of string.  */
  for (base = end = *str; *end != '\0'; end++)
    if (*end == ' ' || *end == '.')
      break;

  if (end == base)
    return NULL;

  /* Handle a possible width suffix and/or Neon type suffix.  */
  if (end[0] == '.')
    {
      int offset = 2;

      /* The .w and .n suffixes are only valid if the unified syntax is in
	 use.  */
      if (unified_syntax && end[1] == 'w')
	inst.size_req = 4;
      else if (unified_syntax && end[1] == 'n')
	inst.size_req = 2;
      else
	offset = 0;

      inst.vectype.elems = 0;

      *str = end + offset;

      if (end[offset] == '.')
	{
	  /* See if we have a Neon type suffix (possible in either unified or
	     non-unified ARM syntax mode).  */
	  if (parse_neon_type (&inst.vectype, str) == FAIL)
	    return NULL;
	}
      else if (end[offset] != '\0' && end[offset] != ' ')
	return NULL;
    }
  else
    *str = end;

  /* Look for unaffixed or special-case affixed mnemonic.  */
  opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							end - base);
  cond = NULL;
  if (opcode)
    {
      /* step U */
      if (opcode->tag < OT_odd_infix_0)
	{
	  inst.cond = COND_ALWAYS;
	  return opcode;
	}

      if (warn_on_deprecated && unified_syntax)
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));
      affix = base + (opcode->tag - OT_odd_infix_0);
      cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
      gas_assert (cond);

      inst.cond = cond->value;
      return opcode;
    }
 if (ARM_CPU_HAS_FEATURE (cpu_variant, mve_ext))
   {
    /* Cannot have a one-character vector-predication suffix on a mnemonic of
       less than two characters.  */
    if (end - base < 2)
      return NULL;
     affix = end - 1;
     cond = (const struct asm_cond *) str_hash_find_n (arm_vcond_hsh, affix, 1);
     opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							   affix - base);
     /* If this opcode can not be vector predicated then don't accept it with a
	vector predication code.  */
     if (opcode && !opcode->mayBeVecPred)
       opcode = NULL;
   }
  if (!opcode || !cond)
    {
      /* Cannot have a two-character conditional suffix on a mnemonic of
	 less than three characters.  */
      if (end - base < 3)
	return NULL;

      /* Look for suffixed mnemonic.  */
      affix = end - 2;
      cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
      opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							    affix - base);
    }

  if (opcode && cond)
    {
      /* step CE */
      switch (opcode->tag)
	{
	case OT_cinfix3_legacy:
	  /* Ignore conditional suffixes matched on infix only mnemonics.  */
	  break;

	case OT_cinfix3:
	case OT_cinfix3_deprecated:
	case OT_odd_infix_unc:
	  if (!unified_syntax)
	    return NULL;
	  /* Fall through.  */

	case OT_csuffix:
	case OT_csuffixF:
	case OT_csuf_or_in3:
	  inst.cond = cond->value;
	  return opcode;

	case OT_unconditional:
	case OT_unconditionalF:
	  if (thumb_mode)
	    inst.cond = cond->value;
	  else
	    {
	      /* Delayed diagnostic.  */
	      inst.error = BAD_COND;
	      inst.cond = COND_ALWAYS;
	    }
	  return opcode;

	default:
	  return NULL;
	}
    }

  /* Cannot have a usual-position infix on a mnemonic of less than
     six characters (five would be a suffix).  */
  if (end - base < 6)
    return NULL;

  /* Look for infixed mnemonic in the usual position.  */
  affix = base + 3;
  cond = (const struct asm_cond *) str_hash_find_n (arm_cond_hsh, affix, 2);
  if (!cond)
    return NULL;

  /* Temporarily strip the two infix characters out of the input line,
     look up the shortened mnemonic, then restore the line byte-for-byte
     so later diagnostics still show the user's original text.  */
  memcpy (save, affix, 2);
  memmove (affix, affix + 2, (end - affix) - 2);
  opcode = (const struct asm_opcode *) str_hash_find_n (arm_ops_hsh, base,
							(end - base) - 2);
  memmove (affix + 2, affix, (end - affix) - 2);
  memcpy (affix, save, 2);

  if (opcode
      && (opcode->tag == OT_cinfix3
	  || opcode->tag == OT_cinfix3_deprecated
	  || opcode->tag == OT_csuf_or_in3
	  || opcode->tag == OT_cinfix3_legacy))
    {
      /* Step CM.  */
      if (warn_on_deprecated && unified_syntax
	  && (opcode->tag == OT_cinfix3
	      || opcode->tag == OT_cinfix3_deprecated))
	as_tsktsk (_("conditional infixes are deprecated in unified syntax"));

      inst.cond = cond->value;
      return opcode;
    }

  return NULL;
}
22749
22750/* This function generates an initial IT instruction, leaving its block
22751   virtually open for the new instructions. Eventually,
22752   the mask will be updated by now_pred_add_mask () each time
22753   a new instruction needs to be included in the IT block.
22754   Finally, the block is closed with close_automatic_it_block ().
22755   The block closure can be requested either from md_assemble (),
22756   a tencode (), or due to a label hook.  */
22757
22758static void
22759new_automatic_it_block (int cond)
22760{
22761  now_pred.state = AUTOMATIC_PRED_BLOCK;
22762  now_pred.mask = 0x18;
22763  now_pred.cc = cond;
22764  now_pred.block_length = 1;
22765  mapping_state (MAP_THUMB);
22766  now_pred.insn = output_it_inst (cond, now_pred.mask, NULL);
22767  now_pred.warn_deprecated = FALSE;
22768  now_pred.insn_cond = TRUE;
22769}
22770
22771/* Close an automatic IT block.
22772   See comments in new_automatic_it_block ().  */
22773
22774static void
22775close_automatic_it_block (void)
22776{
22777  now_pred.mask = 0x10;
22778  now_pred.block_length = 0;
22779}
22780
22781/* Update the mask of the current automatically-generated IT
22782   instruction. See comments in new_automatic_it_block ().  */
22783
22784static void
22785now_pred_add_mask (int cond)
22786{
22787#define CLEAR_BIT(value, nbit)  ((value) & ~(1 << (nbit)))
22788#define SET_BIT_VALUE(value, bitvalue, nbit)  (CLEAR_BIT (value, nbit) \
22789					      | ((bitvalue) << (nbit)))
22790  const int resulting_bit = (cond & 1);
22791
22792  now_pred.mask &= 0xf;
22793  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22794				   resulting_bit,
22795				  (5 - now_pred.block_length));
22796  now_pred.mask = SET_BIT_VALUE (now_pred.mask,
22797				   1,
22798				   ((5 - now_pred.block_length) - 1));
22799  output_it_inst (now_pred.cc, now_pred.mask, now_pred.insn);
22800
22801#undef CLEAR_BIT
22802#undef SET_BIT_VALUE
22803}
22804
22805/* The IT blocks handling machinery is accessed through the these functions:
22806     it_fsm_pre_encode ()               from md_assemble ()
22807     set_pred_insn_type ()		optional, from the tencode functions
22808     set_pred_insn_type_last ()		ditto
22809     in_pred_block ()			ditto
22810     it_fsm_post_encode ()              from md_assemble ()
22811     force_automatic_it_block_close ()  from label handling functions
22812
22813   Rationale:
22814     1) md_assemble () calls it_fsm_pre_encode () before calling tencode (),
22815	initializing the IT insn type with a generic initial value depending
22816	on the inst.condition.
22817     2) During the tencode function, two things may happen:
22818	a) The tencode function overrides the IT insn type by
22819	   calling either set_pred_insn_type (type) or
22820	   set_pred_insn_type_last ().
22821	b) The tencode function queries the IT block state by
22822	   calling in_pred_block () (i.e. to determine narrow/not narrow mode).
22823
22824	Both set_pred_insn_type and in_pred_block run the internal FSM state
22825	handling function (handle_pred_state), because: a) setting the IT insn
22826	type may incur in an invalid state (exiting the function),
22827	and b) querying the state requires the FSM to be updated.
22828	Specifically we want to avoid creating an IT block for conditional
22829	branches, so it_fsm_pre_encode is actually a guess and we can't
22830	determine whether an IT block is required until the tencode () routine
	has decided what type of instruction this actually is.
22832	Because of this, if set_pred_insn_type and in_pred_block have to be
22833	used, set_pred_insn_type has to be called first.
22834
22835	set_pred_insn_type_last () is a wrapper of set_pred_insn_type (type),
22836	that determines the insn IT type depending on the inst.cond code.
22837	When a tencode () routine encodes an instruction that can be
22838	either outside an IT block, or, in the case of being inside, has to be
22839	the last one, set_pred_insn_type_last () will determine the proper
22840	IT instruction type based on the inst.cond code. Otherwise,
22841	set_pred_insn_type can be called for overriding that logic or
22842	for covering other cases.
22843
22844	Calling handle_pred_state () may not transition the IT block state to
22845	OUTSIDE_PRED_BLOCK immediately, since the (current) state could be
22846	still queried. Instead, if the FSM determines that the state should
22847	be transitioned to OUTSIDE_PRED_BLOCK, a flag is marked to be closed
22848	after the tencode () function: that's what it_fsm_post_encode () does.
22849
22850	Since in_pred_block () calls the state handling function to get an
22851	updated state, an error may occur (due to invalid insns combination).
22852	In that case, inst.error is set.
22853	Therefore, inst.error has to be checked after the execution of
22854	the tencode () routine.
22855
22856     3) Back in md_assemble(), it_fsm_post_encode () is called to commit
22857	any pending state change (if any) that didn't take place in
22858	handle_pred_state () as explained above.  */
22859
22860static void
22861it_fsm_pre_encode (void)
22862{
22863  if (inst.cond != COND_ALWAYS)
22864    inst.pred_insn_type =  INSIDE_IT_INSN;
22865  else
22866    inst.pred_insn_type = OUTSIDE_PRED_INSN;
22867
22868  now_pred.state_handled = 0;
22869}
22870
22871/* IT state FSM handling function.  */
22872/* MVE instructions and non-MVE instructions are handled differently because of
22873   the introduction of VPT blocks.
22874   Specifications say that any non-MVE instruction inside a VPT block is
22875   UNPREDICTABLE, with the exception of the BKPT instruction.  Whereas most MVE
22876   instructions are deemed to be UNPREDICTABLE if inside an IT block.  For the
22877   few exceptions we have MVE_UNPREDICABLE_INSN.
22878   The error messages provided depending on the different combinations possible
22879   are described in the cases below:
22880   For 'most' MVE instructions:
22881   1) In an IT block, with an IT code: syntax error
22882   2) In an IT block, with a VPT code: error: must be in a VPT block
22883   3) In an IT block, with no code: warning: UNPREDICTABLE
22884   4) In a VPT block, with an IT code: syntax error
22885   5) In a VPT block, with a VPT code: OK!
22886   6) In a VPT block, with no code: error: missing code
22887   7) Outside a pred block, with an IT code: error: syntax error
22888   8) Outside a pred block, with a VPT code: error: should be in a VPT block
22889   9) Outside a pred block, with no code: OK!
22890   For non-MVE instructions:
22891   10) In an IT block, with an IT code: OK!
22892   11) In an IT block, with a VPT code: syntax error
22893   12) In an IT block, with no code: error: missing code
22894   13) In a VPT block, with an IT code: error: should be in an IT block
22895   14) In a VPT block, with a VPT code: syntax error
22896   15) In a VPT block, with no code: UNPREDICTABLE
22897   16) Outside a pred block, with an IT code: error: should be in an IT block
22898   17) Outside a pred block, with a VPT code: syntax error
22899   18) Outside a pred block, with no code: OK!
22900 */
22901
22902
/* Advance the predication (IT/VPT) FSM for the instruction currently
   being assembled.  Returns SUCCESS or FAIL; on FAIL inst.error has been
   set.  The "Case N" comments refer to the table in the block comment
   above.  */
static int
handle_pred_state (void)
{
  now_pred.state_handled = 1;
  now_pred.insn_cond = FALSE;

  switch (now_pred.state)
    {
    case OUTSIDE_PRED_BLOCK:
      switch (inst.pred_insn_type)
	{
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 7: Outside a pred block, with an IT code: error: syntax
		 error.  */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 9:  Outside a pred block, with no code: OK!  */
	  break;
	case OUTSIDE_PRED_INSN:
	  if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17:  Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  /* Case 18: Outside a pred block, with no code: OK!  */
	  break;

	case INSIDE_VPT_INSN:
	  /* Case 8: Outside a pred block, with a VPT code: error: should be in
	     a VPT block.  */
	  inst.error = BAD_OUT_VPT;
	  return FAIL;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	  if (inst.cond < COND_ALWAYS)
	    {
	      /* Case 16: Outside a pred block, with an IT code: error: should
		 be in an IT block.  */
	      if (thumb_mode == 0)
		{
		  if (unified_syntax
		      && !(implicit_it_mode & IMPLICIT_IT_MODE_ARM))
		    as_tsktsk (_("Warning: conditional outside an IT block"\
				 " for Thumb."));
		}
	      else
		{
		  if ((implicit_it_mode & IMPLICIT_IT_MODE_THUMB)
		      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
		    {
		      /* Automatically generate the IT instruction.  */
		      new_automatic_it_block (inst.cond);
		      if (inst.pred_insn_type == INSIDE_IT_LAST_INSN)
			close_automatic_it_block ();
		    }
		  else
		    {
		      inst.error = BAD_OUT_IT;
		      return FAIL;
		    }
		}
	      break;
	    }
	  else if (inst.cond > COND_ALWAYS)
	    {
	      /* Case 17: Outside a pred block, with a VPT code: syntax error.
	       */
	      inst.error = BAD_SYNTAX;
	      return FAIL;
	    }
	  else
	    gas_assert (0);
	  /* Not reached: gas_assert aborts, so control cannot fall
	     through to the cases below.  */
	case IF_INSIDE_IT_LAST_INSN:
	case NEUTRAL_IT_INSN:
	  break;

	case VPT_INSN:
	  if (inst.cond != COND_ALWAYS)
	    first_error (BAD_SYNTAX);
	  /* A VPT/VPST instruction opens a manual vector pred block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = VECTOR_PRED;
	  now_pred.cc = 0;
	  break;
	case IT_INSN:
	  /* An explicit IT instruction opens a manual scalar pred block.  */
	  now_pred.state = MANUAL_PRED_BLOCK;
	  now_pred.block_length = 0;
	  now_pred.type = SCALAR_PRED;
	  break;
	}
      break;

    case AUTOMATIC_PRED_BLOCK:
      /* Three things may happen now:
	 a) We should increment current it block size;
	 b) We should close current it block (closing insn or 4 insns);
	 c) We should close current it block and start a new one (due
	 to incompatible conditions or
	 4 insns-length block reached).  */

      switch (inst.pred_insn_type)
	{
	case INSIDE_VPT_INSN:
	case VPT_INSN:
	case MVE_UNPREDICABLE_INSN:
	case MVE_OUTSIDE_PRED_INSN:
	  gas_assert (0);
	  /* Not reached: gas_assert aborts.  */
	case OUTSIDE_PRED_INSN:
	  /* The closure of the block shall happen immediately,
	     so any in_pred_block () call reports the block as closed.  */
	  force_automatic_it_block_close ();
	  break;

	case INSIDE_IT_INSN:
	case INSIDE_IT_LAST_INSN:
	case IF_INSIDE_IT_LAST_INSN:
	  now_pred.block_length++;

	  if (now_pred.block_length > 4
	      || !now_pred_compatible (inst.cond))
	    {
	      force_automatic_it_block_close ();
	      if (inst.pred_insn_type != IF_INSIDE_IT_LAST_INSN)
		new_automatic_it_block (inst.cond);
	    }
	  else
	    {
	      now_pred.insn_cond = TRUE;
	      now_pred_add_mask (inst.cond);
	    }

	  if (now_pred.state == AUTOMATIC_PRED_BLOCK
	      && (inst.pred_insn_type == INSIDE_IT_LAST_INSN
		  || inst.pred_insn_type == IF_INSIDE_IT_LAST_INSN))
	    close_automatic_it_block ();
	  break;

	case NEUTRAL_IT_INSN:
	  now_pred.block_length++;
	  now_pred.insn_cond = TRUE;

	  if (now_pred.block_length > 4)
	    force_automatic_it_block_close ();
	  else
	    now_pred_add_mask (now_pred.cc & 1);
	  break;

	case IT_INSN:
	  /* An explicit IT instruction ends the automatic block and
	     starts a manual one.  */
	  close_automatic_it_block ();
	  now_pred.state = MANUAL_PRED_BLOCK;
	  break;
	}
      break;

    case MANUAL_PRED_BLOCK:
      {
	unsigned int cond;
	int is_last;
	if (now_pred.type == SCALAR_PRED)
	  {
	    /* Check conditional suffixes.  Derive the condition expected
	       for this slot from the block condition and the top bit of
	       the IT mask, then shift the mask to consume the slot.  */
	    cond = now_pred.cc ^ ((now_pred.mask >> 4) & 1) ^ 1;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = (now_pred.mask == 0x10);
	  }
	else
	  {
	    now_pred.cc ^= (now_pred.mask >> 4);
	    cond = now_pred.cc + 0xf;
	    now_pred.mask <<= 1;
	    now_pred.mask &= 0x1f;
	    is_last = now_pred.mask == 0x10;
	  }
	now_pred.insn_cond = TRUE;

	switch (inst.pred_insn_type)
	  {
	  case OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 12: In an IT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		else if (inst.cond > COND_ALWAYS)
		  {
		    /* Case 11: In an IT block, with a VPT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (thumb_mode)
		  {
		    /* This is for some special cases where a non-MVE
		       instruction is not allowed in an IT block, such as cbz,
		       but are put into one with a condition code.
		       You could argue this should be a syntax error, but we
		       gave the 'not allowed in IT block' diagnostic in the
		       past so we will keep doing so.  */
		    inst.error = BAD_NOT_IT;
		    return FAIL;
		  }
		break;
	      }
	    else
	      {
		/* Case 15: In a VPT block, with no code: UNPREDICTABLE.  */
		as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	  case MVE_OUTSIDE_PRED_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 3: In an IT block, with no code: warning:
		       UNPREDICTABLE.  */
		    as_tsktsk (MVE_NOT_IT);
		    return SUCCESS;
		  }
		else if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 1: In an IT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else
		  gas_assert (0);
	      }
	    else
	      {
		if (inst.cond < COND_ALWAYS)
		  {
		    /* Case 4: In a VPT block, with an IT code: syntax error.
		     */
		    inst.error = BAD_SYNTAX;
		    return FAIL;
		  }
		else if (inst.cond == COND_ALWAYS)
		  {
		    /* Case 6: In a VPT block, with no code: error: missing
		       code.  */
		    inst.error = BAD_NOT_VPT;
		    return FAIL;
		  }
		else
		  {
		    gas_assert (0);
		  }
	      }
	  case MVE_UNPREDICABLE_INSN:
	    as_tsktsk (now_pred.type == SCALAR_PRED ? MVE_NOT_IT : MVE_NOT_VPT);
	    return SUCCESS;
	  case INSIDE_IT_INSN:
	    if (inst.cond > COND_ALWAYS)
	      {
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		/* Case 14: In a VPT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 10: In an IT block, with an IT code: OK!  */
		if (cond != inst.cond)
		  {
		    inst.error = now_pred.type == SCALAR_PRED ? BAD_IT_COND :
		      BAD_VPT_COND;
		    return FAIL;
		  }
	      }
	    else
	      {
		/* Case 13: In a VPT block, with an IT code: error: should be
		   in an IT block.  */
		inst.error = BAD_OUT_IT;
		return FAIL;
	      }
	    break;

	  case INSIDE_VPT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		/* Case 2: In an IT block, with a VPT code: error: must be in a
		   VPT block.  */
		inst.error = BAD_OUT_VPT;
		return FAIL;
	      }
	    /* Case 5:  In a VPT block, with a VPT code: OK!  */
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_VPT_COND;
		return FAIL;
	      }
	    break;
	  case INSIDE_IT_LAST_INSN:
	  case IF_INSIDE_IT_LAST_INSN:
	    if (now_pred.type == VECTOR_PRED || inst.cond > COND_ALWAYS)
	      {
		/* Case 4: In a VPT block, with an IT code: syntax error.  */
		/* Case 11: In an IT block, with a VPT code: syntax error.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	    else if (cond != inst.cond)
	      {
		inst.error = BAD_IT_COND;
		return FAIL;
	      }
	    if (!is_last)
	      {
		inst.error = BAD_BRANCH;
		return FAIL;
	      }
	    break;

	  case NEUTRAL_IT_INSN:
	    /* The BKPT instruction is unconditional even in a IT or VPT
	       block.  */
	    break;

	  case IT_INSN:
	    if (now_pred.type == SCALAR_PRED)
	      {
		inst.error = BAD_IT_IT;
		return FAIL;
	      }
	    /* fall through.  */
	  case VPT_INSN:
	    if (inst.cond == COND_ALWAYS)
	      {
		/* Executing a VPT/VPST instruction inside an IT block or a
		   VPT/VPST/IT instruction inside a VPT block is UNPREDICTABLE.
		 */
		if (now_pred.type == SCALAR_PRED)
		  as_tsktsk (MVE_NOT_IT);
		else
		  as_tsktsk (MVE_NOT_VPT);
		return SUCCESS;
	      }
	    else
	      {
		/* VPT/VPST do not accept condition codes.  */
		inst.error = BAD_SYNTAX;
		return FAIL;
	      }
	  }
	}
      break;
    }

  return SUCCESS;
}
23269
/* A pattern/mask pair describing a class of 16-bit Thumb encodings,
   with a human-readable description for diagnostics.  An instruction
   belongs to the class when (encoding & mask) == pattern.  */
struct depr_insn_mask
{
  unsigned long pattern;	/* Required bit values after masking.  */
  unsigned long mask;		/* Which encoding bits to examine.  */
  const char* description;	/* Text used in the deprecation warning.  */
};
23276
/* List of 16-bit instruction patterns deprecated in an IT block in
   ARMv8.  Terminated by an all-zero entry; matching is
   (encoding & mask) == pattern (see struct depr_insn_mask).  */
static const struct depr_insn_mask depr_it_insns[] = {
  { 0xc000, 0xc000, N_("Short branches, Undefined, SVC, LDM/STM") },
  { 0xb000, 0xb000, N_("Miscellaneous 16-bit instructions") },
  { 0xa000, 0xb800, N_("ADR") },
  { 0x4800, 0xf800, N_("Literal loads") },
  { 0x4478, 0xf478, N_("Hi-register ADD, MOV, CMP, BX, BLX using pc") },
  { 0x4487, 0xfc87, N_("Hi-register ADD, MOV, CMP using pc") },
  /* NOTE: 0x00dd is not the real encoding, instead, it is the 'tvalue'
     field in asm_opcode. 'tvalue' is used at the stage this check happen.  */
  { 0x00dd, 0x7fff, N_("ADD/SUB sp, sp #imm") },
  { 0, 0, NULL }
};
23291
/* Run after an instruction has been encoded: let the predication state
   machine account for the instruction just seen, emit the ARMv8-A/R
   performance-deprecation warnings for IT blocks, and leave the block
   after its last instruction.  */
static void
it_fsm_post_encode (void)
{
  int is_last;

  /* Make sure the predication state machine has processed this
     instruction.  */
  if (!now_pred.state_handled)
    handle_pred_state ();

  /* Warn (at most once per block) about IT-block contents that are
     performance deprecated on ARMv8-A/ARMv8-R; M-profile is exempt.  */
  if (now_pred.insn_cond
      && warn_on_restrict_it
      && !now_pred.warn_deprecated
      && warn_on_deprecated
      && (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8)
          || ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v8r))
      && !ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_m))
    {
      if (inst.instruction >= 0x10000)
	{
	  /* Any 32-bit Thumb instruction inside the block is deprecated.  */
	  as_tsktsk (_("IT blocks containing 32-bit Thumb instructions are "
		     "performance deprecated in ARMv8-A and ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
      else
	{
	  /* 16-bit instructions are deprecated only when they match one
	     of the classes in depr_it_insns.  */
	  const struct depr_insn_mask *p = depr_it_insns;

	  while (p->mask != 0)
	    {
	      if ((inst.instruction & p->mask) == p->pattern)
		{
		  as_tsktsk (_("IT blocks containing 16-bit Thumb "
			       "instructions of the following class are "
			       "performance deprecated in ARMv8-A and "
			       "ARMv8-R: %s"), p->description);
		  now_pred.warn_deprecated = TRUE;
		  break;
		}

	      ++p;
	    }
	}

      /* Blocks predicating more than one instruction are deprecated as
	 a whole.  */
      if (now_pred.block_length > 1)
	{
	  as_tsktsk (_("IT blocks containing more than one conditional "
		     "instruction are performance deprecated in ARMv8-A and "
		     "ARMv8-R"));
	  now_pred.warn_deprecated = TRUE;
	}
    }

    /* A mask of 0x10 marks the last instruction of the block; reset to
       the outside-block state once it has been seen.  */
    is_last = (now_pred.mask == 0x10);
    if (is_last)
      {
	now_pred.state = OUTSIDE_PRED_BLOCK;
	now_pred.mask = 0;
      }
}
23350
23351static void
23352force_automatic_it_block_close (void)
23353{
23354  if (now_pred.state == AUTOMATIC_PRED_BLOCK)
23355    {
23356      close_automatic_it_block ();
23357      now_pred.state = OUTSIDE_PRED_BLOCK;
23358      now_pred.mask = 0;
23359    }
23360}
23361
23362static int
23363in_pred_block (void)
23364{
23365  if (!now_pred.state_handled)
23366    handle_pred_state ();
23367
23368  return now_pred.state != OUTSIDE_PRED_BLOCK;
23369}
23370
23371/* Whether OPCODE only has T32 encoding.  Since this function is only used by
23372   t32_insn_ok, OPCODE enabled by v6t2 extension bit do not need to be listed
23373   here, hence the "known" in the function name.  */
23374
23375static bfd_boolean
23376known_t32_only_insn (const struct asm_opcode *opcode)
23377{
23378  /* Original Thumb-1 wide instruction.  */
23379  if (opcode->tencode == do_t_blx
23380      || opcode->tencode == do_t_branch23
23381      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_msr)
23382      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_barrier))
23383    return TRUE;
23384
23385  /* Wide-only instruction added to ARMv8-M Baseline.  */
23386  if (ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v8m_m_only)
23387      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_atomics)
23388      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_v6t2_v8m)
23389      || ARM_CPU_HAS_FEATURE (*opcode->tvariant, arm_ext_div))
23390    return TRUE;
23391
23392  return FALSE;
23393}
23394
23395/* Whether wide instruction variant can be used if available for a valid OPCODE
23396   in ARCH.  */
23397
23398static bfd_boolean
23399t32_insn_ok (arm_feature_set arch, const struct asm_opcode *opcode)
23400{
23401  if (known_t32_only_insn (opcode))
23402    return TRUE;
23403
23404  /* Instruction with narrow and wide encoding added to ARMv8-M.  Availability
23405     of variant T3 of B.W is checked in do_t_branch.  */
23406  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23407      && opcode->tencode == do_t_branch)
23408    return TRUE;
23409
23410  /* MOV accepts T1/T3 encodings under Baseline, T3 encoding is 32bit.  */
23411  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v8m)
23412      && opcode->tencode == do_t_mov_cmp
23413      /* Make sure CMP instruction is not affected.  */
23414      && opcode->aencode == do_mov)
23415    return TRUE;
23416
23417  /* Wide instruction variants of all instructions with narrow *and* wide
23418     variants become available with ARMv6t2.  Other opcodes are either
23419     narrow-only or wide-only and are thus available if OPCODE is valid.  */
23420  if (ARM_CPU_HAS_FEATURE (arch, arm_ext_v6t2))
23421    return TRUE;
23422
23423  /* OPCODE with narrow only instruction variant or wide variant not
23424     available.  */
23425  return FALSE;
23426}
23427
/* Main GAS entry point for one machine instruction: look the mnemonic
   in STR up, verify it is supported by the selected CPU/architecture
   in the current (ARM or Thumb) mode, parse the operands, run the
   mode-specific encoder bracketed by the IT/predication state machine,
   and finally emit the result via output_inst.  */
void
md_assemble (char *str)
{
  char *p = str;
  const struct asm_opcode * opcode;

  /* Align the previous label if needed.  */
  if (last_label_seen != NULL)
    {
      symbol_set_frag (last_label_seen, frag_now);
      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
      S_SET_SEGMENT (last_label_seen, now_seg);
    }

  /* Reset the per-instruction state.  */
  memset (&inst, '\0', sizeof (inst));
  int r;
  for (r = 0; r < ARM_IT_MAX_RELOCS; r++)
    inst.relocs[r].type = BFD_RELOC_UNUSED;

  opcode = opcode_lookup (&p);
  if (!opcode)
    {
      /* It wasn't an instruction, but it might be a register alias of
	 the form alias .req reg, or a Neon .dn/.qn directive.  */
      if (! create_register_alias (str, p)
	  && ! create_neon_reg_alias (str, p))
	as_bad (_("bad instruction `%s'"), str);

      return;
    }

  if (warn_on_deprecated && opcode->tag == OT_cinfix3_deprecated)
    as_tsktsk (_("s suffix on comparison instruction is deprecated"));

  /* The value which unconditional instructions should have in place of the
     condition field.  */
  inst.uncond_value = (opcode->tag == OT_csuffixF) ? 0xf : -1u;

  if (thumb_mode)
    {
      /* Thumb encoding.  */
      arm_feature_set variant;

      variant = cpu_variant;
      /* Only allow coprocessor instructions on Thumb-2 capable devices.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_arch_t2))
	ARM_CLEAR_FEATURE (variant, variant, fpu_any_hard);
      /* Check that this instruction is supported for this CPU.  */
      if (!opcode->tvariant
	  || (thumb_mode == 1
	      && !ARM_CPU_HAS_FEATURE (variant, *opcode->tvariant)))
	{
	  if (opcode->tencode == do_t_swi)
	    as_bad (_("SVC is not permitted on this architecture"));
	  else
	    as_bad (_("selected processor does not support `%s' in Thumb mode"), str);
	  return;
	}
      if (inst.cond != COND_ALWAYS && !unified_syntax
	  && opcode->tencode != do_t_branch)
	{
	  as_bad (_("Thumb does not support conditional execution"));
	  return;
	}

      /* Two things are addressed here:
	 1) Implicit require narrow instructions on Thumb-1.
	    This avoids relaxation accidentally introducing Thumb-2
	    instructions.
	 2) Reject wide instructions in non Thumb-2 cores.

	 Only instructions with narrow and wide variants need to be handled
	 but selecting all non wide-only instructions is easier.  */
      if (!ARM_CPU_HAS_FEATURE (variant, arm_ext_v6t2)
	  && !t32_insn_ok (variant, opcode))
	{
	  if (inst.size_req == 0)
	    inst.size_req = 2;
	  else if (inst.size_req == 4)
	    {
	      if (ARM_CPU_HAS_FEATURE (variant, arm_ext_v8m))
		as_bad (_("selected processor does not support 32bit wide "
			  "variant of instruction `%s'"), str);
	      else
		as_bad (_("selected processor does not support `%s' in "
			  "Thumb-2 mode"), str);
	      return;
	    }
	}

      inst.instruction = opcode->tvalue;

      if (!parse_operands (p, opcode->operands, /*thumb=*/TRUE))
	{
	  /* Prepare the pred_insn_type for those encodings that don't set
	     it.  */
	  it_fsm_pre_encode ();

	  opcode->tencode ();

	  it_fsm_post_encode ();
	}

      if (!(inst.error || inst.relax))
	{
	  /* 0xe800..0xffff are the leading halfwords of 32-bit Thumb
	     encodings, so a finished value must not land in that range.  */
	  gas_assert (inst.instruction < 0xe800 || inst.instruction > 0xffff);
	  inst.size = (inst.instruction > 0xffff ? 4 : 2);
	  if (inst.size_req && inst.size_req != inst.size)
	    {
	      as_bad (_("cannot honor width suffix -- `%s'"), str);
	      return;
	    }
	}

      /* Something has gone badly wrong if we try to relax a fixed size
	 instruction.  */
      gas_assert (inst.size_req == 0 || !inst.relax);

      ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
			      *opcode->tvariant);
      /* Many Thumb-2 instructions also have Thumb-1 variants, so explicitly
	 set those bits when Thumb-2 32-bit instructions are seen.  The impact
	 of relaxable instructions will be considered later after we finish all
	 relaxation.  */
      if (ARM_FEATURE_CORE_EQUAL (cpu_variant, arm_arch_any))
	variant = arm_arch_none;
      else
	variant = cpu_variant;
      if (inst.size == 4 && !t32_insn_ok (variant, opcode))
	ARM_MERGE_FEATURE_SETS (thumb_arch_used, thumb_arch_used,
				arm_ext_v6t2);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_THUMB);
	}
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    {
      /* ARM encoding.  */
      bfd_boolean is_bx;

      /* bx is allowed on v5 cores, and sometimes on v4 cores.  */
      is_bx = (opcode->aencode == do_bx);

      /* Check that this instruction is supported for this CPU.  */
      if (!(is_bx && fix_v4bx)
	  && !(opcode->avariant &&
	       ARM_CPU_HAS_FEATURE (cpu_variant, *opcode->avariant)))
	{
	  as_bad (_("selected processor does not support `%s' in ARM mode"), str);
	  return;
	}
      if (inst.size_req)
	{
	  as_bad (_("width suffixes are invalid in ARM mode -- `%s'"), str);
	  return;
	}

      inst.instruction = opcode->avalue;
      if (opcode->tag == OT_unconditionalF)
	inst.instruction |= 0xFU << 28;
      else
	inst.instruction |= inst.cond << 28;
      inst.size = INSN_SIZE;
      if (!parse_operands (p, opcode->operands, /*thumb=*/FALSE))
	{
	  it_fsm_pre_encode ();
	  opcode->aencode ();
	  it_fsm_post_encode ();
	}
      /* Arm mode bx is marked as both v4T and v5 because it's still required
	 on a hypothetical non-thumb v5 core.  */
      if (is_bx)
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used, arm_ext_v4t);
      else
	ARM_MERGE_FEATURE_SETS (arm_arch_used, arm_arch_used,
				*opcode->avariant);

      check_neon_suffixes;

      if (!inst.error)
	{
	  mapping_state (MAP_ARM);
	}
    }
  else
    {
      as_bad (_("attempt to use an ARM instruction on a Thumb-only processor "
		"-- `%s'"), str);
      return;
    }
  output_inst (str);
}
23622
/* End-of-assembly sanity check: warn about any manually-opened IT/VPT
   block that was never closed.  */
static void
check_pred_blocks_finished (void)
{
#ifdef OBJ_ELF
  asection *sect;

  /* For ELF, predication state is tracked per section.  */
  for (sect = stdoutput->sections; sect != NULL; sect = sect->next)
    if (seg_info (sect)->tc_segment_info_data.current_pred.state
	== MANUAL_PRED_BLOCK)
      {
	/* NOTE(review): the message is chosen from the global
	   now_pred.type rather than the per-section predication type --
	   confirm this is intended.  */
	if (now_pred.type == SCALAR_PRED)
	  as_warn (_("section '%s' finished with an open IT block."),
		   sect->name);
	else
	  as_warn (_("section '%s' finished with an open VPT/VPST block."),
		   sect->name);
      }
#else
  if (now_pred.state == MANUAL_PRED_BLOCK)
    {
      if (now_pred.type == SCALAR_PRED)
       as_warn (_("file finished with an open IT block."));
      else
	as_warn (_("file finished with an open VPT/VPST block."));
    }
#endif
}
23650
23651/* Various frobbings of labels and their addresses.  */
23652
/* Called at the start of each input line: forget the label recorded on
   the previous line (see arm_frob_label / md_assemble).  */
void
arm_start_line_hook (void)
{
  last_label_seen = NULL;
}
23658
/* Called when label SYM is defined: remember it for md_assemble, tag it
   with the current Thumb and interworking state, close any
   automatically-opened IT block, optionally mark it as a Thumb function
   entry point, and emit DWARF line info for it.  */
void
arm_frob_label (symbolS * sym)
{
  last_label_seen = sym;

  ARM_SET_THUMB (sym, thumb_mode);

#if defined OBJ_COFF || defined OBJ_ELF
  ARM_SET_INTERWORK (sym, support_interwork);
#endif

  /* A label ends any IT block the assembler opened on its own.  */
  force_automatic_it_block_close ();

  /* Note - do not allow local symbols (.Lxxx) to be labelled
     as Thumb functions.  This is because these labels, whilst
     they exist inside Thumb code, are not the entry points for
     possible ARM->Thumb calls.	 Also, these labels can be used
     as part of a computed goto or switch statement.  eg gcc
     can generate code that looks like this:

		ldr  r2, [pc, .Laaa]
		lsl  r3, r3, #2
		ldr  r2, [r3, r2]
		mov  pc, r2

       .Lbbb:  .word .Lxxx
       .Lccc:  .word .Lyyy
       ..etc...
       .Laaa:	.word Lbbb

     The first instruction loads the address of the jump table.
     The second instruction converts a table index into a byte offset.
     The third instruction gets the jump address out of the table.
     The fourth instruction performs the jump.

     If the address stored at .Laaa is that of a symbol which has the
     Thumb_Func bit set, then the linker will arrange for this address
     to have the bottom bit set, which in turn would mean that the
     address computation performed by the third instruction would end
     up with the bottom bit set.  Since the ARM is capable of unaligned
     word loads, the instruction would then load the incorrect address
     out of the jump table, and chaos would ensue.  */
  if (label_is_thumb_function_name
      && (S_GET_NAME (sym)[0] != '.' || S_GET_NAME (sym)[1] != 'L')
      && (bfd_section_flags (now_seg) & SEC_CODE) != 0)
    {
      /* When the address of a Thumb function is taken the bottom
	 bit of that address should be set.  This will allow
	 interworking between Arm and Thumb functions to work
	 correctly.  */

      THUMB_SET_FUNC (sym, 1);

      label_is_thumb_function_name = FALSE;
    }

  dwarf2_emit_label (sym);
}
23717
23718bfd_boolean
23719arm_data_in_code (void)
23720{
23721  if (thumb_mode && ! strncmp (input_line_pointer + 1, "data:", 5))
23722    {
23723      *input_line_pointer = '/';
23724      input_line_pointer += 5;
23725      *input_line_pointer = 0;
23726      return TRUE;
23727    }
23728
23729  return FALSE;
23730}
23731
23732char *
23733arm_canonicalize_symbol_name (char * name)
23734{
23735  int len;
23736
23737  if (thumb_mode && (len = strlen (name)) > 5
23738      && streq (name + len - 5, "/data"))
23739    *(name + len - 5) = 0;
23740
23741  return name;
23742}
23743
23744/* Table of all register names defined by default.  The user can
23745   define additional names with .req.  Note that all register names
23746   should appear in both upper and lowercase variants.	Some registers
23747   also have mixed-case names.	*/
23748
/* One reg_entry: stringified name S, number N, type REG_TYPE_##T,
   built-in (TRUE), no feature bits.  */
#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE, 0 }
/* Register whose name is the prefix P glued to its number N.  */
#define REGNUM(p,n,t) REGDEF(p##n, n, t)
/* As REGNUM, but the stored value is twice the number in the name
   (used for the Neon Q registers below).  */
#define REGNUM2(p,n,t) REGDEF(p##n, 2 * n, t)
/* Registers P0..P15 of type T.  */
#define REGSET(p,t) \
  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
/* The upper half, P16..P31, of a 32-register set.  */
#define REGSETH(p,t) \
  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t), REGNUM(p,31,t)
/* Registers P0..P15 of type T, stored value doubled (REGNUM2).  */
#define REGSET2(p,t) \
  REGNUM2(p, 0,t), REGNUM2(p, 1,t), REGNUM2(p, 2,t), REGNUM2(p, 3,t), \
  REGNUM2(p, 4,t), REGNUM2(p, 5,t), REGNUM2(p, 6,t), REGNUM2(p, 7,t), \
  REGNUM2(p, 8,t), REGNUM2(p, 9,t), REGNUM2(p,10,t), REGNUM2(p,11,t), \
  REGNUM2(p,12,t), REGNUM2(p,13,t), REGNUM2(p,14,t), REGNUM2(p,15,t)
/* LR_<bank>, SP_<bank> and SPSR_<bank> entries for the banked-register
   value range starting at BASE, in both cases.  */
#define SPLRBANK(base,bank,t) \
  REGDEF(lr_##bank, 768|((base+0)<<16), t), \
  REGDEF(sp_##bank, 768|((base+1)<<16), t), \
  REGDEF(spsr_##bank, 768|(base<<16)|SPSR_BIT, t), \
  REGDEF(LR_##bank, 768|((base+0)<<16), t), \
  REGDEF(SP_##bank, 768|((base+1)<<16), t), \
  REGDEF(SPSR_##bank, 768|(base<<16)|SPSR_BIT, t)
23774
static const struct reg_entry reg_names[] =
{
  /* ARM integer registers.  */
  REGSET(r, RN), REGSET(R, RN),

  /* ATPCS synonyms.  */
  REGDEF(a1,0,RN), REGDEF(a2,1,RN), REGDEF(a3, 2,RN), REGDEF(a4, 3,RN),
  REGDEF(v1,4,RN), REGDEF(v2,5,RN), REGDEF(v3, 6,RN), REGDEF(v4, 7,RN),
  REGDEF(v5,8,RN), REGDEF(v6,9,RN), REGDEF(v7,10,RN), REGDEF(v8,11,RN),

  REGDEF(A1,0,RN), REGDEF(A2,1,RN), REGDEF(A3, 2,RN), REGDEF(A4, 3,RN),
  REGDEF(V1,4,RN), REGDEF(V2,5,RN), REGDEF(V3, 6,RN), REGDEF(V4, 7,RN),
  REGDEF(V5,8,RN), REGDEF(V6,9,RN), REGDEF(V7,10,RN), REGDEF(V8,11,RN),

  /* Well-known aliases.  */
  REGDEF(wr, 7,RN), REGDEF(sb, 9,RN), REGDEF(sl,10,RN), REGDEF(fp,11,RN),
  REGDEF(ip,12,RN), REGDEF(sp,13,RN), REGDEF(lr,14,RN), REGDEF(pc,15,RN),

  REGDEF(WR, 7,RN), REGDEF(SB, 9,RN), REGDEF(SL,10,RN), REGDEF(FP,11,RN),
  REGDEF(IP,12,RN), REGDEF(SP,13,RN), REGDEF(LR,14,RN), REGDEF(PC,15,RN),

  /* Defining the new Zero register from ARMv8.1-M.  */
  REGDEF(zr,15,ZR),
  REGDEF(ZR,15,ZR),

  /* Coprocessor numbers.  */
  REGSET(p, CP), REGSET(P, CP),

  /* Coprocessor register numbers.  The "cr" variants are for backward
     compatibility.  */
  REGSET(c,  CN), REGSET(C, CN),
  REGSET(cr, CN), REGSET(CR, CN),

  /* ARM banked registers.  */
  REGDEF(R8_usr,512|(0<<16),RNB), REGDEF(r8_usr,512|(0<<16),RNB),
  REGDEF(R9_usr,512|(1<<16),RNB), REGDEF(r9_usr,512|(1<<16),RNB),
  REGDEF(R10_usr,512|(2<<16),RNB), REGDEF(r10_usr,512|(2<<16),RNB),
  REGDEF(R11_usr,512|(3<<16),RNB), REGDEF(r11_usr,512|(3<<16),RNB),
  REGDEF(R12_usr,512|(4<<16),RNB), REGDEF(r12_usr,512|(4<<16),RNB),
  REGDEF(SP_usr,512|(5<<16),RNB), REGDEF(sp_usr,512|(5<<16),RNB),
  REGDEF(LR_usr,512|(6<<16),RNB), REGDEF(lr_usr,512|(6<<16),RNB),

  REGDEF(R8_fiq,512|(8<<16),RNB), REGDEF(r8_fiq,512|(8<<16),RNB),
  REGDEF(R9_fiq,512|(9<<16),RNB), REGDEF(r9_fiq,512|(9<<16),RNB),
  REGDEF(R10_fiq,512|(10<<16),RNB), REGDEF(r10_fiq,512|(10<<16),RNB),
  REGDEF(R11_fiq,512|(11<<16),RNB), REGDEF(r11_fiq,512|(11<<16),RNB),
  REGDEF(R12_fiq,512|(12<<16),RNB), REGDEF(r12_fiq,512|(12<<16),RNB),
  REGDEF(SP_fiq,512|(13<<16),RNB), REGDEF(sp_fiq,512|(13<<16),RNB),
  REGDEF(LR_fiq,512|(14<<16),RNB), REGDEF(lr_fiq,512|(14<<16),RNB),
  REGDEF(SPSR_fiq,512|(14<<16)|SPSR_BIT,RNB), REGDEF(spsr_fiq,512|(14<<16)|SPSR_BIT,RNB),

  SPLRBANK(0,IRQ,RNB), SPLRBANK(0,irq,RNB),
  SPLRBANK(2,SVC,RNB), SPLRBANK(2,svc,RNB),
  SPLRBANK(4,ABT,RNB), SPLRBANK(4,abt,RNB),
  SPLRBANK(6,UND,RNB), SPLRBANK(6,und,RNB),
  SPLRBANK(12,MON,RNB), SPLRBANK(12,mon,RNB),
  REGDEF(elr_hyp,768|(14<<16),RNB), REGDEF(ELR_hyp,768|(14<<16),RNB),
  REGDEF(sp_hyp,768|(15<<16),RNB), REGDEF(SP_hyp,768|(15<<16),RNB),
  REGDEF(spsr_hyp,768|(14<<16)|SPSR_BIT,RNB),
  REGDEF(SPSR_hyp,768|(14<<16)|SPSR_BIT,RNB),

  /* FPA registers.  */
  REGNUM(f,0,FN), REGNUM(f,1,FN), REGNUM(f,2,FN), REGNUM(f,3,FN),
  REGNUM(f,4,FN), REGNUM(f,5,FN), REGNUM(f,6,FN), REGNUM(f,7, FN),

  REGNUM(F,0,FN), REGNUM(F,1,FN), REGNUM(F,2,FN), REGNUM(F,3,FN),
  REGNUM(F,4,FN), REGNUM(F,5,FN), REGNUM(F,6,FN), REGNUM(F,7, FN),

  /* VFP SP registers.	*/
  REGSET(s,VFS),  REGSET(S,VFS),
  REGSETH(s,VFS), REGSETH(S,VFS),

  /* VFP DP Registers.	*/
  REGSET(d,VFD),  REGSET(D,VFD),
  /* Extra Neon DP registers.  */
  REGSETH(d,VFD), REGSETH(D,VFD),

  /* Neon QP registers.  */
  REGSET2(q,NQ),  REGSET2(Q,NQ),

  /* VFP control registers.  */
  REGDEF(fpsid,0,VFC), REGDEF(fpscr,1,VFC), REGDEF(fpexc,8,VFC),
  REGDEF(FPSID,0,VFC), REGDEF(FPSCR,1,VFC), REGDEF(FPEXC,8,VFC),
  REGDEF(fpinst,9,VFC), REGDEF(fpinst2,10,VFC),
  REGDEF(FPINST,9,VFC), REGDEF(FPINST2,10,VFC),
  REGDEF(mvfr0,7,VFC), REGDEF(mvfr1,6,VFC),
  REGDEF(MVFR0,7,VFC), REGDEF(MVFR1,6,VFC),
  REGDEF(mvfr2,5,VFC), REGDEF(MVFR2,5,VFC),
  REGDEF(fpscr_nzcvqc,2,VFC), REGDEF(FPSCR_nzcvqc,2,VFC),
  REGDEF(vpr,12,VFC), REGDEF(VPR,12,VFC),
  REGDEF(fpcxt_ns,14,VFC), REGDEF(FPCXT_NS,14,VFC),
  REGDEF(fpcxt_s,15,VFC), REGDEF(FPCXT_S,15,VFC),

  /* Maverick DSP coprocessor registers.  */
  REGSET(mvf,MVF),  REGSET(mvd,MVD),  REGSET(mvfx,MVFX),  REGSET(mvdx,MVDX),
  REGSET(MVF,MVF),  REGSET(MVD,MVD),  REGSET(MVFX,MVFX),  REGSET(MVDX,MVDX),

  REGNUM(mvax,0,MVAX), REGNUM(mvax,1,MVAX),
  REGNUM(mvax,2,MVAX), REGNUM(mvax,3,MVAX),
  REGDEF(dspsc,0,DSPSC),

  REGNUM(MVAX,0,MVAX), REGNUM(MVAX,1,MVAX),
  REGNUM(MVAX,2,MVAX), REGNUM(MVAX,3,MVAX),
  REGDEF(DSPSC,0,DSPSC),

  /* iWMMXt data registers - p0, c0-15.	 */
  REGSET(wr,MMXWR), REGSET(wR,MMXWR), REGSET(WR, MMXWR),

  /* iWMMXt control registers - p1, c0-3.  */
  REGDEF(wcid,	0,MMXWC),  REGDEF(wCID,	 0,MMXWC),  REGDEF(WCID,  0,MMXWC),
  REGDEF(wcon,	1,MMXWC),  REGDEF(wCon,	 1,MMXWC),  REGDEF(WCON,  1,MMXWC),
  REGDEF(wcssf, 2,MMXWC),  REGDEF(wCSSF, 2,MMXWC),  REGDEF(WCSSF, 2,MMXWC),
  REGDEF(wcasf, 3,MMXWC),  REGDEF(wCASF, 3,MMXWC),  REGDEF(WCASF, 3,MMXWC),

  /* iWMMXt scalar (constant/offset) registers - p1, c8-11.  */
  REGDEF(wcgr0, 8,MMXWCG),  REGDEF(wCGR0, 8,MMXWCG),  REGDEF(WCGR0, 8,MMXWCG),
  REGDEF(wcgr1, 9,MMXWCG),  REGDEF(wCGR1, 9,MMXWCG),  REGDEF(WCGR1, 9,MMXWCG),
  REGDEF(wcgr2,10,MMXWCG),  REGDEF(wCGR2,10,MMXWCG),  REGDEF(WCGR2,10,MMXWCG),
  REGDEF(wcgr3,11,MMXWCG),  REGDEF(wCGR3,11,MMXWCG),  REGDEF(WCGR3,11,MMXWCG),

  /* XScale accumulator registers.  */
  REGNUM(acc,0,XSCALE), REGNUM(ACC,0,XSCALE),
};
#undef REGDEF
#undef REGNUM
#undef REGSET
/* NOTE(review): REGNUM2, REGSET2, REGSETH and SPLRBANK stay defined
   past this point -- confirm nothing later redefines them.  */
23901
23902/* Table of all PSR suffixes.  Bare "CPSR" and "SPSR" are handled
23903   within psr_required_here.  */
static const struct asm_psr psrs[] =
{
  /* Backward compatibility notation.  Note that "all" is no longer
     truly all possible PSR bits.  */
  {"all",  PSR_c | PSR_f},
  {"flg",  PSR_f},
  {"ctl",  PSR_c},

  /* Individual flags.	*/
  {"f",	   PSR_f},
  {"c",	   PSR_c},
  {"x",	   PSR_x},
  {"s",	   PSR_s},

  /* Combinations of flags.  Every permutation of the f/s/x/c letters
     is listed so any spelling order is accepted.  */
  {"fs",   PSR_f | PSR_s},
  {"fx",   PSR_f | PSR_x},
  {"fc",   PSR_f | PSR_c},
  {"sf",   PSR_s | PSR_f},
  {"sx",   PSR_s | PSR_x},
  {"sc",   PSR_s | PSR_c},
  {"xf",   PSR_x | PSR_f},
  {"xs",   PSR_x | PSR_s},
  {"xc",   PSR_x | PSR_c},
  {"cf",   PSR_c | PSR_f},
  {"cs",   PSR_c | PSR_s},
  {"cx",   PSR_c | PSR_x},
  {"fsx",  PSR_f | PSR_s | PSR_x},
  {"fsc",  PSR_f | PSR_s | PSR_c},
  {"fxs",  PSR_f | PSR_x | PSR_s},
  {"fxc",  PSR_f | PSR_x | PSR_c},
  {"fcs",  PSR_f | PSR_c | PSR_s},
  {"fcx",  PSR_f | PSR_c | PSR_x},
  {"sfx",  PSR_s | PSR_f | PSR_x},
  {"sfc",  PSR_s | PSR_f | PSR_c},
  {"sxf",  PSR_s | PSR_x | PSR_f},
  {"sxc",  PSR_s | PSR_x | PSR_c},
  {"scf",  PSR_s | PSR_c | PSR_f},
  {"scx",  PSR_s | PSR_c | PSR_x},
  {"xfs",  PSR_x | PSR_f | PSR_s},
  {"xfc",  PSR_x | PSR_f | PSR_c},
  {"xsf",  PSR_x | PSR_s | PSR_f},
  {"xsc",  PSR_x | PSR_s | PSR_c},
  {"xcf",  PSR_x | PSR_c | PSR_f},
  {"xcs",  PSR_x | PSR_c | PSR_s},
  {"cfs",  PSR_c | PSR_f | PSR_s},
  {"cfx",  PSR_c | PSR_f | PSR_x},
  {"csf",  PSR_c | PSR_s | PSR_f},
  {"csx",  PSR_c | PSR_s | PSR_x},
  {"cxf",  PSR_c | PSR_x | PSR_f},
  {"cxs",  PSR_c | PSR_x | PSR_s},
  {"fsxc", PSR_f | PSR_s | PSR_x | PSR_c},
  {"fscx", PSR_f | PSR_s | PSR_c | PSR_x},
  {"fxsc", PSR_f | PSR_x | PSR_s | PSR_c},
  {"fxcs", PSR_f | PSR_x | PSR_c | PSR_s},
  {"fcsx", PSR_f | PSR_c | PSR_s | PSR_x},
  {"fcxs", PSR_f | PSR_c | PSR_x | PSR_s},
  {"sfxc", PSR_s | PSR_f | PSR_x | PSR_c},
  {"sfcx", PSR_s | PSR_f | PSR_c | PSR_x},
  {"sxfc", PSR_s | PSR_x | PSR_f | PSR_c},
  {"sxcf", PSR_s | PSR_x | PSR_c | PSR_f},
  {"scfx", PSR_s | PSR_c | PSR_f | PSR_x},
  {"scxf", PSR_s | PSR_c | PSR_x | PSR_f},
  {"xfsc", PSR_x | PSR_f | PSR_s | PSR_c},
  {"xfcs", PSR_x | PSR_f | PSR_c | PSR_s},
  {"xsfc", PSR_x | PSR_s | PSR_f | PSR_c},
  {"xscf", PSR_x | PSR_s | PSR_c | PSR_f},
  {"xcfs", PSR_x | PSR_c | PSR_f | PSR_s},
  {"xcsf", PSR_x | PSR_c | PSR_s | PSR_f},
  {"cfsx", PSR_c | PSR_f | PSR_s | PSR_x},
  {"cfxs", PSR_c | PSR_f | PSR_x | PSR_s},
  {"csfx", PSR_c | PSR_s | PSR_f | PSR_x},
  {"csxf", PSR_c | PSR_s | PSR_x | PSR_f},
  {"cxfs", PSR_c | PSR_x | PSR_f | PSR_s},
  {"cxsf", PSR_c | PSR_x | PSR_s | PSR_f},
};
23980
23981/* Table of V7M psr names.  */
23982static const struct asm_psr v7m_psrs[] =
23983{
23984  {"apsr",	   0x0 }, {"APSR",	   0x0 },
23985  {"iapsr",	   0x1 }, {"IAPSR",	   0x1 },
23986  {"eapsr",	   0x2 }, {"EAPSR",	   0x2 },
23987  {"psr",	   0x3 }, {"PSR",	   0x3 },
23988  {"xpsr",	   0x3 }, {"XPSR",	   0x3 }, {"xPSR",	  3 },
23989  {"ipsr",	   0x5 }, {"IPSR",	   0x5 },
23990  {"epsr",	   0x6 }, {"EPSR",	   0x6 },
23991  {"iepsr",	   0x7 }, {"IEPSR",	   0x7 },
23992  {"msp",	   0x8 }, {"MSP",	   0x8 },
23993  {"psp",	   0x9 }, {"PSP",	   0x9 },
23994  {"msplim",	   0xa }, {"MSPLIM",	   0xa },
23995  {"psplim",	   0xb }, {"PSPLIM",	   0xb },
23996  {"primask",	   0x10}, {"PRIMASK",	   0x10},
23997  {"basepri",	   0x11}, {"BASEPRI",	   0x11},
23998  {"basepri_max",  0x12}, {"BASEPRI_MAX",  0x12},
23999  {"faultmask",	   0x13}, {"FAULTMASK",	   0x13},
24000  {"control",	   0x14}, {"CONTROL",	   0x14},
24001  {"msp_ns",	   0x88}, {"MSP_NS",	   0x88},
24002  {"psp_ns",	   0x89}, {"PSP_NS",	   0x89},
24003  {"msplim_ns",	   0x8a}, {"MSPLIM_NS",	   0x8a},
24004  {"psplim_ns",	   0x8b}, {"PSPLIM_NS",	   0x8b},
24005  {"primask_ns",   0x90}, {"PRIMASK_NS",   0x90},
24006  {"basepri_ns",   0x91}, {"BASEPRI_NS",   0x91},
24007  {"faultmask_ns", 0x93}, {"FAULTMASK_NS", 0x93},
24008  {"control_ns",   0x94}, {"CONTROL_NS",   0x94},
24009  {"sp_ns",	   0x98}, {"SP_NS",	   0x98 }
24010};
24011
24012/* Table of all shift-in-operand names.	 */
static const struct asm_shift_name shift_names [] =
{
  /* "asl" is accepted as a synonym for "lsl".  */
  { "asl", SHIFT_LSL },	 { "ASL", SHIFT_LSL },
  { "lsl", SHIFT_LSL },	 { "LSL", SHIFT_LSL },
  { "lsr", SHIFT_LSR },	 { "LSR", SHIFT_LSR },
  { "asr", SHIFT_ASR },	 { "ASR", SHIFT_ASR },
  { "ror", SHIFT_ROR },	 { "ROR", SHIFT_ROR },
  { "rrx", SHIFT_RRX },	 { "RRX", SHIFT_RRX },
  { "uxtw", SHIFT_UXTW}, { "UXTW", SHIFT_UXTW}
};
24023
24024/* Table of all explicit relocation names.  */
24025#ifdef OBJ_ELF
24026static struct reloc_entry reloc_names[] =
24027{
24028  { "got",     BFD_RELOC_ARM_GOT32   },	 { "GOT",     BFD_RELOC_ARM_GOT32   },
24029  { "gotoff",  BFD_RELOC_ARM_GOTOFF  },	 { "GOTOFF",  BFD_RELOC_ARM_GOTOFF  },
24030  { "plt",     BFD_RELOC_ARM_PLT32   },	 { "PLT",     BFD_RELOC_ARM_PLT32   },
24031  { "target1", BFD_RELOC_ARM_TARGET1 },	 { "TARGET1", BFD_RELOC_ARM_TARGET1 },
24032  { "target2", BFD_RELOC_ARM_TARGET2 },	 { "TARGET2", BFD_RELOC_ARM_TARGET2 },
24033  { "sbrel",   BFD_RELOC_ARM_SBREL32 },	 { "SBREL",   BFD_RELOC_ARM_SBREL32 },
24034  { "tlsgd",   BFD_RELOC_ARM_TLS_GD32},  { "TLSGD",   BFD_RELOC_ARM_TLS_GD32},
24035  { "tlsldm",  BFD_RELOC_ARM_TLS_LDM32}, { "TLSLDM",  BFD_RELOC_ARM_TLS_LDM32},
24036  { "tlsldo",  BFD_RELOC_ARM_TLS_LDO32}, { "TLSLDO",  BFD_RELOC_ARM_TLS_LDO32},
24037  { "gottpoff",BFD_RELOC_ARM_TLS_IE32},  { "GOTTPOFF",BFD_RELOC_ARM_TLS_IE32},
24038  { "tpoff",   BFD_RELOC_ARM_TLS_LE32},  { "TPOFF",   BFD_RELOC_ARM_TLS_LE32},
24039  { "got_prel", BFD_RELOC_ARM_GOT_PREL}, { "GOT_PREL", BFD_RELOC_ARM_GOT_PREL},
24040  { "tlsdesc", BFD_RELOC_ARM_TLS_GOTDESC},
24041	{ "TLSDESC", BFD_RELOC_ARM_TLS_GOTDESC},
24042  { "tlscall", BFD_RELOC_ARM_TLS_CALL},
24043	{ "TLSCALL", BFD_RELOC_ARM_TLS_CALL},
24044  { "tlsdescseq", BFD_RELOC_ARM_TLS_DESCSEQ},
24045	{ "TLSDESCSEQ", BFD_RELOC_ARM_TLS_DESCSEQ},
24046  { "gotfuncdesc", BFD_RELOC_ARM_GOTFUNCDESC },
24047	{ "GOTFUNCDESC", BFD_RELOC_ARM_GOTFUNCDESC },
24048  { "gotofffuncdesc", BFD_RELOC_ARM_GOTOFFFUNCDESC },
24049	{ "GOTOFFFUNCDESC", BFD_RELOC_ARM_GOTOFFFUNCDESC },
24050  { "funcdesc", BFD_RELOC_ARM_FUNCDESC },
24051	{ "FUNCDESC", BFD_RELOC_ARM_FUNCDESC },
24052   { "tlsgd_fdpic", BFD_RELOC_ARM_TLS_GD32_FDPIC },      { "TLSGD_FDPIC", BFD_RELOC_ARM_TLS_GD32_FDPIC },
24053   { "tlsldm_fdpic", BFD_RELOC_ARM_TLS_LDM32_FDPIC },    { "TLSLDM_FDPIC", BFD_RELOC_ARM_TLS_LDM32_FDPIC },
24054   { "gottpoff_fdpic", BFD_RELOC_ARM_TLS_IE32_FDPIC },   { "GOTTPOFF_FDIC", BFD_RELOC_ARM_TLS_IE32_FDPIC },
24055};
24056#endif
24057
24058/* Table of all conditional affixes.  */
static const struct asm_cond conds[] =
{
  /* Values are the 4-bit condition-code field.  "hs"/"lo" are the
     usual synonyms for "cs"/"cc"; "ul" is also accepted for 0x3
     (NOTE(review): confirm "ul" is intentional -- it is not a
     standard ARM condition mnemonic).  */
  {"eq", 0x0},
  {"ne", 0x1},
  {"cs", 0x2}, {"hs", 0x2},
  {"cc", 0x3}, {"ul", 0x3}, {"lo", 0x3},
  {"mi", 0x4},
  {"pl", 0x5},
  {"vs", 0x6},
  {"vc", 0x7},
  {"hi", 0x8},
  {"ls", 0x9},
  {"ge", 0xa},
  {"lt", 0xb},
  {"gt", 0xc},
  {"le", 0xd},
  {"al", 0xe}
};
static const struct asm_cond vconds[] =
{
    /* "t"/"e" suffixes (presumably the then/else suffixes of
       predicated vector instructions); their values deliberately lie
       outside the 4-bit condition-code range above.  */
    {"t", 0xf},
    {"e", 0x10}
};
24082
/* Expand to two table entries -- the lower- and the upper-case
   spelling -- of the same barrier option, with encoding CODE gated on
   architecture feature FEAT.  */
#define UL_BARRIER(L,U,CODE,FEAT) \
  { L, CODE, ARM_FEATURE_CORE_LOW (FEAT) }, \
  { U, CODE, ARM_FEATURE_CORE_LOW (FEAT) }

/* Barrier option names and their encodings; the LD/ISHLD/NSHLD/OSHLD
   forms require ARMv8, the rest only the barrier extension.  */
static struct asm_barrier_opt barrier_opt_names[] =
{
  UL_BARRIER ("sy",	"SY",	 0xf, ARM_EXT_BARRIER),
  UL_BARRIER ("st",	"ST",	 0xe, ARM_EXT_BARRIER),
  UL_BARRIER ("ld",	"LD",	 0xd, ARM_EXT_V8),
  UL_BARRIER ("ish",	"ISH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("sh",	"SH",	 0xb, ARM_EXT_BARRIER),
  UL_BARRIER ("ishst",	"ISHST", 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("shst",	"SHST",	 0xa, ARM_EXT_BARRIER),
  UL_BARRIER ("ishld",	"ISHLD", 0x9, ARM_EXT_V8),
  UL_BARRIER ("un",	"UN",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("nsh",	"NSH",	 0x7, ARM_EXT_BARRIER),
  UL_BARRIER ("unst",	"UNST",	 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshst",	"NSHST", 0x6, ARM_EXT_BARRIER),
  UL_BARRIER ("nshld",	"NSHLD", 0x5, ARM_EXT_V8),
  UL_BARRIER ("osh",	"OSH",	 0x3, ARM_EXT_BARRIER),
  UL_BARRIER ("oshst",	"OSHST", 0x2, ARM_EXT_BARRIER),
  UL_BARRIER ("oshld",	"OSHLD", 0x1, ARM_EXT_V8)
};

#undef UL_BARRIER
24108
24109/* Table of ARM-format instructions.	*/
24110
/* Macros for gluing together operand strings.  N.B. In all cases
   other than OPS0, the trailing OP_stop comes from default
   zero-initialization of the unspecified elements of the array.  */
#define OPS0()		  { OP_stop, }
#define OPS1(a)		  { OP_##a, }
#define OPS2(a,b)	  { OP_##a,OP_##b, }
#define OPS3(a,b,c)	  { OP_##a,OP_##b,OP_##c, }
#define OPS4(a,b,c,d)	  { OP_##a,OP_##b,OP_##c,OP_##d, }
#define OPS5(a,b,c,d,e)	  { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e, }
#define OPS6(a,b,c,d,e,f) { OP_##a,OP_##b,OP_##c,OP_##d,OP_##e,OP_##f, }

/* These macros are similar to the OPSn, but do not prepend the OP_ prefix.
   This is useful when mixing operands for ARM and THUMB, i.e. using the
   MIX_ARM_THUMB_OPERANDS macro.
   In order to use these macros, prefix the number of operands with _
   e.g. _3.  */
#define OPS_1(a)	   { a, }
#define OPS_2(a,b)	   { a,b, }
#define OPS_3(a,b,c)	   { a,b,c, }
#define OPS_4(a,b,c,d)	   { a,b,c,d, }
#define OPS_5(a,b,c,d,e)   { a,b,c,d,e, }
#define OPS_6(a,b,c,d,e,f) { a,b,c,d,e,f, }

/* These macros abstract out the exact format of the mnemonic table and
   save some repeated characters.  */

/* The normal sort of mnemonic; has a Thumb variant; takes a conditional suffix.
   OP is pasted after "0x", so it is written as bare hex digits; TOP is
   taken as-is (callers pass either a full value or a T_MNEM_ enum).  */
#define TxCE(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Two variants of the above - TCE for a numeric Thumb opcode, tCE for
   a T_MNEM_xyz enumerator.  */
#define TCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, 0x##top, nops, ops, ae, te)
#define tCE(mnem, aop, top, nops, ops, ae, te) \
      TxCE (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Second most common sort of mnemonic: has a Thumb variant, takes a conditional
   infix after the third character.  */
#define TxC3(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
/* As TxC3, but the "s"-suffix spelling is flagged as deprecated
   (OT_cinfix3_deprecated, warned about in md_assemble).  */
#define TxC3w(mnem, op, top, nops, ops, ae, te) \
  { mnem, OPS##nops ops, OT_cinfix3_deprecated, 0x##op, top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }
#define TC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, 0x##top, nops, ops, ae, te)
#define TC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, 0x##top, nops, ops, ae, te)
#define tC3(mnem, aop, top, nops, ops, ae, te) \
      TxC3 (mnem, aop, T_MNEM##top, nops, ops, ae, te)
#define tC3w(mnem, aop, top, nops, ops, ae, te) \
      TxC3w (mnem, aop, T_MNEM##top, nops, ops, ae, te)

/* Mnemonic that cannot be conditionalized.  The ARM condition-code
   field is still 0xE.  Many of the Thumb variants can be executed
   conditionally, so this is checked separately.  */
#define TUE(mnem, op, top, nops, ops, ae, te)				\
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##ae, do_##te, 0 }

/* Same as TUE but the encoding function for ARM and Thumb modes is the same.
   Used by mnemonics that have very minimal differences in the encoding for
   ARM and Thumb variants and can be handled in a common function.  */
#define TUEc(mnem, op, top, nops, ops, en) \
  { mnem, OPS##nops ops, OT_unconditional, 0x##op, 0x##top, ARM_VARIANT, \
    THUMB_VARIANT, do_##en, do_##en, 0 }
24179
24180/* Mnemonic that cannot be conditionalized, and bears 0xF in its ARM
24181   condition code field.  */
24182#define TUF(mnem, op, top, nops, ops, ae, te)				\
24183  { mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##top, ARM_VARIANT, \
24184    THUMB_VARIANT, do_##ae, do_##te, 0 }
24185
/* ARM-only variants of all the above.  The Thumb opcode, Thumb variant
   and Thumb encoder slots are left as 0/NULL.  */
#define CE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* NOTE: unlike CE, C3 stringizes its mnemonic argument, so callers pass
   a bare identifier rather than a string literal (see e.g. the rscs and
   ldm/stm entries below).  */
#define C3(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_cinfix3, 0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Thumb-only variants of TCE and TUE.  */
#define ToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

#define ToU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, 0x##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToC.  */
#define toC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, T_MNEM##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 0 }

/* T_MNEM_xyz enumerator variants of ToU.  */
#define toU(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_unconditional, 0x0, T_MNEM##top, 0, THUMB_VARIANT, \
    NULL, do_##te, 0 }

/* Legacy mnemonics that always have conditional infix after the third
   character.  */
#define CL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }
24217
/* Coprocessor instructions.  Isomorphic between Arm and Thumb-2.  The
   Thumb opcode is the ARM opcode with an always (0xe) condition nibble
   prefixed via token pasting.  */
#define cCE(mnem,  op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* mov instructions that are shared between coprocessor and MVE.  */
#define mcCE(mnem,  op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_csuffix, 0x##op, 0xe##op, ARM_VARIANT, THUMB_VARIANT, do_##ae, do_##ae, 0 }

/* Legacy coprocessor instructions where conditional infix and conditional
   suffix are ambiguous.  For consistency this includes all FPA instructions,
   not just the potentially ambiguous ones.  */
#define cCL(mnem, op, nops, ops, ae)	\
  { mnem, OPS##nops ops, OT_cinfix3_legacy, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Coprocessor, takes either a suffix or a position-3 infix
   (for an FPA corner case). */
#define C3E(mnem, op, nops, ops, ae) \
  { mnem, OPS##nops ops, OT_csuf_or_in3, \
    0x##op, 0xe##op, ARM_VARIANT, ARM_VARIANT, do_##ae, do_##ae, 0 }

/* Helper for CM below: glues the condition infix m2 between the mnemonic
   fragments m1 and m3 by string concatenation.  When m2 is empty,
   sizeof (#m2) == 1 (just the NUL) and the tag is OT_odd_infix_unc;
   otherwise the tag encodes the infix position as OT_odd_infix_0 plus
   the length of the m1 prefix.  */
#define xCM_(m1, m2, m3, op, nops, ops, ae)	\
  { m1 #m2 m3, OPS##nops ops, \
    sizeof (#m2) == 1 ? OT_odd_infix_unc : OT_odd_infix_0 + sizeof (m1) - 1, \
    0x##op, 0x0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Expands to one table entry per condition-code infix, plus the bare
   (uninfixed) form, for mnemonics whose infix is not at position 3.  */
#define CM(m1, m2, op, nops, ops, ae)	\
  xCM_ (m1,   , m2, op, nops, ops, ae),	\
  xCM_ (m1, eq, m2, op, nops, ops, ae),	\
  xCM_ (m1, ne, m2, op, nops, ops, ae),	\
  xCM_ (m1, cs, m2, op, nops, ops, ae),	\
  xCM_ (m1, hs, m2, op, nops, ops, ae),	\
  xCM_ (m1, cc, m2, op, nops, ops, ae),	\
  xCM_ (m1, ul, m2, op, nops, ops, ae),	\
  xCM_ (m1, lo, m2, op, nops, ops, ae),	\
  xCM_ (m1, mi, m2, op, nops, ops, ae),	\
  xCM_ (m1, pl, m2, op, nops, ops, ae),	\
  xCM_ (m1, vs, m2, op, nops, ops, ae),	\
  xCM_ (m1, vc, m2, op, nops, ops, ae),	\
  xCM_ (m1, hi, m2, op, nops, ops, ae),	\
  xCM_ (m1, ls, m2, op, nops, ops, ae),	\
  xCM_ (m1, ge, m2, op, nops, ops, ae),	\
  xCM_ (m1, lt, m2, op, nops, ops, ae),	\
  xCM_ (m1, gt, m2, op, nops, ops, ae),	\
  xCM_ (m1, le, m2, op, nops, ops, ae),	\
  xCM_ (m1, al, m2, op, nops, ops, ae)
24264
/* ARM-only, non-conditionalizable, with condition field 0xE.  */
#define UE(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditional, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* ARM-only, non-conditionalizable, with 0xF in the condition field.  */
#define UF(mnem, op, nops, ops, ae)	\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0, ARM_VARIANT, 0, do_##ae, NULL, 0 }

/* Neon data-processing. ARM versions are unconditional with cond=0xf.
   The Thumb and ARM variants are mostly the same (bits 0-23 and 24/28), so we
   use the same encoding function for each.  */
#define NUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon data processing, version which indirects through neon_enc_tab for
   the various overloaded versions of opcodes.  */
#define nUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 0 }

/* Neon insn with conditional suffix for the ARM version, non-overloaded
   version.  */
#define NCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, 0x##op, 0x##op, ARM_VARIANT,		\
    THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define NCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define NCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* Neon insn with conditional suffix for the ARM version, overloaded types.  */
#define nCE_tag(mnem, op, nops, ops, enc, tag, mve_p)				\
  { #mnem, OPS##nops ops, tag, N_MNEM##op, N_MNEM##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, mve_p }

#define nCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 0)

#define nCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 0)

/* MVE instruction taking a conditional suffix with an F variant
   (OT_csuffixF), using M_MNEM enumerator opcodes and flagged as
   MVE-predicable.  */
#define mCEF(mnem, op, nops, ops, enc)				\
  { #mnem, OPS##nops ops, OT_csuffixF, M_MNEM##op, M_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }


/* nCEF but for MVE predicated instructions.  */
#define mnCEF(mnem, op, nops, ops, enc)					\
    nCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)

/* nCE but for MVE predicated instructions.  */
#define mnCE(mnem, op, nops, ops, enc)					\
   nCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NUF but for potentially MVE predicated instructions.  */
#define MNUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, 0x##op, 0x##op,		\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* nUF but for potentially MVE predicated instructions.  */
#define mnUF(mnem, op, nops, ops, enc)					\
  { #mnem, OPS##nops ops, OT_unconditionalF, N_MNEM##op, N_MNEM##op,	\
    ARM_VARIANT, THUMB_VARIANT, do_##enc, do_##enc, 1 }

/* ToC but for potentially MVE predicated instructions.  */
#define mToC(mnem, top, nops, ops, te) \
  { mnem, OPS##nops ops, OT_csuffix, 0x0, 0x##top, 0, THUMB_VARIANT, NULL, \
    do_##te, 1 }

/* NCE but for MVE predicated instructions.  */
#define MNCE(mnem, op, nops, ops, enc)					\
   NCE_tag (mnem, op, nops, ops, enc, OT_csuffix, 1)

/* NCEF but for MVE predicated instructions.  */
#define MNCEF(mnem, op, nops, ops, enc)					\
    NCE_tag (mnem, op, nops, ops, enc, OT_csuffixF, 1)
/* Lets 0 be passed where an encoder name is expected: do_##0 expands to
   do_0, i.e. 0 (no encoding function).  */
#define do_0 0
24344
24345static const struct asm_opcode insns[] =
24346{
24347#define ARM_VARIANT    & arm_ext_v1 /* Core ARM Instructions.  */
24348#define THUMB_VARIANT  & arm_ext_v4t
24349 tCE("and",	0000000, _and,     3, (RR, oRR, SH), arit, t_arit3c),
24350 tC3("ands",	0100000, _ands,	   3, (RR, oRR, SH), arit, t_arit3c),
24351 tCE("eor",	0200000, _eor,	   3, (RR, oRR, SH), arit, t_arit3c),
24352 tC3("eors",	0300000, _eors,	   3, (RR, oRR, SH), arit, t_arit3c),
24353 tCE("sub",	0400000, _sub,	   3, (RR, oRR, SH), arit, t_add_sub),
24354 tC3("subs",	0500000, _subs,	   3, (RR, oRR, SH), arit, t_add_sub),
24355 tCE("add",	0800000, _add,	   3, (RR, oRR, SHG), arit, t_add_sub),
24356 tC3("adds",	0900000, _adds,	   3, (RR, oRR, SHG), arit, t_add_sub),
24357 tCE("adc",	0a00000, _adc,	   3, (RR, oRR, SH), arit, t_arit3c),
24358 tC3("adcs",	0b00000, _adcs,	   3, (RR, oRR, SH), arit, t_arit3c),
24359 tCE("sbc",	0c00000, _sbc,	   3, (RR, oRR, SH), arit, t_arit3),
24360 tC3("sbcs",	0d00000, _sbcs,	   3, (RR, oRR, SH), arit, t_arit3),
24361 tCE("orr",	1800000, _orr,	   3, (RR, oRR, SH), arit, t_arit3c),
24362 tC3("orrs",	1900000, _orrs,	   3, (RR, oRR, SH), arit, t_arit3c),
24363 tCE("bic",	1c00000, _bic,	   3, (RR, oRR, SH), arit, t_arit3),
24364 tC3("bics",	1d00000, _bics,	   3, (RR, oRR, SH), arit, t_arit3),
24365
24366 /* The p-variants of tst/cmp/cmn/teq (below) are the pre-V6 mechanism
24367    for setting PSR flag bits.  They are obsolete in V6 and do not
24368    have Thumb equivalents. */
24369 tCE("tst",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
24370 tC3w("tsts",	1100000, _tst,	   2, (RR, SH),      cmp,  t_mvn_tst),
24371  CL("tstp",	110f000,     	   2, (RR, SH),      cmp),
24372 tCE("cmp",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
24373 tC3w("cmps",	1500000, _cmp,	   2, (RR, SH),      cmp,  t_mov_cmp),
24374  CL("cmpp",	150f000,     	   2, (RR, SH),      cmp),
24375 tCE("cmn",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
24376 tC3w("cmns",	1700000, _cmn,	   2, (RR, SH),      cmp,  t_mvn_tst),
24377  CL("cmnp",	170f000,     	   2, (RR, SH),      cmp),
24378
24379 tCE("mov",	1a00000, _mov,	   2, (RR, SH),      mov,  t_mov_cmp),
24380 tC3("movs",	1b00000, _movs,	   2, (RR, SHG),     mov,  t_mov_cmp),
24381 tCE("mvn",	1e00000, _mvn,	   2, (RR, SH),      mov,  t_mvn_tst),
24382 tC3("mvns",	1f00000, _mvns,	   2, (RR, SH),      mov,  t_mvn_tst),
24383
24384 tCE("ldr",	4100000, _ldr,	   2, (RR, ADDRGLDR),ldst, t_ldst),
24385 tC3("ldrb",	4500000, _ldrb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24386 tCE("str",	4000000, _str,	   _2, (MIX_ARM_THUMB_OPERANDS (OP_RR,
24387								OP_RRnpc),
24388					OP_ADDRGLDR),ldst, t_ldst),
24389 tC3("strb",	4400000, _strb,	   2, (RRnpc_npcsp, ADDRGLDR),ldst, t_ldst),
24390
24391 tCE("stm",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24392 tC3("stmia",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24393 tC3("stmea",	8800000, _stmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24394 tCE("ldm",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24395 tC3("ldmia",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24396 tC3("ldmfd",	8900000, _ldmia,    2, (RRw, REGLST), ldmstm, t_ldmstm),
24397
24398 tCE("b",	a000000, _b,	   1, (EXPr),	     branch, t_branch),
24399 TCE("bl",	b000000, f000f800, 1, (EXPr),	     bl, t_branch23),
24400
24401  /* Pseudo ops.  */
24402 tCE("adr",	28f0000, _adr,	   2, (RR, EXP),     adr,  t_adr),
24403  C3(adrl,	28f0000,           2, (RR, EXP),     adrl),
24404 tCE("nop",	1a00000, _nop,	   1, (oI255c),	     nop,  t_nop),
24405 tCE("udf",	7f000f0, _udf,     1, (oIffffb),     bkpt, t_udf),
24406
24407  /* Thumb-compatibility pseudo ops.  */
24408 tCE("lsl",	1a00000, _lsl,	   3, (RR, oRR, SH), shift, t_shift),
24409 tC3("lsls",	1b00000, _lsls,	   3, (RR, oRR, SH), shift, t_shift),
24410 tCE("lsr",	1a00020, _lsr,	   3, (RR, oRR, SH), shift, t_shift),
24411 tC3("lsrs",	1b00020, _lsrs,	   3, (RR, oRR, SH), shift, t_shift),
24412 tCE("asr",	1a00040, _asr,	   3, (RR, oRR, SH), shift, t_shift),
24413 tC3("asrs",      1b00040, _asrs,     3, (RR, oRR, SH), shift, t_shift),
24414 tCE("ror",	1a00060, _ror,	   3, (RR, oRR, SH), shift, t_shift),
24415 tC3("rors",	1b00060, _rors,	   3, (RR, oRR, SH), shift, t_shift),
24416 tCE("neg",	2600000, _neg,	   2, (RR, RR),      rd_rn, t_neg),
24417 tC3("negs",	2700000, _negs,	   2, (RR, RR),      rd_rn, t_neg),
24418 tCE("push",	92d0000, _push,     1, (REGLST),	     push_pop, t_push_pop),
24419 tCE("pop",	8bd0000, _pop,	   1, (REGLST),	     push_pop, t_push_pop),
24420
24421 /* These may simplify to neg.  */
24422 TCE("rsb",	0600000, ebc00000, 3, (RR, oRR, SH), arit, t_rsb),
24423 TC3("rsbs",	0700000, ebd00000, 3, (RR, oRR, SH), arit, t_rsb),
24424
24425#undef THUMB_VARIANT
24426#define THUMB_VARIANT  & arm_ext_os
24427
24428 TCE("swi",	f000000, df00,     1, (EXPi),        swi, t_swi),
24429 TCE("svc",	f000000, df00,     1, (EXPi),        swi, t_swi),
24430
24431#undef  THUMB_VARIANT
24432#define THUMB_VARIANT  & arm_ext_v6
24433
24434 TCE("cpy",       1a00000, 4600,     2, (RR, RR),      rd_rm, t_cpy),
24435
24436 /* V1 instructions with no Thumb analogue prior to V6T2.  */
24437#undef  THUMB_VARIANT
24438#define THUMB_VARIANT  & arm_ext_v6t2
24439
24440 TCE("teq",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
24441 TC3w("teqs",	1300000, ea900f00, 2, (RR, SH),      cmp,  t_mvn_tst),
24442  CL("teqp",	130f000,           2, (RR, SH),      cmp),
24443
24444 TC3("ldrt",	4300000, f8500e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24445 TC3("ldrbt",	4700000, f8100e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24446 TC3("strt",	4200000, f8400e00, 2, (RR_npcsp, ADDR),   ldstt, t_ldstt),
24447 TC3("strbt",	4600000, f8000e00, 2, (RRnpc_npcsp, ADDR),ldstt, t_ldstt),
24448
24449 TC3("stmdb",	9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24450 TC3("stmfd",     9000000, e9000000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24451
24452 TC3("ldmdb",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24453 TC3("ldmea",	9100000, e9100000, 2, (RRw, REGLST), ldmstm, t_ldmstm),
24454
24455 /* V1 instructions with no Thumb analogue at all.  */
24456  CE("rsc",	0e00000,	   3, (RR, oRR, SH), arit),
24457  C3(rscs,	0f00000,	   3, (RR, oRR, SH), arit),
24458
24459  C3(stmib,	9800000,	   2, (RRw, REGLST), ldmstm),
24460  C3(stmfa,	9800000,	   2, (RRw, REGLST), ldmstm),
24461  C3(stmda,	8000000,	   2, (RRw, REGLST), ldmstm),
24462  C3(stmed,	8000000,	   2, (RRw, REGLST), ldmstm),
24463  C3(ldmib,	9900000,	   2, (RRw, REGLST), ldmstm),
24464  C3(ldmed,	9900000,	   2, (RRw, REGLST), ldmstm),
24465  C3(ldmda,	8100000,	   2, (RRw, REGLST), ldmstm),
24466  C3(ldmfa,	8100000,	   2, (RRw, REGLST), ldmstm),
24467
24468#undef  ARM_VARIANT
24469#define ARM_VARIANT    & arm_ext_v2	/* ARM 2 - multiplies.	*/
24470#undef  THUMB_VARIANT
24471#define THUMB_VARIANT  & arm_ext_v4t
24472
24473 tCE("mul",	0000090, _mul,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
24474 tC3("muls",	0100090, _muls,	   3, (RRnpc, RRnpc, oRR), mul, t_mul),
24475
24476#undef  THUMB_VARIANT
24477#define THUMB_VARIANT  & arm_ext_v6t2
24478
24479 TCE("mla",	0200090, fb000000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24480  C3(mlas,	0300090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas),
24481
24482  /* Generic coprocessor instructions.	*/
24483 TCE("cdp",	e000000, ee000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
24484 TCE("ldc",	c100000, ec100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24485 TC3("ldcl",	c500000, ec500000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24486 TCE("stc",	c000000, ec000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24487 TC3("stcl",	c400000, ec400000, 3, (RCP, RCN, ADDRGLDC),	        lstc,   lstc),
24488 TCE("mcr",	e000010, ee000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24489 TCE("mrc",	e100010, ee100010, 6, (RCP, I7b, APSR_RR, RCN, RCN, oI7b),   co_reg, co_reg),
24490
24491#undef  ARM_VARIANT
24492#define ARM_VARIANT  & arm_ext_v2s /* ARM 3 - swp instructions.  */
24493
24494  CE("swp",	1000090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24495  C3(swpb,	1400090,           3, (RRnpc, RRnpc, RRnpcb), rd_rm_rn),
24496
24497#undef  ARM_VARIANT
24498#define ARM_VARIANT    & arm_ext_v3	/* ARM 6 Status register instructions.	*/
24499#undef  THUMB_VARIANT
24500#define THUMB_VARIANT  & arm_ext_msr
24501
24502 TCE("mrs",	1000000, f3e08000, 2, (RRnpc, rPSR), mrs, t_mrs),
24503 TCE("msr",	120f000, f3808000, 2, (wPSR, RR_EXi), msr, t_msr),
24504
24505#undef  ARM_VARIANT
24506#define ARM_VARIANT    & arm_ext_v3m	 /* ARM 7M long multiplies.  */
24507#undef  THUMB_VARIANT
24508#define THUMB_VARIANT  & arm_ext_v6t2
24509
24510 TCE("smull",	0c00090, fb800000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24511  CM("smull","s",	0d00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24512 TCE("umull",	0800090, fba00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24513  CM("umull","s",	0900090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24514 TCE("smlal",	0e00090, fbc00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24515  CM("smlal","s",	0f00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24516 TCE("umlal",	0a00090, fbe00000, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mull, t_mull),
24517  CM("umlal","s",	0b00090,           4, (RRnpc, RRnpc, RRnpc, RRnpc), mull),
24518
24519#undef  ARM_VARIANT
24520#define ARM_VARIANT    & arm_ext_v4	/* ARM Architecture 4.	*/
24521#undef  THUMB_VARIANT
24522#define THUMB_VARIANT  & arm_ext_v4t
24523
24524 tC3("ldrh",	01000b0, _ldrh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24525 tC3("strh",	00000b0, _strh,     2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24526 tC3("ldrsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24527 tC3("ldrsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24528 tC3("ldsh",	01000f0, _ldrsh,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24529 tC3("ldsb",	01000d0, _ldrsb,    2, (RRnpc_npcsp, ADDRGLDRS), ldstv4, t_ldst),
24530
24531#undef  ARM_VARIANT
24532#define ARM_VARIANT  & arm_ext_v4t_5
24533
24534  /* ARM Architecture 4T.  */
24535  /* Note: bx (and blx) are required on V5, even if the processor does
24536     not support Thumb.	 */
24537 TCE("bx",	12fff10, 4700, 1, (RR),	bx, t_bx),
24538
24539#undef  ARM_VARIANT
24540#define ARM_VARIANT    & arm_ext_v5 /*  ARM Architecture 5T.	 */
24541#undef  THUMB_VARIANT
24542#define THUMB_VARIANT  & arm_ext_v5t
24543
24544  /* Note: blx has 2 variants; the .value coded here is for
24545     BLX(2).  Only this variant has conditional execution.  */
24546 TCE("blx",	12fff30, 4780, 1, (RR_EXr),			    blx,  t_blx),
24547 TUE("bkpt",	1200070, be00, 1, (oIffffb),			    bkpt, t_bkpt),
24548
24549#undef  THUMB_VARIANT
24550#define THUMB_VARIANT  & arm_ext_v6t2
24551
24552 TCE("clz",	16f0f10, fab0f080, 2, (RRnpc, RRnpc),		        rd_rm,  t_clz),
24553 TUF("ldc2",	c100000, fc100000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
24554 TUF("ldc2l",	c500000, fc500000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
24555 TUF("stc2",	c000000, fc000000, 3, (RCP, RCN, ADDRGLDC),	        lstc,	lstc),
24556 TUF("stc2l",	c400000, fc400000, 3, (RCP, RCN, ADDRGLDC),		        lstc,	lstc),
24557 TUF("cdp2",	e000000, fe000000, 6, (RCP, I15b, RCN, RCN, RCN, oI7b), cdp,    cdp),
24558 TUF("mcr2",	e000010, fe000010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24559 TUF("mrc2",	e100010, fe100010, 6, (RCP, I7b, RR, RCN, RCN, oI7b),   co_reg, co_reg),
24560
24561#undef  ARM_VARIANT
24562#define ARM_VARIANT    & arm_ext_v5exp /*  ARM Architecture 5TExP.  */
24563#undef  THUMB_VARIANT
24564#define THUMB_VARIANT  & arm_ext_v5exp
24565
24566 TCE("smlabb",	1000080, fb100000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24567 TCE("smlatb",	10000a0, fb100020, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24568 TCE("smlabt",	10000c0, fb100010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24569 TCE("smlatt",	10000e0, fb100030, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24570
24571 TCE("smlawb",	1200080, fb300000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24572 TCE("smlawt",	12000c0, fb300010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smla, t_mla),
24573
24574 TCE("smlalbb",	1400080, fbc00080, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24575 TCE("smlaltb",	14000a0, fbc000a0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24576 TCE("smlalbt",	14000c0, fbc00090, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24577 TCE("smlaltt",	14000e0, fbc000b0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),   smlal, t_mlal),
24578
24579 TCE("smulbb",	1600080, fb10f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24580 TCE("smultb",	16000a0, fb10f020, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24581 TCE("smulbt",	16000c0, fb10f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24582 TCE("smultt",	16000e0, fb10f030, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24583
24584 TCE("smulwb",	12000a0, fb30f000, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24585 TCE("smulwt",	12000e0, fb30f010, 3, (RRnpc, RRnpc, RRnpc),	    smul, t_simd),
24586
24587 TCE("qadd",	1000050, fa80f080, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24588 TCE("qdadd",	1400050, fa80f090, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24589 TCE("qsub",	1200050, fa80f0a0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24590 TCE("qdsub",	1600050, fa80f0b0, 3, (RRnpc, RRnpc, RRnpc),	    rd_rm_rn, t_simd2),
24591
24592#undef  ARM_VARIANT
24593#define ARM_VARIANT    & arm_ext_v5e /*  ARM Architecture 5TE.  */
24594#undef  THUMB_VARIANT
24595#define THUMB_VARIANT  & arm_ext_v6t2
24596
24597 TUF("pld",	450f000, f810f000, 1, (ADDR),		     pld,  t_pld),
24598 TC3("ldrd",	00000d0, e8500000, 3, (RRnpc_npcsp, oRRnpc_npcsp, ADDRGLDRS),
24599     ldrd, t_ldstd),
24600 TC3("strd",	00000f0, e8400000, 3, (RRnpc_npcsp, oRRnpc_npcsp,
24601				       ADDRGLDRS), ldrd, t_ldstd),
24602
24603 TCE("mcrr",	c400000, ec400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24604 TCE("mrrc",	c500000, ec500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24605
24606#undef  ARM_VARIANT
24607#define ARM_VARIANT  & arm_ext_v5j /*  ARM Architecture 5TEJ.  */
24608
24609 TCE("bxj",	12fff20, f3c08f00, 1, (RR),			  bxj, t_bxj),
24610
24611#undef  ARM_VARIANT
24612#define ARM_VARIANT    & arm_ext_v6 /*  ARM V6.  */
24613#undef  THUMB_VARIANT
24614#define THUMB_VARIANT  & arm_ext_v6
24615
24616 TUF("cpsie",     1080000, b660,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
24617 TUF("cpsid",     10c0000, b670,     2, (CPSF, oI31b),              cpsi,   t_cpsi),
24618 tCE("rev",       6bf0f30, _rev,      2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24619 tCE("rev16",     6bf0fb0, _rev16,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24620 tCE("revsh",     6ff0fb0, _revsh,    2, (RRnpc, RRnpc),             rd_rm,  t_rev),
24621 tCE("sxth",      6bf0070, _sxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24622 tCE("uxth",      6ff0070, _uxth,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24623 tCE("sxtb",      6af0070, _sxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24624 tCE("uxtb",      6ef0070, _uxtb,     3, (RRnpc, RRnpc, oROR),       sxth,   t_sxth),
24625 TUF("setend",    1010000, b650,     1, (ENDI),                     setend, t_setend),
24626
24627#undef  THUMB_VARIANT
24628#define THUMB_VARIANT  & arm_ext_v6t2_v8m
24629
24630 TCE("ldrex",	1900f9f, e8500f00, 2, (RRnpc_npcsp, ADDR),	  ldrex, t_ldrex),
24631 TCE("strex",	1800f90, e8400000, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24632				      strex,  t_strex),
24633#undef  THUMB_VARIANT
24634#define THUMB_VARIANT  & arm_ext_v6t2
24635
24636 TUF("mcrr2",	c400000, fc400000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24637 TUF("mrrc2",	c500000, fc500000, 5, (RCP, I15b, RRnpc, RRnpc, RCN), co_reg2c, co_reg2c),
24638
24639 TCE("ssat",	6a00010, f3000000, 4, (RRnpc, I32, RRnpc, oSHllar),ssat,   t_ssat),
24640 TCE("usat",	6e00010, f3800000, 4, (RRnpc, I31, RRnpc, oSHllar),usat,   t_usat),
24641
24642/*  ARM V6 not included in V7M.  */
24643#undef  THUMB_VARIANT
24644#define THUMB_VARIANT  & arm_ext_v6_notm
24645 TUF("rfeia",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24646 TUF("rfe",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24647  UF(rfeib,	9900a00,           1, (RRw),			   rfe),
24648  UF(rfeda,	8100a00,           1, (RRw),			   rfe),
24649 TUF("rfedb",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
24650 TUF("rfefd",	8900a00, e990c000, 1, (RRw),			   rfe, rfe),
24651  UF(rfefa,	8100a00,           1, (RRw),			   rfe),
24652 TUF("rfeea",	9100a00, e810c000, 1, (RRw),			   rfe, rfe),
24653  UF(rfeed,	9900a00,           1, (RRw),			   rfe),
24654 TUF("srsia",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24655 TUF("srs",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24656 TUF("srsea",	8c00500, e980c000, 2, (oRRw, I31w),		   srs,  srs),
24657  UF(srsib,	9c00500,           2, (oRRw, I31w),		   srs),
24658  UF(srsfa,	9c00500,           2, (oRRw, I31w),		   srs),
24659  UF(srsda,	8400500,	   2, (oRRw, I31w),		   srs),
24660  UF(srsed,	8400500,	   2, (oRRw, I31w),		   srs),
24661 TUF("srsdb",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
24662 TUF("srsfd",	9400500, e800c000, 2, (oRRw, I31w),		   srs,  srs),
24663 TUF("cps",	1020000, f3af8100, 1, (I31b),			  imm0, t_cps),
24664
24665/*  ARM V6 not included in V7M (eg. integer SIMD).  */
24666#undef  THUMB_VARIANT
24667#define THUMB_VARIANT  & arm_ext_v6_dsp
24668 TCE("pkhbt",	6800010, eac00000, 4, (RRnpc, RRnpc, RRnpc, oSHll),   pkhbt, t_pkhbt),
24669 TCE("pkhtb",	6800050, eac00020, 4, (RRnpc, RRnpc, RRnpc, oSHar),   pkhtb, t_pkhtb),
24670 TCE("qadd16",	6200f10, fa90f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24671 TCE("qadd8",	6200f90, fa80f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24672 TCE("qasx",	6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24673 /* Old name for QASX.  */
24674 TCE("qaddsubx",6200f30, faa0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24675 TCE("qsax",	6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24676 /* Old name for QSAX.  */
24677 TCE("qsubaddx",6200f50, fae0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24678 TCE("qsub16",	6200f70, fad0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24679 TCE("qsub8",	6200ff0, fac0f010, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24680 TCE("sadd16",	6100f10, fa90f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24681 TCE("sadd8",	6100f90, fa80f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24682 TCE("sasx",	6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24683 /* Old name for SASX.  */
24684 TCE("saddsubx",6100f30, faa0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24685 TCE("shadd16",	6300f10, fa90f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24686 TCE("shadd8",	6300f90, fa80f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24687 TCE("shasx",   6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24688 /* Old name for SHASX.  */
24689 TCE("shaddsubx", 6300f30, faa0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24690 TCE("shsax",     6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24691 /* Old name for SHSAX.  */
24692 TCE("shsubaddx", 6300f50, fae0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24693 TCE("shsub16",	6300f70, fad0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24694 TCE("shsub8",	6300ff0, fac0f020, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24695 TCE("ssax",	6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24696 /* Old name for SSAX.  */
24697 TCE("ssubaddx",6100f50, fae0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24698 TCE("ssub16",	6100f70, fad0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24699 TCE("ssub8",	6100ff0, fac0f000, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24700 TCE("uadd16",	6500f10, fa90f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24701 TCE("uadd8",	6500f90, fa80f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24702 TCE("uasx",	6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24703 /* Old name for UASX.  */
24704 TCE("uaddsubx",6500f30, faa0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24705 TCE("uhadd16",	6700f10, fa90f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24706 TCE("uhadd8",	6700f90, fa80f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24707 TCE("uhasx",   6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24708 /* Old name for UHASX.  */
24709 TCE("uhaddsubx", 6700f30, faa0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24710 TCE("uhsax",     6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24711 /* Old name for UHSAX.  */
24712 TCE("uhsubaddx", 6700f50, fae0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24713 TCE("uhsub16",	6700f70, fad0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24714 TCE("uhsub8",	6700ff0, fac0f060, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24715 TCE("uqadd16",	6600f10, fa90f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24716 TCE("uqadd8",	6600f90, fa80f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24717 TCE("uqasx",   6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24718 /* Old name for UQASX.  */
24719 TCE("uqaddsubx", 6600f30, faa0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24720 TCE("uqsax",     6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24721 /* Old name for UQSAX.  */
24722 TCE("uqsubaddx", 6600f50, fae0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24723 TCE("uqsub16",	6600f70, fad0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24724 TCE("uqsub8",	6600ff0, fac0f050, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24725 TCE("usub16",	6500f70, fad0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24726 TCE("usax",	6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24727 /* Old name for USAX.  */
24728 TCE("usubaddx",6500f50, fae0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24729 TCE("usub8",	6500ff0, fac0f040, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24730 TCE("sxtah",	6b00070, fa00f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24731 TCE("sxtab16",	6800070, fa20f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24732 TCE("sxtab",	6a00070, fa40f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24733 TCE("sxtb16",	68f0070, fa2ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
24734 TCE("uxtah",	6f00070, fa10f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24735 TCE("uxtab16",	6c00070, fa30f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24736 TCE("uxtab",	6e00070, fa50f080, 4, (RRnpc, RRnpc, RRnpc, oROR), sxtah, t_sxtah),
24737 TCE("uxtb16",	6cf0070, fa3ff080, 3, (RRnpc, RRnpc, oROR),	   sxth,  t_sxth),
24738 TCE("sel",	6800fb0, faa0f080, 3, (RRnpc, RRnpc, RRnpc),	   rd_rn_rm, t_simd),
24739 TCE("smlad",	7000010, fb200000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24740 TCE("smladx",	7000030, fb200010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24741 TCE("smlald",	7400010, fbc000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24742 TCE("smlaldx",	7400030, fbc000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24743 TCE("smlsd",	7000050, fb400000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24744 TCE("smlsdx",	7000070, fb400010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24745 TCE("smlsld",	7400050, fbd000c0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24746 TCE("smlsldx",	7400070, fbd000d0, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,t_mlal),
24747 TCE("smmla",	7500010, fb500000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24748 TCE("smmlar",	7500030, fb500010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24749 TCE("smmls",	75000d0, fb600000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24750 TCE("smmlsr",	75000f0, fb600010, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla, t_mla),
24751 TCE("smmul",	750f010, fb50f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24752 TCE("smmulr",	750f030, fb50f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24753 TCE("smuad",	700f010, fb20f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24754 TCE("smuadx",	700f030, fb20f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24755 TCE("smusd",	700f050, fb40f000, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24756 TCE("smusdx",	700f070, fb40f010, 3, (RRnpc, RRnpc, RRnpc),	   smul, t_simd),
24757 TCE("ssat16",	6a00f30, f3200000, 3, (RRnpc, I16, RRnpc),	   ssat16, t_ssat16),
24758 TCE("umaal",	0400090, fbe00060, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smlal,  t_mlal),
24759 TCE("usad8",	780f010, fb70f000, 3, (RRnpc, RRnpc, RRnpc),	   smul,   t_simd),
24760 TCE("usada8",	7800010, fb700000, 4, (RRnpc, RRnpc, RRnpc, RRnpc),smla,   t_mla),
24761 TCE("usat16",	6e00f30, f3a00000, 3, (RRnpc, I15, RRnpc),	   usat16, t_usat16),
24762
24763#undef  ARM_VARIANT
24764#define ARM_VARIANT   & arm_ext_v6k_v6t2
24765#undef  THUMB_VARIANT
24766#define THUMB_VARIANT & arm_ext_v6k_v6t2
24767
24768 tCE("yield",	320f001, _yield,    0, (), noargs, t_hint),
24769 tCE("wfe",	320f002, _wfe,      0, (), noargs, t_hint),
24770 tCE("wfi",	320f003, _wfi,      0, (), noargs, t_hint),
24771 tCE("sev",	320f004, _sev,      0, (), noargs, t_hint),
24772
24773#undef  THUMB_VARIANT
24774#define THUMB_VARIANT  & arm_ext_v6_notm
24775 TCE("ldrexd",	1b00f9f, e8d0007f, 3, (RRnpc_npcsp, oRRnpc_npcsp, RRnpcb),
24776				      ldrexd, t_ldrexd),
24777 TCE("strexd",	1a00f90, e8c00070, 4, (RRnpc_npcsp, RRnpc_npcsp, oRRnpc_npcsp,
24778				       RRnpcb), strexd, t_strexd),
24779
24780#undef  THUMB_VARIANT
24781#define THUMB_VARIANT  & arm_ext_v6t2_v8m
24782 TCE("ldrexb",	1d00f9f, e8d00f4f, 2, (RRnpc_npcsp,RRnpcb),
24783     rd_rn,  rd_rn),
24784 TCE("ldrexh",	1f00f9f, e8d00f5f, 2, (RRnpc_npcsp, RRnpcb),
24785     rd_rn,  rd_rn),
24786 TCE("strexb",	1c00f90, e8c00f40, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24787     strex, t_strexbh),
24788 TCE("strexh",	1e00f90, e8c00f50, 3, (RRnpc_npcsp, RRnpc_npcsp, ADDR),
24789     strex, t_strexbh),
24790 TUF("clrex",	57ff01f, f3bf8f2f, 0, (),			      noargs, noargs),
24791
24792#undef  ARM_VARIANT
24793#define ARM_VARIANT    & arm_ext_sec
24794#undef  THUMB_VARIANT
24795#define THUMB_VARIANT  & arm_ext_sec
24796
24797 TCE("smc",	1600070, f7f08000, 1, (EXPi), smc, t_smc),
24798
24799#undef	ARM_VARIANT
24800#define	ARM_VARIANT    & arm_ext_virt
24801#undef	THUMB_VARIANT
24802#define	THUMB_VARIANT    & arm_ext_virt
24803
24804 TCE("hvc",	1400070, f7e08000, 1, (EXPi), hvc, t_hvc),
24805 TCE("eret",	160006e, f3de8f00, 0, (), noargs, noargs),
24806
24807#undef	ARM_VARIANT
24808#define	ARM_VARIANT    & arm_ext_pan
24809#undef	THUMB_VARIANT
24810#define	THUMB_VARIANT  & arm_ext_pan
24811
24812 TUF("setpan",	1100000, b610, 1, (I7), setpan, t_setpan),
24813
24814#undef  ARM_VARIANT
24815#define ARM_VARIANT    & arm_ext_v6t2
24816#undef  THUMB_VARIANT
24817#define THUMB_VARIANT  & arm_ext_v6t2
24818
24819 TCE("bfc",	7c0001f, f36f0000, 3, (RRnpc, I31, I32),	   bfc, t_bfc),
24820 TCE("bfi",	7c00010, f3600000, 4, (RRnpc, RRnpc_I0, I31, I32), bfi, t_bfi),
24821 TCE("sbfx",	7a00050, f3400000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
24822 TCE("ubfx",	7e00050, f3c00000, 4, (RR, RR, I31, I32),	   bfx, t_bfx),
24823
24824 TCE("mls",	0600090, fb000010, 4, (RRnpc, RRnpc, RRnpc, RRnpc), mlas, t_mla),
24825 TCE("rbit",	6ff0f30, fa90f0a0, 2, (RR, RR),			    rd_rm, t_rbit),
24826
24827 TC3("ldrht",	03000b0, f8300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24828 TC3("ldrsht",	03000f0, f9300e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24829 TC3("ldrsbt",	03000d0, f9100e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24830 TC3("strht",	02000b0, f8200e00, 2, (RRnpc_npcsp, ADDR), ldsttv4, t_ldstt),
24831
24832#undef  ARM_VARIANT
24833#define ARM_VARIANT    & arm_ext_v3
24834#undef  THUMB_VARIANT
24835#define THUMB_VARIANT  & arm_ext_v6t2
24836
24837 TUE("csdb",	320f014, f3af8014, 0, (), noargs, t_csdb),
24838 TUF("ssbb",	57ff040, f3bf8f40, 0, (), noargs, t_csdb),
24839 TUF("pssbb",	57ff044, f3bf8f44, 0, (), noargs, t_csdb),
24840
24841#undef  ARM_VARIANT
24842#define ARM_VARIANT    & arm_ext_v6t2
24843#undef  THUMB_VARIANT
24844#define THUMB_VARIANT  & arm_ext_v6t2_v8m
24845 TCE("movw",	3000000, f2400000, 2, (RRnpc, HALF),		    mov16, t_mov16),
24846 TCE("movt",	3400000, f2c00000, 2, (RRnpc, HALF),		    mov16, t_mov16),
24847
24848 /* Thumb-only instructions.  */
24849#undef  ARM_VARIANT
24850#define ARM_VARIANT NULL
24851  TUE("cbnz",     0,           b900,     2, (RR, EXP), 0, t_cbz),
24852  TUE("cbz",      0,           b100,     2, (RR, EXP), 0, t_cbz),
24853
24854 /* ARM does not really have an IT instruction, so always allow it.
24855    The opcode is copied from Thumb in order to allow warnings in
24856    -mimplicit-it=[never | arm] modes.  */
24857#undef  ARM_VARIANT
24858#define ARM_VARIANT  & arm_ext_v1
24859#undef  THUMB_VARIANT
24860#define THUMB_VARIANT  & arm_ext_v6t2
24861
24862 TUE("it",        bf08,        bf08,     1, (COND),   it,    t_it),
24863 TUE("itt",       bf0c,        bf0c,     1, (COND),   it,    t_it),
24864 TUE("ite",       bf04,        bf04,     1, (COND),   it,    t_it),
24865 TUE("ittt",      bf0e,        bf0e,     1, (COND),   it,    t_it),
24866 TUE("itet",      bf06,        bf06,     1, (COND),   it,    t_it),
24867 TUE("itte",      bf0a,        bf0a,     1, (COND),   it,    t_it),
24868 TUE("itee",      bf02,        bf02,     1, (COND),   it,    t_it),
24869 TUE("itttt",     bf0f,        bf0f,     1, (COND),   it,    t_it),
24870 TUE("itett",     bf07,        bf07,     1, (COND),   it,    t_it),
24871 TUE("ittet",     bf0b,        bf0b,     1, (COND),   it,    t_it),
24872 TUE("iteet",     bf03,        bf03,     1, (COND),   it,    t_it),
24873 TUE("ittte",     bf0d,        bf0d,     1, (COND),   it,    t_it),
24874 TUE("itete",     bf05,        bf05,     1, (COND),   it,    t_it),
24875 TUE("ittee",     bf09,        bf09,     1, (COND),   it,    t_it),
24876 TUE("iteee",     bf01,        bf01,     1, (COND),   it,    t_it),
24877 /* ARM/Thumb-2 instructions with no Thumb-1 equivalent.  */
24878 TC3("rrx",       01a00060, ea4f0030, 2, (RR, RR), rd_rm, t_rrx),
24879 TC3("rrxs",      01b00060, ea5f0030, 2, (RR, RR), rd_rm, t_rrx),
24880
24881 /* Thumb2 only instructions.  */
24882#undef  ARM_VARIANT
24883#define ARM_VARIANT  NULL
24884
24885 TCE("addw",	0, f2000000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24886 TCE("subw",	0, f2a00000, 3, (RR, RR, EXPi), 0, t_add_sub_w),
24887 TCE("orn",       0, ea600000, 3, (RR, oRR, SH),  0, t_orn),
24888 TCE("orns",      0, ea700000, 3, (RR, oRR, SH),  0, t_orn),
24889 TCE("tbb",       0, e8d0f000, 1, (TB), 0, t_tb),
24890 TCE("tbh",       0, e8d0f010, 1, (TB), 0, t_tb),
24891
24892 /* Hardware division instructions.  */
24893#undef  ARM_VARIANT
24894#define ARM_VARIANT    & arm_ext_adiv
24895#undef  THUMB_VARIANT
24896#define THUMB_VARIANT  & arm_ext_div
24897
24898 TCE("sdiv",	710f010, fb90f0f0, 3, (RR, oRR, RR), div, t_div),
24899 TCE("udiv",	730f010, fbb0f0f0, 3, (RR, oRR, RR), div, t_div),
24900
24901 /* ARM V6M/V7 instructions.  */
24902#undef  ARM_VARIANT
24903#define ARM_VARIANT    & arm_ext_barrier
24904#undef  THUMB_VARIANT
24905#define THUMB_VARIANT  & arm_ext_barrier
24906
24907 TUF("dmb",	57ff050, f3bf8f50, 1, (oBARRIER_I15), barrier, barrier),
24908 TUF("dsb",	57ff040, f3bf8f40, 1, (oBARRIER_I15), barrier, barrier),
24909 TUF("isb",	57ff060, f3bf8f60, 1, (oBARRIER_I15), barrier, barrier),
24910
24911 /* ARM V7 instructions.  */
24912#undef  ARM_VARIANT
24913#define ARM_VARIANT    & arm_ext_v7
24914#undef  THUMB_VARIANT
24915#define THUMB_VARIANT  & arm_ext_v7
24916
24917 TUF("pli",	450f000, f910f000, 1, (ADDR),	  pli,	    t_pld),
24918 TCE("dbg",	320f0f0, f3af80f0, 1, (I15),	  dbg,	    t_dbg),
24919
24920#undef  ARM_VARIANT
24921#define ARM_VARIANT    & arm_ext_mp
24922#undef  THUMB_VARIANT
24923#define THUMB_VARIANT  & arm_ext_mp
24924
24925 TUF("pldw",	410f000, f830f000, 1, (ADDR),	pld,	t_pld),
24926
24927 /* AArchv8 instructions.  */
24928#undef  ARM_VARIANT
24929#define ARM_VARIANT   & arm_ext_v8
24930
24931/* Instructions shared between armv8-a and armv8-m.  */
24932#undef  THUMB_VARIANT
24933#define THUMB_VARIANT & arm_ext_atomics
24934
24935 TCE("lda",	1900c9f, e8d00faf, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
24936 TCE("ldab",	1d00c9f, e8d00f8f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24937 TCE("ldah",	1f00c9f, e8d00f9f, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24938 TCE("stl",	180fc90, e8c00faf, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24939 TCE("stlb",	1c0fc90, e8c00f8f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24940 TCE("stlh",	1e0fc90, e8c00f9f, 2, (RRnpc, RRnpcb),	rm_rn,  rd_rn),
24941 TCE("ldaex",	1900e9f, e8d00fef, 2, (RRnpc, RRnpcb),	rd_rn,	rd_rn),
24942 TCE("ldaexb",	1d00e9f, e8d00fcf, 2, (RRnpc,RRnpcb),	rd_rn,  rd_rn),
24943 TCE("ldaexh",	1f00e9f, e8d00fdf, 2, (RRnpc, RRnpcb),	rd_rn,  rd_rn),
24944 TCE("stlex",	1800e90, e8c00fe0, 3, (RRnpc, RRnpc, RRnpcb),
24945							stlex,  t_stlex),
24946 TCE("stlexb",	1c00e90, e8c00fc0, 3, (RRnpc, RRnpc, RRnpcb),
24947							stlex, t_stlex),
24948 TCE("stlexh",	1e00e90, e8c00fd0, 3, (RRnpc, RRnpc, RRnpcb),
24949							stlex, t_stlex),
24950#undef  THUMB_VARIANT
24951#define THUMB_VARIANT & arm_ext_v8
24952
24953 tCE("sevl",	320f005, _sevl,    0, (),		noargs,	t_hint),
24954 TCE("ldaexd",	1b00e9f, e8d000ff, 3, (RRnpc, oRRnpc, RRnpcb),
24955							ldrexd, t_ldrexd),
24956 TCE("stlexd",	1a00e90, e8c000f0, 4, (RRnpc, RRnpc, oRRnpc, RRnpcb),
24957							strexd, t_strexd),
24958#undef THUMB_VARIANT
24959#define THUMB_VARIANT & arm_ext_v8r
24960#undef ARM_VARIANT
24961#define ARM_VARIANT & arm_ext_v8r
24962
24963/* ARMv8-R instructions.  */
24964 TUF("dfb",	57ff04c, f3bf8f4c, 0, (), noargs, noargs),
24965
24966/* Defined in V8 but is in undefined encoding space for earlier
24967   architectures.  However earlier architectures are required to treat
24968   this instruction as a semihosting trap as well.  Hence while not explicitly
24969   defined as such, it is in fact correct to define the instruction for all
24970   architectures.  */
24971#undef  THUMB_VARIANT
24972#define THUMB_VARIANT  & arm_ext_v1
24973#undef  ARM_VARIANT
24974#define ARM_VARIANT  & arm_ext_v1
24975 TUE("hlt",	1000070, ba80,     1, (oIffffb),	bkpt,	t_hlt),
24976
24977 /* ARMv8 T32 only.  */
24978#undef  ARM_VARIANT
24979#define ARM_VARIANT  NULL
24980 TUF("dcps1",	0,	 f78f8001, 0, (),	noargs, noargs),
24981 TUF("dcps2",	0,	 f78f8002, 0, (),	noargs, noargs),
24982 TUF("dcps3",	0,	 f78f8003, 0, (),	noargs, noargs),
24983
24984  /* FP for ARMv8.  */
24985#undef  ARM_VARIANT
24986#define ARM_VARIANT   & fpu_vfp_ext_armv8xd
24987#undef  THUMB_VARIANT
24988#define THUMB_VARIANT & fpu_vfp_ext_armv8xd
24989
24990  nUF(vseleq, _vseleq, 3, (RVSD, RVSD, RVSD),		vsel),
24991  nUF(vselvs, _vselvs, 3, (RVSD, RVSD, RVSD),		vsel),
24992  nUF(vselge, _vselge, 3, (RVSD, RVSD, RVSD),		vsel),
24993  nUF(vselgt, _vselgt, 3, (RVSD, RVSD, RVSD),		vsel),
24994  nCE(vrintr, _vrintr, 2, (RNSDQ, oRNSDQ),		vrintr),
24995  mnCE(vrintz, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintz),
24996  mnCE(vrintx, _vrintr, 2, (RNSDQMQ, oRNSDQMQ),		vrintx),
24997  mnUF(vrinta, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrinta),
24998  mnUF(vrintn, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintn),
24999  mnUF(vrintp, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintp),
25000  mnUF(vrintm, _vrinta, 2, (RNSDQMQ, oRNSDQMQ),		vrintm),
25001
25002  /* Crypto v1 extensions.  */
25003#undef  ARM_VARIANT
25004#define ARM_VARIANT & fpu_crypto_ext_armv8
25005#undef  THUMB_VARIANT
25006#define THUMB_VARIANT & fpu_crypto_ext_armv8
25007
25008  nUF(aese, _aes, 2, (RNQ, RNQ), aese),
25009  nUF(aesd, _aes, 2, (RNQ, RNQ), aesd),
25010  nUF(aesmc, _aes, 2, (RNQ, RNQ), aesmc),
25011  nUF(aesimc, _aes, 2, (RNQ, RNQ), aesimc),
25012  nUF(sha1c, _sha3op, 3, (RNQ, RNQ, RNQ), sha1c),
25013  nUF(sha1p, _sha3op, 3, (RNQ, RNQ, RNQ), sha1p),
25014  nUF(sha1m, _sha3op, 3, (RNQ, RNQ, RNQ), sha1m),
25015  nUF(sha1su0, _sha3op, 3, (RNQ, RNQ, RNQ), sha1su0),
25016  nUF(sha256h, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h),
25017  nUF(sha256h2, _sha3op, 3, (RNQ, RNQ, RNQ), sha256h2),
25018  nUF(sha256su1, _sha3op, 3, (RNQ, RNQ, RNQ), sha256su1),
25019  nUF(sha1h, _sha1h, 2, (RNQ, RNQ), sha1h),
25020  nUF(sha1su1, _sha2op, 2, (RNQ, RNQ), sha1su1),
25021  nUF(sha256su0, _sha2op, 2, (RNQ, RNQ), sha256su0),
25022
25023#undef  ARM_VARIANT
25024#define ARM_VARIANT   & arm_ext_crc
25025#undef  THUMB_VARIANT
25026#define THUMB_VARIANT & arm_ext_crc
25027  TUEc("crc32b", 1000040, fac0f080, 3, (RR, oRR, RR), crc32b),
25028  TUEc("crc32h", 1200040, fac0f090, 3, (RR, oRR, RR), crc32h),
25029  TUEc("crc32w", 1400040, fac0f0a0, 3, (RR, oRR, RR), crc32w),
25030  TUEc("crc32cb",1000240, fad0f080, 3, (RR, oRR, RR), crc32cb),
25031  TUEc("crc32ch",1200240, fad0f090, 3, (RR, oRR, RR), crc32ch),
25032  TUEc("crc32cw",1400240, fad0f0a0, 3, (RR, oRR, RR), crc32cw),
25033
25034 /* ARMv8.2 RAS extension.  */
25035#undef  ARM_VARIANT
25036#define ARM_VARIANT   & arm_ext_ras
25037#undef  THUMB_VARIANT
25038#define THUMB_VARIANT & arm_ext_ras
25039 TUE ("esb", 320f010, f3af8010, 0, (), noargs,  noargs),
25040
25041#undef  ARM_VARIANT
25042#define ARM_VARIANT   & arm_ext_v8_3
25043#undef  THUMB_VARIANT
25044#define THUMB_VARIANT & arm_ext_v8_3
25045 NCE (vjcvt, eb90bc0, 2, (RVS, RVD), vjcvt),
25046
25047#undef  ARM_VARIANT
25048#define ARM_VARIANT   & fpu_neon_ext_dotprod
25049#undef  THUMB_VARIANT
25050#define THUMB_VARIANT & fpu_neon_ext_dotprod
25051 NUF (vsdot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_s),
25052 NUF (vudot, d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), neon_dotproduct_u),
25053
25054#undef  ARM_VARIANT
25055#define ARM_VARIANT  & fpu_fpa_ext_v1  /* Core FPA instruction set (V1).  */
25056#undef  THUMB_VARIANT
25057#define THUMB_VARIANT NULL
25058
25059 cCE("wfs",	e200110, 1, (RR),	     rd),
25060 cCE("rfs",	e300110, 1, (RR),	     rd),
25061 cCE("wfc",	e400110, 1, (RR),	     rd),
25062 cCE("rfc",	e500110, 1, (RR),	     rd),
25063
25064 cCL("ldfs",	c100100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25065 cCL("ldfd",	c108100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25066 cCL("ldfe",	c500100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25067 cCL("ldfp",	c508100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25068
25069 cCL("stfs",	c000100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25070 cCL("stfd",	c008100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25071 cCL("stfe",	c400100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25072 cCL("stfp",	c408100, 2, (RF, ADDRGLDC),  rd_cpaddr),
25073
25074 cCL("mvfs",	e008100, 2, (RF, RF_IF),     rd_rm),
25075 cCL("mvfsp",	e008120, 2, (RF, RF_IF),     rd_rm),
25076 cCL("mvfsm",	e008140, 2, (RF, RF_IF),     rd_rm),
25077 cCL("mvfsz",	e008160, 2, (RF, RF_IF),     rd_rm),
25078 cCL("mvfd",	e008180, 2, (RF, RF_IF),     rd_rm),
25079 cCL("mvfdp",	e0081a0, 2, (RF, RF_IF),     rd_rm),
25080 cCL("mvfdm",	e0081c0, 2, (RF, RF_IF),     rd_rm),
25081 cCL("mvfdz",	e0081e0, 2, (RF, RF_IF),     rd_rm),
25082 cCL("mvfe",	e088100, 2, (RF, RF_IF),     rd_rm),
25083 cCL("mvfep",	e088120, 2, (RF, RF_IF),     rd_rm),
25084 cCL("mvfem",	e088140, 2, (RF, RF_IF),     rd_rm),
25085 cCL("mvfez",	e088160, 2, (RF, RF_IF),     rd_rm),
25086
25087 cCL("mnfs",	e108100, 2, (RF, RF_IF),     rd_rm),
25088 cCL("mnfsp",	e108120, 2, (RF, RF_IF),     rd_rm),
25089 cCL("mnfsm",	e108140, 2, (RF, RF_IF),     rd_rm),
25090 cCL("mnfsz",	e108160, 2, (RF, RF_IF),     rd_rm),
25091 cCL("mnfd",	e108180, 2, (RF, RF_IF),     rd_rm),
25092 cCL("mnfdp",	e1081a0, 2, (RF, RF_IF),     rd_rm),
25093 cCL("mnfdm",	e1081c0, 2, (RF, RF_IF),     rd_rm),
25094 cCL("mnfdz",	e1081e0, 2, (RF, RF_IF),     rd_rm),
25095 cCL("mnfe",	e188100, 2, (RF, RF_IF),     rd_rm),
25096 cCL("mnfep",	e188120, 2, (RF, RF_IF),     rd_rm),
25097 cCL("mnfem",	e188140, 2, (RF, RF_IF),     rd_rm),
25098 cCL("mnfez",	e188160, 2, (RF, RF_IF),     rd_rm),
25099
25100 cCL("abss",	e208100, 2, (RF, RF_IF),     rd_rm),
25101 cCL("abssp",	e208120, 2, (RF, RF_IF),     rd_rm),
25102 cCL("abssm",	e208140, 2, (RF, RF_IF),     rd_rm),
25103 cCL("abssz",	e208160, 2, (RF, RF_IF),     rd_rm),
25104 cCL("absd",	e208180, 2, (RF, RF_IF),     rd_rm),
25105 cCL("absdp",	e2081a0, 2, (RF, RF_IF),     rd_rm),
25106 cCL("absdm",	e2081c0, 2, (RF, RF_IF),     rd_rm),
25107 cCL("absdz",	e2081e0, 2, (RF, RF_IF),     rd_rm),
25108 cCL("abse",	e288100, 2, (RF, RF_IF),     rd_rm),
25109 cCL("absep",	e288120, 2, (RF, RF_IF),     rd_rm),
25110 cCL("absem",	e288140, 2, (RF, RF_IF),     rd_rm),
25111 cCL("absez",	e288160, 2, (RF, RF_IF),     rd_rm),
25112
25113 cCL("rnds",	e308100, 2, (RF, RF_IF),     rd_rm),
25114 cCL("rndsp",	e308120, 2, (RF, RF_IF),     rd_rm),
25115 cCL("rndsm",	e308140, 2, (RF, RF_IF),     rd_rm),
25116 cCL("rndsz",	e308160, 2, (RF, RF_IF),     rd_rm),
25117 cCL("rndd",	e308180, 2, (RF, RF_IF),     rd_rm),
25118 cCL("rnddp",	e3081a0, 2, (RF, RF_IF),     rd_rm),
25119 cCL("rnddm",	e3081c0, 2, (RF, RF_IF),     rd_rm),
25120 cCL("rnddz",	e3081e0, 2, (RF, RF_IF),     rd_rm),
25121 cCL("rnde",	e388100, 2, (RF, RF_IF),     rd_rm),
25122 cCL("rndep",	e388120, 2, (RF, RF_IF),     rd_rm),
25123 cCL("rndem",	e388140, 2, (RF, RF_IF),     rd_rm),
25124 cCL("rndez",	e388160, 2, (RF, RF_IF),     rd_rm),
25125
25126 cCL("sqts",	e408100, 2, (RF, RF_IF),     rd_rm),
25127 cCL("sqtsp",	e408120, 2, (RF, RF_IF),     rd_rm),
25128 cCL("sqtsm",	e408140, 2, (RF, RF_IF),     rd_rm),
25129 cCL("sqtsz",	e408160, 2, (RF, RF_IF),     rd_rm),
25130 cCL("sqtd",	e408180, 2, (RF, RF_IF),     rd_rm),
25131 cCL("sqtdp",	e4081a0, 2, (RF, RF_IF),     rd_rm),
25132 cCL("sqtdm",	e4081c0, 2, (RF, RF_IF),     rd_rm),
25133 cCL("sqtdz",	e4081e0, 2, (RF, RF_IF),     rd_rm),
25134 cCL("sqte",	e488100, 2, (RF, RF_IF),     rd_rm),
25135 cCL("sqtep",	e488120, 2, (RF, RF_IF),     rd_rm),
25136 cCL("sqtem",	e488140, 2, (RF, RF_IF),     rd_rm),
25137 cCL("sqtez",	e488160, 2, (RF, RF_IF),     rd_rm),
25138
25139 cCL("logs",	e508100, 2, (RF, RF_IF),     rd_rm),
25140 cCL("logsp",	e508120, 2, (RF, RF_IF),     rd_rm),
25141 cCL("logsm",	e508140, 2, (RF, RF_IF),     rd_rm),
25142 cCL("logsz",	e508160, 2, (RF, RF_IF),     rd_rm),
25143 cCL("logd",	e508180, 2, (RF, RF_IF),     rd_rm),
25144 cCL("logdp",	e5081a0, 2, (RF, RF_IF),     rd_rm),
25145 cCL("logdm",	e5081c0, 2, (RF, RF_IF),     rd_rm),
25146 cCL("logdz",	e5081e0, 2, (RF, RF_IF),     rd_rm),
25147 cCL("loge",	e588100, 2, (RF, RF_IF),     rd_rm),
25148 cCL("logep",	e588120, 2, (RF, RF_IF),     rd_rm),
25149 cCL("logem",	e588140, 2, (RF, RF_IF),     rd_rm),
25150 cCL("logez",	e588160, 2, (RF, RF_IF),     rd_rm),
25151
25152 cCL("lgns",	e608100, 2, (RF, RF_IF),     rd_rm),
25153 cCL("lgnsp",	e608120, 2, (RF, RF_IF),     rd_rm),
25154 cCL("lgnsm",	e608140, 2, (RF, RF_IF),     rd_rm),
25155 cCL("lgnsz",	e608160, 2, (RF, RF_IF),     rd_rm),
25156 cCL("lgnd",	e608180, 2, (RF, RF_IF),     rd_rm),
25157 cCL("lgndp",	e6081a0, 2, (RF, RF_IF),     rd_rm),
25158 cCL("lgndm",	e6081c0, 2, (RF, RF_IF),     rd_rm),
25159 cCL("lgndz",	e6081e0, 2, (RF, RF_IF),     rd_rm),
25160 cCL("lgne",	e688100, 2, (RF, RF_IF),     rd_rm),
25161 cCL("lgnep",	e688120, 2, (RF, RF_IF),     rd_rm),
25162 cCL("lgnem",	e688140, 2, (RF, RF_IF),     rd_rm),
25163 cCL("lgnez",	e688160, 2, (RF, RF_IF),     rd_rm),
25164
25165 cCL("exps",	e708100, 2, (RF, RF_IF),     rd_rm),
25166 cCL("expsp",	e708120, 2, (RF, RF_IF),     rd_rm),
25167 cCL("expsm",	e708140, 2, (RF, RF_IF),     rd_rm),
25168 cCL("expsz",	e708160, 2, (RF, RF_IF),     rd_rm),
25169 cCL("expd",	e708180, 2, (RF, RF_IF),     rd_rm),
25170 cCL("expdp",	e7081a0, 2, (RF, RF_IF),     rd_rm),
25171 cCL("expdm",	e7081c0, 2, (RF, RF_IF),     rd_rm),
25172 cCL("expdz",	e7081e0, 2, (RF, RF_IF),     rd_rm),
25173 cCL("expe",	e788100, 2, (RF, RF_IF),     rd_rm),
25174 cCL("expep",	e788120, 2, (RF, RF_IF),     rd_rm),
25175 cCL("expem",	e788140, 2, (RF, RF_IF),     rd_rm),
25176 cCL("expez",	e788160, 2, (RF, RF_IF),     rd_rm),
25177
25178 cCL("sins",	e808100, 2, (RF, RF_IF),     rd_rm),
25179 cCL("sinsp",	e808120, 2, (RF, RF_IF),     rd_rm),
25180 cCL("sinsm",	e808140, 2, (RF, RF_IF),     rd_rm),
25181 cCL("sinsz",	e808160, 2, (RF, RF_IF),     rd_rm),
25182 cCL("sind",	e808180, 2, (RF, RF_IF),     rd_rm),
25183 cCL("sindp",	e8081a0, 2, (RF, RF_IF),     rd_rm),
25184 cCL("sindm",	e8081c0, 2, (RF, RF_IF),     rd_rm),
25185 cCL("sindz",	e8081e0, 2, (RF, RF_IF),     rd_rm),
25186 cCL("sine",	e888100, 2, (RF, RF_IF),     rd_rm),
25187 cCL("sinep",	e888120, 2, (RF, RF_IF),     rd_rm),
25188 cCL("sinem",	e888140, 2, (RF, RF_IF),     rd_rm),
25189 cCL("sinez",	e888160, 2, (RF, RF_IF),     rd_rm),
25190
25191 cCL("coss",	e908100, 2, (RF, RF_IF),     rd_rm),
25192 cCL("cossp",	e908120, 2, (RF, RF_IF),     rd_rm),
25193 cCL("cossm",	e908140, 2, (RF, RF_IF),     rd_rm),
25194 cCL("cossz",	e908160, 2, (RF, RF_IF),     rd_rm),
25195 cCL("cosd",	e908180, 2, (RF, RF_IF),     rd_rm),
25196 cCL("cosdp",	e9081a0, 2, (RF, RF_IF),     rd_rm),
25197 cCL("cosdm",	e9081c0, 2, (RF, RF_IF),     rd_rm),
25198 cCL("cosdz",	e9081e0, 2, (RF, RF_IF),     rd_rm),
25199 cCL("cose",	e988100, 2, (RF, RF_IF),     rd_rm),
25200 cCL("cosep",	e988120, 2, (RF, RF_IF),     rd_rm),
25201 cCL("cosem",	e988140, 2, (RF, RF_IF),     rd_rm),
25202 cCL("cosez",	e988160, 2, (RF, RF_IF),     rd_rm),
25203
25204 cCL("tans",	ea08100, 2, (RF, RF_IF),     rd_rm),
25205 cCL("tansp",	ea08120, 2, (RF, RF_IF),     rd_rm),
25206 cCL("tansm",	ea08140, 2, (RF, RF_IF),     rd_rm),
25207 cCL("tansz",	ea08160, 2, (RF, RF_IF),     rd_rm),
25208 cCL("tand",	ea08180, 2, (RF, RF_IF),     rd_rm),
25209 cCL("tandp",	ea081a0, 2, (RF, RF_IF),     rd_rm),
25210 cCL("tandm",	ea081c0, 2, (RF, RF_IF),     rd_rm),
25211 cCL("tandz",	ea081e0, 2, (RF, RF_IF),     rd_rm),
25212 cCL("tane",	ea88100, 2, (RF, RF_IF),     rd_rm),
25213 cCL("tanep",	ea88120, 2, (RF, RF_IF),     rd_rm),
25214 cCL("tanem",	ea88140, 2, (RF, RF_IF),     rd_rm),
25215 cCL("tanez",	ea88160, 2, (RF, RF_IF),     rd_rm),
25216
25217 cCL("asns",	eb08100, 2, (RF, RF_IF),     rd_rm),
25218 cCL("asnsp",	eb08120, 2, (RF, RF_IF),     rd_rm),
25219 cCL("asnsm",	eb08140, 2, (RF, RF_IF),     rd_rm),
25220 cCL("asnsz",	eb08160, 2, (RF, RF_IF),     rd_rm),
25221 cCL("asnd",	eb08180, 2, (RF, RF_IF),     rd_rm),
25222 cCL("asndp",	eb081a0, 2, (RF, RF_IF),     rd_rm),
25223 cCL("asndm",	eb081c0, 2, (RF, RF_IF),     rd_rm),
25224 cCL("asndz",	eb081e0, 2, (RF, RF_IF),     rd_rm),
25225 cCL("asne",	eb88100, 2, (RF, RF_IF),     rd_rm),
25226 cCL("asnep",	eb88120, 2, (RF, RF_IF),     rd_rm),
25227 cCL("asnem",	eb88140, 2, (RF, RF_IF),     rd_rm),
25228 cCL("asnez",	eb88160, 2, (RF, RF_IF),     rd_rm),
25229
25230 cCL("acss",	ec08100, 2, (RF, RF_IF),     rd_rm),
25231 cCL("acssp",	ec08120, 2, (RF, RF_IF),     rd_rm),
25232 cCL("acssm",	ec08140, 2, (RF, RF_IF),     rd_rm),
25233 cCL("acssz",	ec08160, 2, (RF, RF_IF),     rd_rm),
25234 cCL("acsd",	ec08180, 2, (RF, RF_IF),     rd_rm),
25235 cCL("acsdp",	ec081a0, 2, (RF, RF_IF),     rd_rm),
25236 cCL("acsdm",	ec081c0, 2, (RF, RF_IF),     rd_rm),
25237 cCL("acsdz",	ec081e0, 2, (RF, RF_IF),     rd_rm),
25238 cCL("acse",	ec88100, 2, (RF, RF_IF),     rd_rm),
25239 cCL("acsep",	ec88120, 2, (RF, RF_IF),     rd_rm),
25240 cCL("acsem",	ec88140, 2, (RF, RF_IF),     rd_rm),
25241 cCL("acsez",	ec88160, 2, (RF, RF_IF),     rd_rm),
25242
25243 cCL("atns",	ed08100, 2, (RF, RF_IF),     rd_rm),
25244 cCL("atnsp",	ed08120, 2, (RF, RF_IF),     rd_rm),
25245 cCL("atnsm",	ed08140, 2, (RF, RF_IF),     rd_rm),
25246 cCL("atnsz",	ed08160, 2, (RF, RF_IF),     rd_rm),
25247 cCL("atnd",	ed08180, 2, (RF, RF_IF),     rd_rm),
25248 cCL("atndp",	ed081a0, 2, (RF, RF_IF),     rd_rm),
25249 cCL("atndm",	ed081c0, 2, (RF, RF_IF),     rd_rm),
25250 cCL("atndz",	ed081e0, 2, (RF, RF_IF),     rd_rm),
25251 cCL("atne",	ed88100, 2, (RF, RF_IF),     rd_rm),
25252 cCL("atnep",	ed88120, 2, (RF, RF_IF),     rd_rm),
25253 cCL("atnem",	ed88140, 2, (RF, RF_IF),     rd_rm),
25254 cCL("atnez",	ed88160, 2, (RF, RF_IF),     rd_rm),
25255
25256 cCL("urds",	ee08100, 2, (RF, RF_IF),     rd_rm),
25257 cCL("urdsp",	ee08120, 2, (RF, RF_IF),     rd_rm),
25258 cCL("urdsm",	ee08140, 2, (RF, RF_IF),     rd_rm),
25259 cCL("urdsz",	ee08160, 2, (RF, RF_IF),     rd_rm),
25260 cCL("urdd",	ee08180, 2, (RF, RF_IF),     rd_rm),
25261 cCL("urddp",	ee081a0, 2, (RF, RF_IF),     rd_rm),
25262 cCL("urddm",	ee081c0, 2, (RF, RF_IF),     rd_rm),
25263 cCL("urddz",	ee081e0, 2, (RF, RF_IF),     rd_rm),
25264 cCL("urde",	ee88100, 2, (RF, RF_IF),     rd_rm),
25265 cCL("urdep",	ee88120, 2, (RF, RF_IF),     rd_rm),
25266 cCL("urdem",	ee88140, 2, (RF, RF_IF),     rd_rm),
25267 cCL("urdez",	ee88160, 2, (RF, RF_IF),     rd_rm),
25268
25269 cCL("nrms",	ef08100, 2, (RF, RF_IF),     rd_rm),
25270 cCL("nrmsp",	ef08120, 2, (RF, RF_IF),     rd_rm),
25271 cCL("nrmsm",	ef08140, 2, (RF, RF_IF),     rd_rm),
25272 cCL("nrmsz",	ef08160, 2, (RF, RF_IF),     rd_rm),
25273 cCL("nrmd",	ef08180, 2, (RF, RF_IF),     rd_rm),
25274 cCL("nrmdp",	ef081a0, 2, (RF, RF_IF),     rd_rm),
25275 cCL("nrmdm",	ef081c0, 2, (RF, RF_IF),     rd_rm),
25276 cCL("nrmdz",	ef081e0, 2, (RF, RF_IF),     rd_rm),
25277 cCL("nrme",	ef88100, 2, (RF, RF_IF),     rd_rm),
25278 cCL("nrmep",	ef88120, 2, (RF, RF_IF),     rd_rm),
25279 cCL("nrmem",	ef88140, 2, (RF, RF_IF),     rd_rm),
25280 cCL("nrmez",	ef88160, 2, (RF, RF_IF),     rd_rm),
25281
25282 cCL("adfs",	e000100, 3, (RF, RF, RF_IF), rd_rn_rm),
25283 cCL("adfsp",	e000120, 3, (RF, RF, RF_IF), rd_rn_rm),
25284 cCL("adfsm",	e000140, 3, (RF, RF, RF_IF), rd_rn_rm),
25285 cCL("adfsz",	e000160, 3, (RF, RF, RF_IF), rd_rn_rm),
25286 cCL("adfd",	e000180, 3, (RF, RF, RF_IF), rd_rn_rm),
25287 cCL("adfdp",	e0001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25288 cCL("adfdm",	e0001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25289 cCL("adfdz",	e0001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25290 cCL("adfe",	e080100, 3, (RF, RF, RF_IF), rd_rn_rm),
25291 cCL("adfep",	e080120, 3, (RF, RF, RF_IF), rd_rn_rm),
25292 cCL("adfem",	e080140, 3, (RF, RF, RF_IF), rd_rn_rm),
25293 cCL("adfez",	e080160, 3, (RF, RF, RF_IF), rd_rn_rm),
25294
25295 cCL("sufs",	e200100, 3, (RF, RF, RF_IF), rd_rn_rm),
25296 cCL("sufsp",	e200120, 3, (RF, RF, RF_IF), rd_rn_rm),
25297 cCL("sufsm",	e200140, 3, (RF, RF, RF_IF), rd_rn_rm),
25298 cCL("sufsz",	e200160, 3, (RF, RF, RF_IF), rd_rn_rm),
25299 cCL("sufd",	e200180, 3, (RF, RF, RF_IF), rd_rn_rm),
25300 cCL("sufdp",	e2001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25301 cCL("sufdm",	e2001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25302 cCL("sufdz",	e2001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25303 cCL("sufe",	e280100, 3, (RF, RF, RF_IF), rd_rn_rm),
25304 cCL("sufep",	e280120, 3, (RF, RF, RF_IF), rd_rn_rm),
25305 cCL("sufem",	e280140, 3, (RF, RF, RF_IF), rd_rn_rm),
25306 cCL("sufez",	e280160, 3, (RF, RF, RF_IF), rd_rn_rm),
25307
25308 cCL("rsfs",	e300100, 3, (RF, RF, RF_IF), rd_rn_rm),
25309 cCL("rsfsp",	e300120, 3, (RF, RF, RF_IF), rd_rn_rm),
25310 cCL("rsfsm",	e300140, 3, (RF, RF, RF_IF), rd_rn_rm),
25311 cCL("rsfsz",	e300160, 3, (RF, RF, RF_IF), rd_rn_rm),
25312 cCL("rsfd",	e300180, 3, (RF, RF, RF_IF), rd_rn_rm),
25313 cCL("rsfdp",	e3001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25314 cCL("rsfdm",	e3001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25315 cCL("rsfdz",	e3001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25316 cCL("rsfe",	e380100, 3, (RF, RF, RF_IF), rd_rn_rm),
25317 cCL("rsfep",	e380120, 3, (RF, RF, RF_IF), rd_rn_rm),
25318 cCL("rsfem",	e380140, 3, (RF, RF, RF_IF), rd_rn_rm),
25319 cCL("rsfez",	e380160, 3, (RF, RF, RF_IF), rd_rn_rm),
25320
25321 cCL("mufs",	e100100, 3, (RF, RF, RF_IF), rd_rn_rm),
25322 cCL("mufsp",	e100120, 3, (RF, RF, RF_IF), rd_rn_rm),
25323 cCL("mufsm",	e100140, 3, (RF, RF, RF_IF), rd_rn_rm),
25324 cCL("mufsz",	e100160, 3, (RF, RF, RF_IF), rd_rn_rm),
25325 cCL("mufd",	e100180, 3, (RF, RF, RF_IF), rd_rn_rm),
25326 cCL("mufdp",	e1001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25327 cCL("mufdm",	e1001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25328 cCL("mufdz",	e1001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25329 cCL("mufe",	e180100, 3, (RF, RF, RF_IF), rd_rn_rm),
25330 cCL("mufep",	e180120, 3, (RF, RF, RF_IF), rd_rn_rm),
25331 cCL("mufem",	e180140, 3, (RF, RF, RF_IF), rd_rn_rm),
25332 cCL("mufez",	e180160, 3, (RF, RF, RF_IF), rd_rn_rm),
25333
25334 cCL("dvfs",	e400100, 3, (RF, RF, RF_IF), rd_rn_rm),
25335 cCL("dvfsp",	e400120, 3, (RF, RF, RF_IF), rd_rn_rm),
25336 cCL("dvfsm",	e400140, 3, (RF, RF, RF_IF), rd_rn_rm),
25337 cCL("dvfsz",	e400160, 3, (RF, RF, RF_IF), rd_rn_rm),
25338 cCL("dvfd",	e400180, 3, (RF, RF, RF_IF), rd_rn_rm),
25339 cCL("dvfdp",	e4001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25340 cCL("dvfdm",	e4001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25341 cCL("dvfdz",	e4001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25342 cCL("dvfe",	e480100, 3, (RF, RF, RF_IF), rd_rn_rm),
25343 cCL("dvfep",	e480120, 3, (RF, RF, RF_IF), rd_rn_rm),
25344 cCL("dvfem",	e480140, 3, (RF, RF, RF_IF), rd_rn_rm),
25345 cCL("dvfez",	e480160, 3, (RF, RF, RF_IF), rd_rn_rm),
25346
25347 cCL("rdfs",	e500100, 3, (RF, RF, RF_IF), rd_rn_rm),
25348 cCL("rdfsp",	e500120, 3, (RF, RF, RF_IF), rd_rn_rm),
25349 cCL("rdfsm",	e500140, 3, (RF, RF, RF_IF), rd_rn_rm),
25350 cCL("rdfsz",	e500160, 3, (RF, RF, RF_IF), rd_rn_rm),
25351 cCL("rdfd",	e500180, 3, (RF, RF, RF_IF), rd_rn_rm),
25352 cCL("rdfdp",	e5001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25353 cCL("rdfdm",	e5001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25354 cCL("rdfdz",	e5001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25355 cCL("rdfe",	e580100, 3, (RF, RF, RF_IF), rd_rn_rm),
25356 cCL("rdfep",	e580120, 3, (RF, RF, RF_IF), rd_rn_rm),
25357 cCL("rdfem",	e580140, 3, (RF, RF, RF_IF), rd_rn_rm),
25358 cCL("rdfez",	e580160, 3, (RF, RF, RF_IF), rd_rn_rm),
25359
25360 cCL("pows",	e600100, 3, (RF, RF, RF_IF), rd_rn_rm),
25361 cCL("powsp",	e600120, 3, (RF, RF, RF_IF), rd_rn_rm),
25362 cCL("powsm",	e600140, 3, (RF, RF, RF_IF), rd_rn_rm),
25363 cCL("powsz",	e600160, 3, (RF, RF, RF_IF), rd_rn_rm),
25364 cCL("powd",	e600180, 3, (RF, RF, RF_IF), rd_rn_rm),
25365 cCL("powdp",	e6001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25366 cCL("powdm",	e6001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25367 cCL("powdz",	e6001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25368 cCL("powe",	e680100, 3, (RF, RF, RF_IF), rd_rn_rm),
25369 cCL("powep",	e680120, 3, (RF, RF, RF_IF), rd_rn_rm),
25370 cCL("powem",	e680140, 3, (RF, RF, RF_IF), rd_rn_rm),
25371 cCL("powez",	e680160, 3, (RF, RF, RF_IF), rd_rn_rm),
25372
25373 cCL("rpws",	e700100, 3, (RF, RF, RF_IF), rd_rn_rm),
25374 cCL("rpwsp",	e700120, 3, (RF, RF, RF_IF), rd_rn_rm),
25375 cCL("rpwsm",	e700140, 3, (RF, RF, RF_IF), rd_rn_rm),
25376 cCL("rpwsz",	e700160, 3, (RF, RF, RF_IF), rd_rn_rm),
25377 cCL("rpwd",	e700180, 3, (RF, RF, RF_IF), rd_rn_rm),
25378 cCL("rpwdp",	e7001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25379 cCL("rpwdm",	e7001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25380 cCL("rpwdz",	e7001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25381 cCL("rpwe",	e780100, 3, (RF, RF, RF_IF), rd_rn_rm),
25382 cCL("rpwep",	e780120, 3, (RF, RF, RF_IF), rd_rn_rm),
25383 cCL("rpwem",	e780140, 3, (RF, RF, RF_IF), rd_rn_rm),
25384 cCL("rpwez",	e780160, 3, (RF, RF, RF_IF), rd_rn_rm),
25385
25386 cCL("rmfs",	e800100, 3, (RF, RF, RF_IF), rd_rn_rm),
25387 cCL("rmfsp",	e800120, 3, (RF, RF, RF_IF), rd_rn_rm),
25388 cCL("rmfsm",	e800140, 3, (RF, RF, RF_IF), rd_rn_rm),
25389 cCL("rmfsz",	e800160, 3, (RF, RF, RF_IF), rd_rn_rm),
25390 cCL("rmfd",	e800180, 3, (RF, RF, RF_IF), rd_rn_rm),
25391 cCL("rmfdp",	e8001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25392 cCL("rmfdm",	e8001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25393 cCL("rmfdz",	e8001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25394 cCL("rmfe",	e880100, 3, (RF, RF, RF_IF), rd_rn_rm),
25395 cCL("rmfep",	e880120, 3, (RF, RF, RF_IF), rd_rn_rm),
25396 cCL("rmfem",	e880140, 3, (RF, RF, RF_IF), rd_rn_rm),
25397 cCL("rmfez",	e880160, 3, (RF, RF, RF_IF), rd_rn_rm),
25398
25399 cCL("fmls",	e900100, 3, (RF, RF, RF_IF), rd_rn_rm),
25400 cCL("fmlsp",	e900120, 3, (RF, RF, RF_IF), rd_rn_rm),
25401 cCL("fmlsm",	e900140, 3, (RF, RF, RF_IF), rd_rn_rm),
25402 cCL("fmlsz",	e900160, 3, (RF, RF, RF_IF), rd_rn_rm),
25403 cCL("fmld",	e900180, 3, (RF, RF, RF_IF), rd_rn_rm),
25404 cCL("fmldp",	e9001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25405 cCL("fmldm",	e9001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25406 cCL("fmldz",	e9001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25407 cCL("fmle",	e980100, 3, (RF, RF, RF_IF), rd_rn_rm),
25408 cCL("fmlep",	e980120, 3, (RF, RF, RF_IF), rd_rn_rm),
25409 cCL("fmlem",	e980140, 3, (RF, RF, RF_IF), rd_rn_rm),
25410 cCL("fmlez",	e980160, 3, (RF, RF, RF_IF), rd_rn_rm),
25411
25412 cCL("fdvs",	ea00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25413 cCL("fdvsp",	ea00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25414 cCL("fdvsm",	ea00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25415 cCL("fdvsz",	ea00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25416 cCL("fdvd",	ea00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25417 cCL("fdvdp",	ea001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25418 cCL("fdvdm",	ea001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25419 cCL("fdvdz",	ea001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25420 cCL("fdve",	ea80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25421 cCL("fdvep",	ea80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25422 cCL("fdvem",	ea80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25423 cCL("fdvez",	ea80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25424
25425 cCL("frds",	eb00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25426 cCL("frdsp",	eb00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25427 cCL("frdsm",	eb00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25428 cCL("frdsz",	eb00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25429 cCL("frdd",	eb00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25430 cCL("frddp",	eb001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25431 cCL("frddm",	eb001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25432 cCL("frddz",	eb001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25433 cCL("frde",	eb80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25434 cCL("frdep",	eb80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25435 cCL("frdem",	eb80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25436 cCL("frdez",	eb80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25437
25438 cCL("pols",	ec00100, 3, (RF, RF, RF_IF), rd_rn_rm),
25439 cCL("polsp",	ec00120, 3, (RF, RF, RF_IF), rd_rn_rm),
25440 cCL("polsm",	ec00140, 3, (RF, RF, RF_IF), rd_rn_rm),
25441 cCL("polsz",	ec00160, 3, (RF, RF, RF_IF), rd_rn_rm),
25442 cCL("pold",	ec00180, 3, (RF, RF, RF_IF), rd_rn_rm),
25443 cCL("poldp",	ec001a0, 3, (RF, RF, RF_IF), rd_rn_rm),
25444 cCL("poldm",	ec001c0, 3, (RF, RF, RF_IF), rd_rn_rm),
25445 cCL("poldz",	ec001e0, 3, (RF, RF, RF_IF), rd_rn_rm),
25446 cCL("pole",	ec80100, 3, (RF, RF, RF_IF), rd_rn_rm),
25447 cCL("polep",	ec80120, 3, (RF, RF, RF_IF), rd_rn_rm),
25448 cCL("polem",	ec80140, 3, (RF, RF, RF_IF), rd_rn_rm),
25449 cCL("polez",	ec80160, 3, (RF, RF, RF_IF), rd_rn_rm),
25450
25451 cCE("cmf",	e90f110, 2, (RF, RF_IF),     fpa_cmp),
25452 C3E("cmfe",	ed0f110, 2, (RF, RF_IF),     fpa_cmp),
25453 cCE("cnf",	eb0f110, 2, (RF, RF_IF),     fpa_cmp),
25454 C3E("cnfe",	ef0f110, 2, (RF, RF_IF),     fpa_cmp),
25455
25456 cCL("flts",	e000110, 2, (RF, RR),	     rn_rd),
25457 cCL("fltsp",	e000130, 2, (RF, RR),	     rn_rd),
25458 cCL("fltsm",	e000150, 2, (RF, RR),	     rn_rd),
25459 cCL("fltsz",	e000170, 2, (RF, RR),	     rn_rd),
25460 cCL("fltd",	e000190, 2, (RF, RR),	     rn_rd),
25461 cCL("fltdp",	e0001b0, 2, (RF, RR),	     rn_rd),
25462 cCL("fltdm",	e0001d0, 2, (RF, RR),	     rn_rd),
25463 cCL("fltdz",	e0001f0, 2, (RF, RR),	     rn_rd),
25464 cCL("flte",	e080110, 2, (RF, RR),	     rn_rd),
25465 cCL("fltep",	e080130, 2, (RF, RR),	     rn_rd),
25466 cCL("fltem",	e080150, 2, (RF, RR),	     rn_rd),
25467 cCL("fltez",	e080170, 2, (RF, RR),	     rn_rd),
25468
25469  /* The implementation of the FIX instruction is broken on some
25470     assemblers, in that it accepts a precision specifier as well as a
25471     rounding specifier, despite the fact that this is meaningless.
25472     To be more compatible, we accept it as well, though of course it
25473     does not set any bits.  */
25474 cCE("fix",	e100110, 2, (RR, RF),	     rd_rm),
25475 cCL("fixp",	e100130, 2, (RR, RF),	     rd_rm),
25476 cCL("fixm",	e100150, 2, (RR, RF),	     rd_rm),
25477 cCL("fixz",	e100170, 2, (RR, RF),	     rd_rm),
25478 cCL("fixsp",	e100130, 2, (RR, RF),	     rd_rm),
25479 cCL("fixsm",	e100150, 2, (RR, RF),	     rd_rm),
25480 cCL("fixsz",	e100170, 2, (RR, RF),	     rd_rm),
25481 cCL("fixdp",	e100130, 2, (RR, RF),	     rd_rm),
25482 cCL("fixdm",	e100150, 2, (RR, RF),	     rd_rm),
25483 cCL("fixdz",	e100170, 2, (RR, RF),	     rd_rm),
25484 cCL("fixep",	e100130, 2, (RR, RF),	     rd_rm),
25485 cCL("fixem",	e100150, 2, (RR, RF),	     rd_rm),
25486 cCL("fixez",	e100170, 2, (RR, RF),	     rd_rm),
25487
25488  /* Instructions that were new with the real FPA, call them V2.  */
25489#undef  ARM_VARIANT
25490#define ARM_VARIANT  & fpu_fpa_ext_v2
25491
25492 cCE("lfm",	c100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25493 cCL("lfmfd",	c900200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25494 cCL("lfmea",	d100200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25495 cCE("sfm",	c000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25496 cCL("sfmfd",	d000200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25497 cCL("sfmea",	c800200, 3, (RF, I4b, ADDR), fpa_ldmstm),
25498
25499#undef  ARM_VARIANT
25500#define ARM_VARIANT  & fpu_vfp_ext_v1xd  /* VFP V1xD (single precision).  */
25501#undef THUMB_VARIANT
25502#define THUMB_VARIANT  & arm_ext_v6t2
25503 mcCE(vmrs,	ef00a10, 2, (APSR_RR, RVC),   vmrs),
25504 mcCE(vmsr,	ee00a10, 2, (RVC, RR),        vmsr),
25505 mcCE(fldd,	d100b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
25506 mcCE(fstd,	d000b00, 2, (RVD, ADDRGLDC),  vfp_dp_ldst),
25507 mcCE(flds,	d100a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
25508 mcCE(fsts,	d000a00, 2, (RVS, ADDRGLDC),  vfp_sp_ldst),
25509
25510  /* Memory operations.	 */
25511 mcCE(fldmias,	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25512 mcCE(fldmdbs,	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25513 mcCE(fstmias,	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25514 mcCE(fstmdbs,	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25515#undef THUMB_VARIANT
25516
25517  /* Moves and type conversions.  */
25518 cCE("fmstat",	ef1fa10, 0, (),		      noargs),
25519 cCE("fsitos",	eb80ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25520 cCE("fuitos",	eb80a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25521 cCE("ftosis",	ebd0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25522 cCE("ftosizs",	ebd0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25523 cCE("ftouis",	ebc0a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25524 cCE("ftouizs",	ebc0ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25525 cCE("fmrx",	ef00a10, 2, (RR, RVC),	      rd_rn),
25526 cCE("fmxr",	ee00a10, 2, (RVC, RR),	      rn_rd),
25527
25528  /* Memory operations.	 */
25529 cCE("fldmfds",	c900a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25530 cCE("fldmeas",	d300a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25531 cCE("fldmiax",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25532 cCE("fldmfdx",	c900b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25533 cCE("fldmdbx",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25534 cCE("fldmeax",	d300b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25535 cCE("fstmeas",	c800a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmia),
25536 cCE("fstmfds",	d200a00, 2, (RRnpctw, VRSLST),    vfp_sp_ldstmdb),
25537 cCE("fstmiax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25538 cCE("fstmeax",	c800b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmia),
25539 cCE("fstmdbx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25540 cCE("fstmfdx",	d200b00, 2, (RRnpctw, VRDLST),    vfp_xp_ldstmdb),
25541
25542  /* Monadic operations.  */
25543 cCE("fabss",	eb00ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25544 cCE("fnegs",	eb10a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25545 cCE("fsqrts",	eb10ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25546
25547  /* Dyadic operations.	 */
25548 cCE("fadds",	e300a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25549 cCE("fsubs",	e300a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25550 cCE("fmuls",	e200a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25551 cCE("fdivs",	e800a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25552 cCE("fmacs",	e000a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25553 cCE("fmscs",	e100a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25554 cCE("fnmuls",	e200a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25555 cCE("fnmacs",	e000a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25556 cCE("fnmscs",	e100a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25557
25558  /* Comparisons.  */
25559 cCE("fcmps",	eb40a40, 2, (RVS, RVS),	      vfp_sp_monadic),
25560 cCE("fcmpzs",	eb50a40, 1, (RVS),	      vfp_sp_compare_z),
25561 cCE("fcmpes",	eb40ac0, 2, (RVS, RVS),	      vfp_sp_monadic),
25562 cCE("fcmpezs",	eb50ac0, 1, (RVS),	      vfp_sp_compare_z),
25563
25564 /* Double precision load/store are still present on single precision
25565    implementations.  */
25566 cCE("fldmiad",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25567 cCE("fldmfdd",	c900b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25568 cCE("fldmdbd",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25569 cCE("fldmead",	d300b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25570 cCE("fstmiad",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25571 cCE("fstmead",	c800b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmia),
25572 cCE("fstmdbd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25573 cCE("fstmfdd",	d200b00, 2, (RRnpctw, VRDLST),    vfp_dp_ldstmdb),
25574
25575#undef  ARM_VARIANT
25576#define ARM_VARIANT  & fpu_vfp_ext_v1 /* VFP V1 (Double precision).  */
25577
25578  /* Moves and type conversions.  */
25579 cCE("fcvtds",	eb70ac0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25580 cCE("fcvtsd",	eb70bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25581 cCE("fmdhr",	e200b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25582 cCE("fmdlr",	e000b10, 2, (RVD, RR),	      vfp_dp_rn_rd),
25583 cCE("fmrdh",	e300b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25584 cCE("fmrdl",	e100b10, 2, (RR, RVD),	      vfp_dp_rd_rn),
25585 cCE("fsitod",	eb80bc0, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25586 cCE("fuitod",	eb80b40, 2, (RVD, RVS),	      vfp_dp_sp_cvt),
25587 cCE("ftosid",	ebd0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25588 cCE("ftosizd",	ebd0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25589 cCE("ftouid",	ebc0b40, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25590 cCE("ftouizd",	ebc0bc0, 2, (RVS, RVD),	      vfp_sp_dp_cvt),
25591
25592  /* Monadic operations.  */
25593 cCE("fabsd",	eb00bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25594 cCE("fnegd",	eb10b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25595 cCE("fsqrtd",	eb10bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25596
25597  /* Dyadic operations.	 */
25598 cCE("faddd",	e300b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25599 cCE("fsubd",	e300b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25600 cCE("fmuld",	e200b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25601 cCE("fdivd",	e800b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25602 cCE("fmacd",	e000b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25603 cCE("fmscd",	e100b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25604 cCE("fnmuld",	e200b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25605 cCE("fnmacd",	e000b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25606 cCE("fnmscd",	e100b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25607
25608  /* Comparisons.  */
25609 cCE("fcmpd",	eb40b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25610 cCE("fcmpzd",	eb50b40, 1, (RVD),	      vfp_dp_rd),
25611 cCE("fcmped",	eb40bc0, 2, (RVD, RVD),	      vfp_dp_rd_rm),
25612 cCE("fcmpezd",	eb50bc0, 1, (RVD),	      vfp_dp_rd),
25613
25614/* Instructions which may belong to either the Neon or VFP instruction sets.
25615   Individual encoder functions perform additional architecture checks.  */
25616#undef  ARM_VARIANT
25617#define ARM_VARIANT    & fpu_vfp_ext_v1xd
25618#undef  THUMB_VARIANT
25619#define THUMB_VARIANT  & arm_ext_v6t2
25620
25621 NCE(vldm,      c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25622 NCE(vldmia,    c900b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25623 NCE(vldmdb,    d100b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25624 NCE(vstm,      c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25625 NCE(vstmia,    c800b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25626 NCE(vstmdb,    d000b00, 2, (RRnpctw, VRSDLST), neon_ldm_stm),
25627
25628 NCE(vpop,      0,       1, (VRSDLST),          vfp_nsyn_pop),
25629 NCE(vpush,     0,       1, (VRSDLST),          vfp_nsyn_push),
25630
25631#undef  THUMB_VARIANT
25632#define THUMB_VARIANT  & fpu_vfp_ext_v1xd
25633
25634  /* These mnemonics are unique to VFP.  */
25635 NCE(vsqrt,     0,       2, (RVSD, RVSD),       vfp_nsyn_sqrt),
25636 NCE(vdiv,      0,       3, (RVSD, RVSD, RVSD), vfp_nsyn_div),
25637 nCE(vnmul,     _vnmul,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25638 nCE(vnmla,     _vnmla,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25639 nCE(vnmls,     _vnmls,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25640 NCE(vcvtz,     0,       2, (RVSD, RVSD),       vfp_nsyn_cvtz),
25641
25642  /* Mnemonics shared by Neon and VFP.  */
25643 nCEF(vmls,     _vmls,    3, (RNSDQ, oRNSDQ, RNSDQ_RNSC), neon_mac_maybe_scalar),
25644
25645 mnCEF(vcvt,     _vcvt,   3, (RNSDQMQ, RNSDQMQ, oI32z), neon_cvt),
25646 nCEF(vcvtr,    _vcvt,   2, (RNSDQ, RNSDQ), neon_cvtr),
25647 MNCEF(vcvtb,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtb),
25648 MNCEF(vcvtt,	eb20a40, 3, (RVSDMQ, RVSDMQ, oI32b), neon_cvtt),
25649
25650
25651  /* NOTE: All VMOV encoding is special-cased!  */
25652 NCE(vmovq,     0,       1, (VMOV), neon_mov),
25653
25654#undef  THUMB_VARIANT
25655/* Could be either VLDR/VSTR or VLDR/VSTR (system register) which are guarded
25656   by different feature bits.  Since we are setting the Thumb guard, we can
25657   require Thumb-1 which makes it a nop guard and set the right feature bit in
25658   do_vldr_vstr ().  */
25659#define THUMB_VARIANT  & arm_ext_v4t
25660 NCE(vldr,      d100b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25661 NCE(vstr,      d000b00, 2, (VLDR, ADDRGLDC), vldr_vstr),
25662
25663#undef  ARM_VARIANT
25664#define ARM_VARIANT    & arm_ext_fp16
25665#undef  THUMB_VARIANT
25666#define THUMB_VARIANT  & arm_ext_fp16
25667 /* New instructions added from v8.2, allowing the extraction and insertion of
25668    the upper 16 bits of a 32-bit vector register.  */
25669 NCE (vmovx,     eb00a40,       2, (RVS, RVS), neon_movhf),
25670 NCE (vins,      eb00ac0,       2, (RVS, RVS), neon_movhf),
25671
25672 /* New backported fma/fms instructions optional in v8.2.  */
25673 NUF (vfmsl, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmsl),
25674 NUF (vfmal, 810, 3, (RNDQ, RNSD, RNSD_RNSC), neon_vfmal),
25675
25676#undef  THUMB_VARIANT
25677#define THUMB_VARIANT  & fpu_neon_ext_v1
25678#undef  ARM_VARIANT
25679#define ARM_VARIANT    & fpu_neon_ext_v1
25680
25681  /* Data processing with three registers of the same length.  */
25682  /* integer ops, valid types S8 S16 S32 U8 U16 U32.  */
25683 NUF(vaba,      0000710, 3, (RNDQ, RNDQ,  RNDQ), neon_dyadic_i_su),
25684 NUF(vabaq,     0000710, 3, (RNQ,  RNQ,   RNQ),  neon_dyadic_i_su),
25685 NUF(vhaddq,    0000000, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25686 NUF(vrhaddq,   0000100, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25687 NUF(vhsubq,    0000200, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i_su),
25688  /* integer ops, valid types S8 S16 S32 S64 U8 U16 U32 U64.  */
25689 NUF(vqaddq,    0000010, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25690 NUF(vqsubq,    0000210, 3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_i64_su),
25691 NUF(vrshlq,    0000500, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25692 NUF(vqrshlq,   0000510, 3, (RNQ,  oRNQ,  RNQ),  neon_rshl),
25693  /* If not immediate, fall back to neon_dyadic_i64_su.
25694     shl should accept I8 I16 I32 I64,
25695     qshl should accept S8 S16 S32 S64 U8 U16 U32 U64.  */
25696 nUF(vshlq,     _vshl,    3, (RNQ,  oRNQ,  RNDQ_I63b), neon_shl),
25697 nUF(vqshlq,    _vqshl,   3, (RNQ,  oRNQ,  RNDQ_I63b), neon_qshl),
25698  /* Logic ops, types optional & ignored.  */
25699 nUF(vandq,     _vand,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25700 nUF(vbicq,     _vbic,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25701 nUF(vorrq,     _vorr,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25702 nUF(vornq,     _vorn,    3, (RNQ,  oRNQ,  RNDQ_Ibig), neon_logic),
25703 nUF(veorq,     _veor,    3, (RNQ,  oRNQ,  RNQ),       neon_logic),
25704  /* Bitfield ops, untyped.  */
25705 NUF(vbsl,      1100110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25706 NUF(vbslq,     1100110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25707 NUF(vbit,      1200110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25708 NUF(vbitq,     1200110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25709 NUF(vbif,      1300110, 3, (RNDQ, RNDQ, RNDQ), neon_bitfield),
25710 NUF(vbifq,     1300110, 3, (RNQ,  RNQ,  RNQ),  neon_bitfield),
25711  /* Int and float variants, types S8 S16 S32 U8 U16 U32 F16 F32.  */
25712 nUF(vabdq,     _vabd,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25713 nUF(vmaxq,     _vmax,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25714 nUF(vminq,     _vmin,    3, (RNQ,  oRNQ,  RNQ),  neon_dyadic_if_su),
25715  /* Comparisons. Types S8 S16 S32 U8 U16 U32 F32. Non-immediate versions fall
25716     back to neon_dyadic_if_su.  */
25717 nUF(vcge,      _vcge,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25718 nUF(vcgeq,     _vcge,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25719 nUF(vcgt,      _vcgt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp),
25720 nUF(vcgtq,     _vcgt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp),
25721 nUF(vclt,      _vclt,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25722 nUF(vcltq,     _vclt,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25723 nUF(vcle,      _vcle,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_cmp_inv),
25724 nUF(vcleq,     _vcle,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_cmp_inv),
25725  /* Comparison. Type I8 I16 I32 F32.  */
25726 nUF(vceq,      _vceq,    3, (RNDQ, oRNDQ, RNDQ_I0), neon_ceq),
25727 nUF(vceqq,     _vceq,    3, (RNQ,  oRNQ,  RNDQ_I0), neon_ceq),
25728  /* As above, D registers only.  */
25729 nUF(vpmax,     _vpmax,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25730 nUF(vpmin,     _vpmin,   3, (RND, oRND, RND), neon_dyadic_if_su_d),
25731  /* Int and float variants, signedness unimportant.  */
25732 nUF(vmlaq,     _vmla,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25733 nUF(vmlsq,     _vmls,    3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mac_maybe_scalar),
25734 nUF(vpadd,     _vpadd,   3, (RND,  oRND,  RND),       neon_dyadic_if_i_d),
25735  /* Add/sub take types I8 I16 I32 I64 F32.  */
25736 nUF(vaddq,     _vadd,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25737 nUF(vsubq,     _vsub,    3, (RNQ,  oRNQ,  RNQ),  neon_addsub_if_i),
25738  /* vtst takes sizes 8, 16, 32.  */
25739 NUF(vtst,      0000810, 3, (RNDQ, oRNDQ, RNDQ), neon_tst),
25740 NUF(vtstq,     0000810, 3, (RNQ,  oRNQ,  RNQ),  neon_tst),
25741  /* VMUL takes I8 I16 I32 F32 P8.  */
25742 nUF(vmulq,     _vmul,     3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_mul),
25743  /* VQD{R}MULH takes S16 S32.  */
25744 nUF(vqdmulhq,  _vqdmulh,  3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25745 nUF(vqrdmulhq, _vqrdmulh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qdmulh),
25746 NUF(vacge,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25747 NUF(vacgeq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25748 NUF(vacgt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute),
25749 NUF(vacgtq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute),
25750 NUF(vaclt,     0200e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25751 NUF(vacltq,    0200e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25752 NUF(vacle,     0000e10,  3, (RNDQ, oRNDQ, RNDQ), neon_fcmp_absolute_inv),
25753 NUF(vacleq,    0000e10,  3, (RNQ,  oRNQ,  RNQ),  neon_fcmp_absolute_inv),
25754 NUF(vrecps,    0000f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25755 NUF(vrecpsq,   0000f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25756 NUF(vrsqrts,   0200f10,  3, (RNDQ, oRNDQ, RNDQ), neon_step),
25757 NUF(vrsqrtsq,  0200f10,  3, (RNQ,  oRNQ,  RNQ),  neon_step),
25758 /* ARM v8.1 extension.  */
25759 nUF (vqrdmlahq, _vqrdmlah, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25760 nUF (vqrdmlsh,  _vqrdmlsh, 3, (RNDQ, oRNDQ, RNDQ_RNSC), neon_qrdmlah),
25761 nUF (vqrdmlshq, _vqrdmlsh, 3, (RNQ,  oRNQ,  RNDQ_RNSC), neon_qrdmlah),
25762
25763  /* Two address, int/float. Types S8 S16 S32 F32.  */
25764 NUF(vabsq,     1b10300, 2, (RNQ,  RNQ),      neon_abs_neg),
25765 NUF(vnegq,     1b10380, 2, (RNQ,  RNQ),      neon_abs_neg),
25766
25767  /* Data processing with two registers and a shift amount.  */
25768  /* Right shifts, and variants with rounding.
25769     Types accepted S8 S16 S32 S64 U8 U16 U32 U64.  */
25770 NUF(vshrq,     0800010, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25771 NUF(vrshrq,    0800210, 3, (RNQ,  oRNQ,  I64z), neon_rshift_round_imm),
25772 NUF(vsra,      0800110, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25773 NUF(vsraq,     0800110, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25774 NUF(vrsra,     0800310, 3, (RNDQ, oRNDQ, I64),  neon_rshift_round_imm),
25775 NUF(vrsraq,    0800310, 3, (RNQ,  oRNQ,  I64),  neon_rshift_round_imm),
25776  /* Shift and insert. Sizes accepted 8 16 32 64.  */
25777 NUF(vsliq,     1800510, 3, (RNQ,  oRNQ,  I63), neon_sli),
25778 NUF(vsriq,     1800410, 3, (RNQ,  oRNQ,  I64), neon_sri),
25779  /* QSHL{U} immediate accepts S8 S16 S32 S64 U8 U16 U32 U64.  */
25780 NUF(vqshluq,   1800610, 3, (RNQ,  oRNQ,  I63), neon_qshlu_imm),
25781  /* Right shift immediate, saturating & narrowing, with rounding variants.
25782     Types accepted S16 S32 S64 U16 U32 U64.  */
25783 NUF(vqshrn,    0800910, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25784 NUF(vqrshrn,   0800950, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow),
25785  /* As above, unsigned. Types accepted S16 S32 S64.  */
25786 NUF(vqshrun,   0800810, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25787 NUF(vqrshrun,  0800850, 3, (RND, RNQ, I32z), neon_rshift_sat_narrow_u),
25788  /* Right shift narrowing. Types accepted I16 I32 I64.  */
25789 NUF(vshrn,     0800810, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25790 NUF(vrshrn,    0800850, 3, (RND, RNQ, I32z), neon_rshift_narrow),
25791  /* Special case. Types S8 S16 S32 U8 U16 U32. Handles max shift variant.  */
25792 nUF(vshll,     _vshll,   3, (RNQ, RND, I32),  neon_shll),
25793  /* CVT with optional immediate for fixed-point variant.  */
25794 nUF(vcvtq,     _vcvt,    3, (RNQ, RNQ, oI32b), neon_cvt),
25795
25796 nUF(vmvnq,     _vmvn,    2, (RNQ,  RNDQ_Ibig), neon_mvn),
25797
25798  /* Data processing, three registers of different lengths.  */
25799  /* Dyadic, long insns. Types S8 S16 S32 U8 U16 U32.  */
25800 NUF(vabal,     0800500, 3, (RNQ, RND, RND),  neon_abal),
25801  /* If not scalar, fall back to neon_dyadic_long.
25802     Vector types as above, scalar types S16 S32 U16 U32.  */
25803 nUF(vmlal,     _vmlal,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25804 nUF(vmlsl,     _vmlsl,   3, (RNQ, RND, RND_RNSC), neon_mac_maybe_scalar_long),
25805  /* Dyadic, widening insns. Types S8 S16 S32 U8 U16 U32.  */
25806 NUF(vaddw,     0800100, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25807 NUF(vsubw,     0800300, 3, (RNQ, oRNQ, RND), neon_dyadic_wide),
25808  /* Dyadic, narrowing insns. Types I16 I32 I64.  */
25809 NUF(vaddhn,    0800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25810 NUF(vraddhn,   1800400, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25811 NUF(vsubhn,    0800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25812 NUF(vrsubhn,   1800600, 3, (RND, RNQ, RNQ),  neon_dyadic_narrow),
25813  /* Saturating doubling multiplies. Types S16 S32.  */
25814 nUF(vqdmlal,   _vqdmlal, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25815 nUF(vqdmlsl,   _vqdmlsl, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25816 nUF(vqdmull,   _vqdmull, 3, (RNQ, RND, RND_RNSC), neon_mul_sat_scalar_long),
25817  /* VMULL. Vector types S8 S16 S32 U8 U16 U32 P8, scalar types
25818     S16 S32 U16 U32.  */
25819 nUF(vmull,     _vmull,   3, (RNQ, RND, RND_RNSC), neon_vmull),
25820
25821  /* Extract. Size 8.  */
25822 NUF(vext,      0b00000, 4, (RNDQ, oRNDQ, RNDQ, I15), neon_ext),
25823 NUF(vextq,     0b00000, 4, (RNQ,  oRNQ,  RNQ,  I15), neon_ext),
25824
25825  /* Two registers, miscellaneous.  */
25826  /* Reverse. Sizes 8 16 32 (must be < size in opcode).  */
25827 NUF(vrev64q,   1b00000, 2, (RNQ,  RNQ),      neon_rev),
25828 NUF(vrev32q,   1b00080, 2, (RNQ,  RNQ),      neon_rev),
25829 NUF(vrev16q,   1b00100, 2, (RNQ,  RNQ),      neon_rev),
25830  /* Vector replicate. Sizes 8 16 32.  */
25831 nCE(vdupq,     _vdup,    2, (RNQ,  RR_RNSC),  neon_dup),
25832  /* VMOVL. Types S8 S16 S32 U8 U16 U32.  */
25833 NUF(vmovl,     0800a10, 2, (RNQ, RND),       neon_movl),
25834  /* VMOVN. Types I16 I32 I64.  */
25835 nUF(vmovn,     _vmovn,   2, (RND, RNQ),       neon_movn),
25836  /* VQMOVN. Types S16 S32 S64 U16 U32 U64.  */
25837 nUF(vqmovn,    _vqmovn,  2, (RND, RNQ),       neon_qmovn),
25838  /* VQMOVUN. Types S16 S32 S64.  */
25839 nUF(vqmovun,   _vqmovun, 2, (RND, RNQ),       neon_qmovun),
25840  /* VZIP / VUZP. Sizes 8 16 32.  */
25841 NUF(vzip,      1b20180, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25842 NUF(vzipq,     1b20180, 2, (RNQ,  RNQ),      neon_zip_uzp),
25843 NUF(vuzp,      1b20100, 2, (RNDQ, RNDQ),     neon_zip_uzp),
25844 NUF(vuzpq,     1b20100, 2, (RNQ,  RNQ),      neon_zip_uzp),
25845  /* VQABS / VQNEG. Types S8 S16 S32.  */
25846 NUF(vqabsq,    1b00700, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25847 NUF(vqnegq,    1b00780, 2, (RNQ,  RNQ),      neon_sat_abs_neg),
25848  /* Pairwise, lengthening. Types S8 S16 S32 U8 U16 U32.  */
25849 NUF(vpadal,    1b00600, 2, (RNDQ, RNDQ),     neon_pair_long),
25850 NUF(vpadalq,   1b00600, 2, (RNQ,  RNQ),      neon_pair_long),
25851 NUF(vpaddl,    1b00200, 2, (RNDQ, RNDQ),     neon_pair_long),
25852 NUF(vpaddlq,   1b00200, 2, (RNQ,  RNQ),      neon_pair_long),
25853  /* Reciprocal estimates.  Types U32 F16 F32.  */
25854 NUF(vrecpe,    1b30400, 2, (RNDQ, RNDQ),     neon_recip_est),
25855 NUF(vrecpeq,   1b30400, 2, (RNQ,  RNQ),      neon_recip_est),
25856 NUF(vrsqrte,   1b30480, 2, (RNDQ, RNDQ),     neon_recip_est),
25857 NUF(vrsqrteq,  1b30480, 2, (RNQ,  RNQ),      neon_recip_est),
25858  /* VCLS. Types S8 S16 S32.  */
25859 NUF(vclsq,     1b00400, 2, (RNQ,  RNQ),      neon_cls),
25860  /* VCLZ. Types I8 I16 I32.  */
25861 NUF(vclzq,     1b00480, 2, (RNQ,  RNQ),      neon_clz),
25862  /* VCNT. Size 8.  */
25863 NUF(vcnt,      1b00500, 2, (RNDQ, RNDQ),     neon_cnt),
25864 NUF(vcntq,     1b00500, 2, (RNQ,  RNQ),      neon_cnt),
25865  /* Two address, untyped.  */
25866 NUF(vswp,      1b20000, 2, (RNDQ, RNDQ),     neon_swp),
25867 NUF(vswpq,     1b20000, 2, (RNQ,  RNQ),      neon_swp),
25868  /* VTRN. Sizes 8 16 32.  */
25869 nUF(vtrn,      _vtrn,    2, (RNDQ, RNDQ),     neon_trn),
25870 nUF(vtrnq,     _vtrn,    2, (RNQ,  RNQ),      neon_trn),
25871
25872  /* Table lookup. Size 8.  */
25873 NUF(vtbl,      1b00800, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25874 NUF(vtbx,      1b00840, 3, (RND, NRDLST, RND), neon_tbl_tbx),
25875
25876#undef  THUMB_VARIANT
25877#define THUMB_VARIANT  & fpu_vfp_v3_or_neon_ext
25878#undef  ARM_VARIANT
25879#define ARM_VARIANT    & fpu_vfp_v3_or_neon_ext
25880
25881  /* Neon element/structure load/store.  */
25882 nUF(vld1,      _vld1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25883 nUF(vst1,      _vst1,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25884 nUF(vld2,      _vld2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25885 nUF(vst2,      _vst2,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25886 nUF(vld3,      _vld3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25887 nUF(vst3,      _vst3,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25888 nUF(vld4,      _vld4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25889 nUF(vst4,      _vst4,    2, (NSTRLST, ADDR),  neon_ldx_stx),
25890
25891#undef  THUMB_VARIANT
25892#define THUMB_VARIANT & fpu_vfp_ext_v3xd
25893#undef  ARM_VARIANT
25894#define ARM_VARIANT   & fpu_vfp_ext_v3xd
25895 cCE("fconsts",   eb00a00, 2, (RVS, I255),      vfp_sp_const),
25896 cCE("fshtos",    eba0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25897 cCE("fsltos",    eba0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25898 cCE("fuhtos",    ebb0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25899 cCE("fultos",    ebb0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25900 cCE("ftoshs",    ebe0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25901 cCE("ftosls",    ebe0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25902 cCE("ftouhs",    ebf0a40, 2, (RVS, I16z),      vfp_sp_conv_16),
25903 cCE("ftouls",    ebf0ac0, 2, (RVS, I32),       vfp_sp_conv_32),
25904
25905#undef  THUMB_VARIANT
25906#define THUMB_VARIANT  & fpu_vfp_ext_v3
25907#undef  ARM_VARIANT
25908#define ARM_VARIANT    & fpu_vfp_ext_v3
25909
25910 cCE("fconstd",   eb00b00, 2, (RVD, I255),      vfp_dp_const),
25911 cCE("fshtod",    eba0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25912 cCE("fsltod",    eba0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25913 cCE("fuhtod",    ebb0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25914 cCE("fultod",    ebb0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25915 cCE("ftoshd",    ebe0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25916 cCE("ftosld",    ebe0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25917 cCE("ftouhd",    ebf0b40, 2, (RVD, I16z),      vfp_dp_conv_16),
25918 cCE("ftould",    ebf0bc0, 2, (RVD, I32),       vfp_dp_conv_32),
25919
25920#undef  ARM_VARIANT
25921#define ARM_VARIANT    & fpu_vfp_ext_fma
25922#undef  THUMB_VARIANT
25923#define THUMB_VARIANT  & fpu_vfp_ext_fma
25924 /* Mnemonics shared by Neon, VFP, MVE and BF16.  These are included in the
25925    VFP FMA variant; NEON and VFP FMA always includes the NEON
25926    FMA instructions.  */
25927 mnCEF(vfma,     _vfma,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR), neon_fmac),
25928 TUF ("vfmat",    c300850,    fc300850,  3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), mve_vfma, mve_vfma),
25929 mnCEF(vfms,     _vfms,    3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),  neon_fmac),
25930
25931 /* ffmas/ffmad/ffmss/ffmsd are dummy mnemonics to satisfy gas;
25932    the v form should always be used.  */
25933 cCE("ffmas",	ea00a00, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25934 cCE("ffnmas",	ea00a40, 3, (RVS, RVS, RVS),  vfp_sp_dyadic),
25935 cCE("ffmad",	ea00b00, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25936 cCE("ffnmad",	ea00b40, 3, (RVD, RVD, RVD),  vfp_dp_rd_rn_rm),
25937 nCE(vfnma,     _vfnma,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25938 nCE(vfnms,     _vfnms,   3, (RVSD, RVSD, RVSD), vfp_nsyn_nmul),
25939
25940#undef THUMB_VARIANT
25941#undef  ARM_VARIANT
25942#define ARM_VARIANT  & arm_cext_xscale /* Intel XScale extensions.  */
25943
25944 cCE("mia",	e200010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25945 cCE("miaph",	e280010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25946 cCE("miabb",	e2c0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25947 cCE("miabt",	e2d0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25948 cCE("miatb",	e2e0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25949 cCE("miatt",	e2f0010, 3, (RXA, RRnpc, RRnpc), xsc_mia),
25950 cCE("mar",	c400000, 3, (RXA, RRnpc, RRnpc), xsc_mar),
25951 cCE("mra",	c500000, 3, (RRnpc, RRnpc, RXA), xsc_mra),
25952
25953#undef  ARM_VARIANT
25954#define ARM_VARIANT  & arm_cext_iwmmxt /* Intel Wireless MMX technology.  */
25955
25956 cCE("tandcb",	e13f130, 1, (RR),		    iwmmxt_tandorc),
25957 cCE("tandch",	e53f130, 1, (RR),		    iwmmxt_tandorc),
25958 cCE("tandcw",	e93f130, 1, (RR),		    iwmmxt_tandorc),
25959 cCE("tbcstb",	e400010, 2, (RIWR, RR),		    rn_rd),
25960 cCE("tbcsth",	e400050, 2, (RIWR, RR),		    rn_rd),
25961 cCE("tbcstw",	e400090, 2, (RIWR, RR),		    rn_rd),
25962 cCE("textrcb",	e130170, 2, (RR, I7),		    iwmmxt_textrc),
25963 cCE("textrch",	e530170, 2, (RR, I7),		    iwmmxt_textrc),
25964 cCE("textrcw",	e930170, 2, (RR, I7),		    iwmmxt_textrc),
25965 cCE("textrmub",e100070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25966 cCE("textrmuh",e500070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25967 cCE("textrmuw",e900070, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25968 cCE("textrmsb",e100078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25969 cCE("textrmsh",e500078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25970 cCE("textrmsw",e900078, 3, (RR, RIWR, I7),	    iwmmxt_textrm),
25971 cCE("tinsrb",	e600010, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25972 cCE("tinsrh",	e600050, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25973 cCE("tinsrw",	e600090, 3, (RIWR, RR, I7),	    iwmmxt_tinsr),
25974 cCE("tmcr",	e000110, 2, (RIWC_RIWG, RR),	    rn_rd),
25975 cCE("tmcrr",	c400000, 3, (RIWR, RR, RR),	    rm_rd_rn),
25976 cCE("tmia",	e200010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25977 cCE("tmiaph",	e280010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25978 cCE("tmiabb",	e2c0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25979 cCE("tmiabt",	e2d0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25980 cCE("tmiatb",	e2e0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25981 cCE("tmiatt",	e2f0010, 3, (RIWR, RR, RR),	    iwmmxt_tmia),
25982 cCE("tmovmskb",e100030, 2, (RR, RIWR),		    rd_rn),
25983 cCE("tmovmskh",e500030, 2, (RR, RIWR),		    rd_rn),
25984 cCE("tmovmskw",e900030, 2, (RR, RIWR),		    rd_rn),
25985 cCE("tmrc",	e100110, 2, (RR, RIWC_RIWG),	    rd_rn),
25986 cCE("tmrrc",	c500000, 3, (RR, RR, RIWR),	    rd_rn_rm),
25987 cCE("torcb",	e13f150, 1, (RR),		    iwmmxt_tandorc),
25988 cCE("torch",	e53f150, 1, (RR),		    iwmmxt_tandorc),
25989 cCE("torcw",	e93f150, 1, (RR),		    iwmmxt_tandorc),
25990 cCE("waccb",	e0001c0, 2, (RIWR, RIWR),	    rd_rn),
25991 cCE("wacch",	e4001c0, 2, (RIWR, RIWR),	    rd_rn),
25992 cCE("waccw",	e8001c0, 2, (RIWR, RIWR),	    rd_rn),
25993 cCE("waddbss",	e300180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25994 cCE("waddb",	e000180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25995 cCE("waddbus",	e100180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25996 cCE("waddhss",	e700180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25997 cCE("waddh",	e400180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25998 cCE("waddhus",	e500180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
25999 cCE("waddwss",	eb00180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26000 cCE("waddw",	e800180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26001 cCE("waddwus",	e900180, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26002 cCE("waligni",	e000020, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_waligni),
26003 cCE("walignr0",e800020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26004 cCE("walignr1",e900020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26005 cCE("walignr2",ea00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26006 cCE("walignr3",eb00020, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26007 cCE("wand",	e200000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26008 cCE("wandn",	e300000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26009 cCE("wavg2b",	e800000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26010 cCE("wavg2br",	e900000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26011 cCE("wavg2h",	ec00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26012 cCE("wavg2hr",	ed00000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26013 cCE("wcmpeqb",	e000060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26014 cCE("wcmpeqh",	e400060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26015 cCE("wcmpeqw",	e800060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26016 cCE("wcmpgtub",e100060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26017 cCE("wcmpgtuh",e500060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26018 cCE("wcmpgtuw",e900060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26019 cCE("wcmpgtsb",e300060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26020 cCE("wcmpgtsh",e700060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26021 cCE("wcmpgtsw",eb00060, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26022 cCE("wldrb",	c100000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26023 cCE("wldrh",	c500000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26024 cCE("wldrw",	c100100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
26025 cCE("wldrd",	c500100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
26026 cCE("wmacs",	e600100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26027 cCE("wmacsz",	e700100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26028 cCE("wmacu",	e400100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26029 cCE("wmacuz",	e500100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26030 cCE("wmadds",	ea00100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26031 cCE("wmaddu",	e800100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26032 cCE("wmaxsb",	e200160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26033 cCE("wmaxsh",	e600160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26034 cCE("wmaxsw",	ea00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26035 cCE("wmaxub",	e000160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26036 cCE("wmaxuh",	e400160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26037 cCE("wmaxuw",	e800160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26038 cCE("wminsb",	e300160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26039 cCE("wminsh",	e700160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26040 cCE("wminsw",	eb00160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26041 cCE("wminub",	e100160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26042 cCE("wminuh",	e500160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26043 cCE("wminuw",	e900160, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26044 cCE("wmov",	e000000, 2, (RIWR, RIWR),	    iwmmxt_wmov),
26045 cCE("wmulsm",	e300100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26046 cCE("wmulsl",	e200100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26047 cCE("wmulum",	e100100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26048 cCE("wmulul",	e000100, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26049 cCE("wor",	e000000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26050 cCE("wpackhss",e700080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26051 cCE("wpackhus",e500080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26052 cCE("wpackwss",eb00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26053 cCE("wpackwus",e900080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26054 cCE("wpackdss",ef00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26055 cCE("wpackdus",ed00080, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26056 cCE("wrorh",	e700040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26057 cCE("wrorhg",	e700148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26058 cCE("wrorw",	eb00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26059 cCE("wrorwg",	eb00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26060 cCE("wrord",	ef00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26061 cCE("wrordg",	ef00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26062 cCE("wsadb",	e000120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26063 cCE("wsadbz",	e100120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26064 cCE("wsadh",	e400120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26065 cCE("wsadhz",	e500120, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26066 cCE("wshufh",	e0001e0, 3, (RIWR, RIWR, I255),	    iwmmxt_wshufh),
26067 cCE("wsllh",	e500040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26068 cCE("wsllhg",	e500148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26069 cCE("wsllw",	e900040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26070 cCE("wsllwg",	e900148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26071 cCE("wslld",	ed00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26072 cCE("wslldg",	ed00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26073 cCE("wsrah",	e400040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26074 cCE("wsrahg",	e400148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26075 cCE("wsraw",	e800040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26076 cCE("wsrawg",	e800148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26077 cCE("wsrad",	ec00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26078 cCE("wsradg",	ec00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26079 cCE("wsrlh",	e600040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26080 cCE("wsrlhg",	e600148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26081 cCE("wsrlw",	ea00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26082 cCE("wsrlwg",	ea00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26083 cCE("wsrld",	ee00040, 3, (RIWR, RIWR, RIWR_I32z),iwmmxt_wrwrwr_or_imm5),
26084 cCE("wsrldg",	ee00148, 3, (RIWR, RIWR, RIWG),	    rd_rn_rm),
26085 cCE("wstrb",	c000000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26086 cCE("wstrh",	c400000, 2, (RIWR, ADDR),	    iwmmxt_wldstbh),
26087 cCE("wstrw",	c000100, 2, (RIWR_RIWC, ADDR),	    iwmmxt_wldstw),
26088 cCE("wstrd",	c400100, 2, (RIWR, ADDR),	    iwmmxt_wldstd),
26089 cCE("wsubbss",	e3001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26090 cCE("wsubb",	e0001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26091 cCE("wsubbus",	e1001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26092 cCE("wsubhss",	e7001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26093 cCE("wsubh",	e4001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26094 cCE("wsubhus",	e5001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26095 cCE("wsubwss",	eb001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26096 cCE("wsubw",	e8001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26097 cCE("wsubwus",	e9001a0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26098 cCE("wunpckehub",e0000c0, 2, (RIWR, RIWR),	    rd_rn),
26099 cCE("wunpckehuh",e4000c0, 2, (RIWR, RIWR),	    rd_rn),
26100 cCE("wunpckehuw",e8000c0, 2, (RIWR, RIWR),	    rd_rn),
26101 cCE("wunpckehsb",e2000c0, 2, (RIWR, RIWR),	    rd_rn),
26102 cCE("wunpckehsh",e6000c0, 2, (RIWR, RIWR),	    rd_rn),
26103 cCE("wunpckehsw",ea000c0, 2, (RIWR, RIWR),	    rd_rn),
26104 cCE("wunpckihb", e1000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26105 cCE("wunpckihh", e5000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26106 cCE("wunpckihw", e9000c0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26107 cCE("wunpckelub",e0000e0, 2, (RIWR, RIWR),	    rd_rn),
26108 cCE("wunpckeluh",e4000e0, 2, (RIWR, RIWR),	    rd_rn),
26109 cCE("wunpckeluw",e8000e0, 2, (RIWR, RIWR),	    rd_rn),
26110 cCE("wunpckelsb",e2000e0, 2, (RIWR, RIWR),	    rd_rn),
26111 cCE("wunpckelsh",e6000e0, 2, (RIWR, RIWR),	    rd_rn),
26112 cCE("wunpckelsw",ea000e0, 2, (RIWR, RIWR),	    rd_rn),
26113 cCE("wunpckilb", e1000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26114 cCE("wunpckilh", e5000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26115 cCE("wunpckilw", e9000e0, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26116 cCE("wxor",	e100000, 3, (RIWR, RIWR, RIWR),	    rd_rn_rm),
26117 cCE("wzero",	e300000, 1, (RIWR),		    iwmmxt_wzero),
26118
26119#undef  ARM_VARIANT
26120#define ARM_VARIANT  & arm_cext_iwmmxt2 /* Intel Wireless MMX technology, version 2.  */
26121
26122 cCE("torvscb",   e12f190, 1, (RR),		    iwmmxt_tandorc),
26123 cCE("torvsch",   e52f190, 1, (RR),		    iwmmxt_tandorc),
26124 cCE("torvscw",   e92f190, 1, (RR),		    iwmmxt_tandorc),
26125 cCE("wabsb",     e2001c0, 2, (RIWR, RIWR),           rd_rn),
26126 cCE("wabsh",     e6001c0, 2, (RIWR, RIWR),           rd_rn),
26127 cCE("wabsw",     ea001c0, 2, (RIWR, RIWR),           rd_rn),
26128 cCE("wabsdiffb", e1001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26129 cCE("wabsdiffh", e5001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26130 cCE("wabsdiffw", e9001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26131 cCE("waddbhusl", e2001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26132 cCE("waddbhusm", e6001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26133 cCE("waddhc",    e600180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26134 cCE("waddwc",    ea00180, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26135 cCE("waddsubhx", ea001a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26136 cCE("wavg4",	e400000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26137 cCE("wavg4r",    e500000, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26138 cCE("wmaddsn",   ee00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26139 cCE("wmaddsx",   eb00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26140 cCE("wmaddun",   ec00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26141 cCE("wmaddux",   e900100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26142 cCE("wmerge",    e000080, 4, (RIWR, RIWR, RIWR, I7), iwmmxt_wmerge),
26143 cCE("wmiabb",    e0000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26144 cCE("wmiabt",    e1000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26145 cCE("wmiatb",    e2000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26146 cCE("wmiatt",    e3000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26147 cCE("wmiabbn",   e4000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26148 cCE("wmiabtn",   e5000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26149 cCE("wmiatbn",   e6000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26150 cCE("wmiattn",   e7000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26151 cCE("wmiawbb",   e800120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26152 cCE("wmiawbt",   e900120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26153 cCE("wmiawtb",   ea00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26154 cCE("wmiawtt",   eb00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26155 cCE("wmiawbbn",  ec00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26156 cCE("wmiawbtn",  ed00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26157 cCE("wmiawtbn",  ee00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26158 cCE("wmiawttn",  ef00120, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26159 cCE("wmulsmr",   ef00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26160 cCE("wmulumr",   ed00100, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26161 cCE("wmulwumr",  ec000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26162 cCE("wmulwsmr",  ee000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26163 cCE("wmulwum",   ed000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26164 cCE("wmulwsm",   ef000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26165 cCE("wmulwl",    eb000c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26166 cCE("wqmiabb",   e8000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26167 cCE("wqmiabt",   e9000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26168 cCE("wqmiatb",   ea000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26169 cCE("wqmiatt",   eb000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26170 cCE("wqmiabbn",  ec000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26171 cCE("wqmiabtn",  ed000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26172 cCE("wqmiatbn",  ee000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26173 cCE("wqmiattn",  ef000a0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26174 cCE("wqmulm",    e100080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26175 cCE("wqmulmr",   e300080, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26176 cCE("wqmulwm",   ec000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26177 cCE("wqmulwmr",  ee000e0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26178 cCE("wsubaddhx", ed001c0, 3, (RIWR, RIWR, RIWR),     rd_rn_rm),
26179
26180#undef  ARM_VARIANT
26181#define ARM_VARIANT  & arm_cext_maverick /* Cirrus Maverick instructions.  */
26182
26183 cCE("cfldrs",	c100400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
26184 cCE("cfldrd",	c500400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
26185 cCE("cfldr32",	c100500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
26186 cCE("cfldr64",	c500500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
26187 cCE("cfstrs",	c000400, 2, (RMF, ADDRGLDC),	      rd_cpaddr),
26188 cCE("cfstrd",	c400400, 2, (RMD, ADDRGLDC),	      rd_cpaddr),
26189 cCE("cfstr32",	c000500, 2, (RMFX, ADDRGLDC),	      rd_cpaddr),
26190 cCE("cfstr64",	c400500, 2, (RMDX, ADDRGLDC),	      rd_cpaddr),
26191 cCE("cfmvsr",	e000450, 2, (RMF, RR),		      rn_rd),
26192 cCE("cfmvrs",	e100450, 2, (RR, RMF),		      rd_rn),
26193 cCE("cfmvdlr",	e000410, 2, (RMD, RR),		      rn_rd),
26194 cCE("cfmvrdl",	e100410, 2, (RR, RMD),		      rd_rn),
26195 cCE("cfmvdhr",	e000430, 2, (RMD, RR),		      rn_rd),
26196 cCE("cfmvrdh",	e100430, 2, (RR, RMD),		      rd_rn),
26197 cCE("cfmv64lr",e000510, 2, (RMDX, RR),		      rn_rd),
26198 cCE("cfmvr64l",e100510, 2, (RR, RMDX),		      rd_rn),
26199 cCE("cfmv64hr",e000530, 2, (RMDX, RR),		      rn_rd),
26200 cCE("cfmvr64h",e100530, 2, (RR, RMDX),		      rd_rn),
26201 cCE("cfmval32",e200440, 2, (RMAX, RMFX),	      rd_rn),
26202 cCE("cfmv32al",e100440, 2, (RMFX, RMAX),	      rd_rn),
26203 cCE("cfmvam32",e200460, 2, (RMAX, RMFX),	      rd_rn),
26204 cCE("cfmv32am",e100460, 2, (RMFX, RMAX),	      rd_rn),
26205 cCE("cfmvah32",e200480, 2, (RMAX, RMFX),	      rd_rn),
26206 cCE("cfmv32ah",e100480, 2, (RMFX, RMAX),	      rd_rn),
26207 cCE("cfmva32",	e2004a0, 2, (RMAX, RMFX),	      rd_rn),
26208 cCE("cfmv32a",	e1004a0, 2, (RMFX, RMAX),	      rd_rn),
26209 cCE("cfmva64",	e2004c0, 2, (RMAX, RMDX),	      rd_rn),
26210 cCE("cfmv64a",	e1004c0, 2, (RMDX, RMAX),	      rd_rn),
26211 cCE("cfmvsc32",e2004e0, 2, (RMDS, RMDX),	      mav_dspsc),
26212 cCE("cfmv32sc",e1004e0, 2, (RMDX, RMDS),	      rd),
26213 cCE("cfcpys",	e000400, 2, (RMF, RMF),		      rd_rn),
26214 cCE("cfcpyd",	e000420, 2, (RMD, RMD),		      rd_rn),
26215 cCE("cfcvtsd",	e000460, 2, (RMD, RMF),		      rd_rn),
26216 cCE("cfcvtds",	e000440, 2, (RMF, RMD),		      rd_rn),
26217 cCE("cfcvt32s",e000480, 2, (RMF, RMFX),	      rd_rn),
26218 cCE("cfcvt32d",e0004a0, 2, (RMD, RMFX),	      rd_rn),
26219 cCE("cfcvt64s",e0004c0, 2, (RMF, RMDX),	      rd_rn),
26220 cCE("cfcvt64d",e0004e0, 2, (RMD, RMDX),	      rd_rn),
26221 cCE("cfcvts32",e100580, 2, (RMFX, RMF),	      rd_rn),
26222 cCE("cfcvtd32",e1005a0, 2, (RMFX, RMD),	      rd_rn),
26223 cCE("cftruncs32",e1005c0, 2, (RMFX, RMF),	      rd_rn),
26224 cCE("cftruncd32",e1005e0, 2, (RMFX, RMD),	      rd_rn),
26225 cCE("cfrshl32",e000550, 3, (RMFX, RMFX, RR),	      mav_triple),
26226 cCE("cfrshl64",e000570, 3, (RMDX, RMDX, RR),	      mav_triple),
26227 cCE("cfsh32",	e000500, 3, (RMFX, RMFX, I63s),	      mav_shift),
26228 cCE("cfsh64",	e200500, 3, (RMDX, RMDX, I63s),	      mav_shift),
26229 cCE("cfcmps",	e100490, 3, (RR, RMF, RMF),	      rd_rn_rm),
26230 cCE("cfcmpd",	e1004b0, 3, (RR, RMD, RMD),	      rd_rn_rm),
26231 cCE("cfcmp32",	e100590, 3, (RR, RMFX, RMFX),	      rd_rn_rm),
26232 cCE("cfcmp64",	e1005b0, 3, (RR, RMDX, RMDX),	      rd_rn_rm),
26233 cCE("cfabss",	e300400, 2, (RMF, RMF),		      rd_rn),
26234 cCE("cfabsd",	e300420, 2, (RMD, RMD),		      rd_rn),
26235 cCE("cfnegs",	e300440, 2, (RMF, RMF),		      rd_rn),
26236 cCE("cfnegd",	e300460, 2, (RMD, RMD),		      rd_rn),
26237 cCE("cfadds",	e300480, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26238 cCE("cfaddd",	e3004a0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26239 cCE("cfsubs",	e3004c0, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26240 cCE("cfsubd",	e3004e0, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26241 cCE("cfmuls",	e100400, 3, (RMF, RMF, RMF),	      rd_rn_rm),
26242 cCE("cfmuld",	e100420, 3, (RMD, RMD, RMD),	      rd_rn_rm),
26243 cCE("cfabs32",	e300500, 2, (RMFX, RMFX),	      rd_rn),
26244 cCE("cfabs64",	e300520, 2, (RMDX, RMDX),	      rd_rn),
26245 cCE("cfneg32",	e300540, 2, (RMFX, RMFX),	      rd_rn),
26246 cCE("cfneg64",	e300560, 2, (RMDX, RMDX),	      rd_rn),
26247 cCE("cfadd32",	e300580, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26248 cCE("cfadd64",	e3005a0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26249 cCE("cfsub32",	e3005c0, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26250 cCE("cfsub64",	e3005e0, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26251 cCE("cfmul32",	e100500, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26252 cCE("cfmul64",	e100520, 3, (RMDX, RMDX, RMDX),	      rd_rn_rm),
26253 cCE("cfmac32",	e100540, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26254 cCE("cfmsc32",	e100560, 3, (RMFX, RMFX, RMFX),	      rd_rn_rm),
26255 cCE("cfmadd32",e000600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26256 cCE("cfmsub32",e100600, 4, (RMAX, RMFX, RMFX, RMFX), mav_quad),
26257 cCE("cfmadda32", e200600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26258 cCE("cfmsuba32", e300600, 4, (RMAX, RMAX, RMFX, RMFX), mav_quad),
26259
26260 /* ARMv8.5-A instructions.  */
26261#undef  ARM_VARIANT
26262#define ARM_VARIANT   & arm_ext_sb
26263#undef  THUMB_VARIANT
26264#define THUMB_VARIANT & arm_ext_sb
26265 TUF("sb", 57ff070, f3bf8f70, 0, (), noargs, noargs),
26266
26267#undef  ARM_VARIANT
26268#define ARM_VARIANT   & arm_ext_predres
26269#undef  THUMB_VARIANT
26270#define THUMB_VARIANT & arm_ext_predres
26271 CE("cfprctx", e070f93, 1, (RRnpc), rd),
26272 CE("dvprctx", e070fb3, 1, (RRnpc), rd),
26273 CE("cpprctx", e070ff3, 1, (RRnpc), rd),
26274
26275 /* ARMv8-M instructions.  */
26276#undef  ARM_VARIANT
26277#define ARM_VARIANT NULL
26278#undef  THUMB_VARIANT
26279#define THUMB_VARIANT & arm_ext_v8m
26280 ToU("sg",    e97fe97f,	0, (),		   noargs),
26281 ToC("blxns", 4784,	1, (RRnpc),	   t_blx),
26282 ToC("bxns",  4704,	1, (RRnpc),	   t_bx),
26283 ToC("tt",    e840f000,	2, (RRnpc, RRnpc), tt),
26284 ToC("ttt",   e840f040,	2, (RRnpc, RRnpc), tt),
26285 ToC("tta",   e840f080,	2, (RRnpc, RRnpc), tt),
26286 ToC("ttat",  e840f0c0,	2, (RRnpc, RRnpc), tt),
26287
26288 /* FP for ARMv8-M Mainline.  Enabled for ARMv8-M Mainline because the
26289    instructions behave as nop if no VFP is present.  */
26290#undef  THUMB_VARIANT
26291#define THUMB_VARIANT & arm_ext_v8m_main
26292 ToC("vlldm", ec300a00, 1, (RRnpc), rn),
26293 ToC("vlstm", ec200a00, 1, (RRnpc), rn),
26294
26295 /* Armv8.1-M Mainline instructions.  */
26296#undef  THUMB_VARIANT
26297#define THUMB_VARIANT & arm_ext_v8_1m_main
26298 toU("cinc",  _cinc,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26299 toU("cinv",  _cinv,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26300 toU("cneg",  _cneg,  3, (RRnpcsp, RR_ZR, COND),	t_cond),
26301 toU("csel",  _csel,  4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26302 toU("csetm", _csetm, 2, (RRnpcsp, COND),		t_cond),
26303 toU("cset",  _cset,  2, (RRnpcsp, COND),		t_cond),
26304 toU("csinc", _csinc, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26305 toU("csinv", _csinv, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26306 toU("csneg", _csneg, 4, (RRnpcsp, RR_ZR, RR_ZR, COND),	t_cond),
26307
26308 toC("bf",     _bf,	2, (EXPs, EXPs),	     t_branch_future),
26309 toU("bfcsel", _bfcsel,	4, (EXPs, EXPs, EXPs, COND), t_branch_future),
26310 toC("bfx",    _bfx,	2, (EXPs, RRnpcsp),	     t_branch_future),
26311 toC("bfl",    _bfl,	2, (EXPs, EXPs),	     t_branch_future),
26312 toC("bflx",   _bflx,	2, (EXPs, RRnpcsp),	     t_branch_future),
26313
26314 toU("dls", _dls, 2, (LR, RRnpcsp),	 t_loloop),
26315 toU("wls", _wls, 3, (LR, RRnpcsp, EXP), t_loloop),
26316 toU("le",  _le,  2, (oLR, EXP),	 t_loloop),
26317
26318 ToC("clrm",	e89f0000, 1, (CLRMLST),  t_clrm),
26319 ToC("vscclrm",	ec9f0a00, 1, (VRSDVLST), t_vscclrm),
26320
26321#undef  THUMB_VARIANT
26322#define THUMB_VARIANT & mve_ext
26323 ToC("lsll",	ea50010d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26324 ToC("lsrl",	ea50011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26325 ToC("asrl",	ea50012d, 3, (RRe, RRo, RRnpcsp_I32), mve_scalar_shift),
26326 ToC("uqrshll",	ea51010d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26327 ToC("sqrshrl",	ea51012d, 4, (RRe, RRo, I48_I64, RRnpcsp), mve_scalar_shift1),
26328 ToC("uqshll",	ea51010f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26329 ToC("urshrl",	ea51011f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26330 ToC("srshrl",	ea51012f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26331 ToC("sqshll",	ea51013f, 3, (RRe, RRo, I32),	      mve_scalar_shift),
26332 ToC("uqrshl",	ea500f0d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
26333 ToC("sqrshr",	ea500f2d, 2, (RRnpcsp, RRnpcsp),      mve_scalar_shift),
26334 ToC("uqshl",	ea500f0f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26335 ToC("urshr",	ea500f1f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26336 ToC("srshr",	ea500f2f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26337 ToC("sqshl",	ea500f3f, 2, (RRnpcsp, I32),	      mve_scalar_shift),
26338
26339 ToC("vpt",	ee410f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26340 ToC("vptt",	ee018f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26341 ToC("vpte",	ee418f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26342 ToC("vpttt",	ee014f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26343 ToC("vptte",	ee01cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26344 ToC("vptet",	ee41cf00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26345 ToC("vptee",	ee414f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26346 ToC("vptttt",	ee012f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26347 ToC("vpttte",	ee016f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26348 ToC("vpttet",	ee01ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26349 ToC("vpttee",	ee01af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26350 ToC("vptett",	ee41af00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26351 ToC("vptete",	ee41ef00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26352 ToC("vpteet",	ee416f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26353 ToC("vpteee",	ee412f00, 3, (COND, RMQ, RMQRZ), mve_vpt),
26354
26355 ToC("vpst",	fe710f4d, 0, (), mve_vpt),
26356 ToC("vpstt",	fe318f4d, 0, (), mve_vpt),
26357 ToC("vpste",	fe718f4d, 0, (), mve_vpt),
26358 ToC("vpsttt",	fe314f4d, 0, (), mve_vpt),
26359 ToC("vpstte",	fe31cf4d, 0, (), mve_vpt),
26360 ToC("vpstet",	fe71cf4d, 0, (), mve_vpt),
26361 ToC("vpstee",	fe714f4d, 0, (), mve_vpt),
26362 ToC("vpstttt",	fe312f4d, 0, (), mve_vpt),
26363 ToC("vpsttte", fe316f4d, 0, (), mve_vpt),
26364 ToC("vpsttet",	fe31ef4d, 0, (), mve_vpt),
26365 ToC("vpsttee",	fe31af4d, 0, (), mve_vpt),
26366 ToC("vpstett",	fe71af4d, 0, (), mve_vpt),
26367 ToC("vpstete",	fe71ef4d, 0, (), mve_vpt),
26368 ToC("vpsteet",	fe716f4d, 0, (), mve_vpt),
26369 ToC("vpsteee",	fe712f4d, 0, (), mve_vpt),
26370
26371 /* MVE and MVE FP only.  */
26372 mToC("vhcadd",	ee000f00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vhcadd),
26373 mCEF(vctp,	_vctp,      1, (RRnpc),				  mve_vctp),
26374 mCEF(vadc,	_vadc,      3, (RMQ, RMQ, RMQ),			  mve_vadc),
26375 mCEF(vadci,	_vadci,     3, (RMQ, RMQ, RMQ),			  mve_vadc),
26376 mToC("vsbc",	fe300f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
26377 mToC("vsbci",	fe301f00,   3, (RMQ, RMQ, RMQ),			  mve_vsbc),
26378 mCEF(vmullb,	_vmullb,    3, (RMQ, RMQ, RMQ),			  mve_vmull),
26379 mCEF(vabav,	_vabav,	    3, (RRnpcsp, RMQ, RMQ),		  mve_vabav),
26380 mCEF(vmladav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26381 mCEF(vmladava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26382 mCEF(vmladavx,	  _vmladavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26383 mCEF(vmladavax,  _vmladavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26384 mCEF(vmlav,	  _vmladav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26385 mCEF(vmlava,	  _vmladava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26386 mCEF(vmlsdav,	  _vmlsdav,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26387 mCEF(vmlsdava,	  _vmlsdava,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26388 mCEF(vmlsdavx,	  _vmlsdavx,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26389 mCEF(vmlsdavax,  _vmlsdavax,	3, (RRe, RMQ, RMQ),		mve_vmladav),
26390
26391 mCEF(vst20,	_vst20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26392 mCEF(vst21,	_vst21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26393 mCEF(vst40,	_vst40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26394 mCEF(vst41,	_vst41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26395 mCEF(vst42,	_vst42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26396 mCEF(vst43,	_vst43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26397 mCEF(vld20,	_vld20,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26398 mCEF(vld21,	_vld21,	    2, (MSTRLST2, ADDRMVE),		mve_vst_vld),
26399 mCEF(vld40,	_vld40,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26400 mCEF(vld41,	_vld41,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26401 mCEF(vld42,	_vld42,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26402 mCEF(vld43,	_vld43,	    2, (MSTRLST4, ADDRMVE),		mve_vst_vld),
26403 mCEF(vstrb,	_vstrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26404 mCEF(vstrh,	_vstrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26405 mCEF(vstrw,	_vstrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26406 mCEF(vstrd,	_vstrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26407 mCEF(vldrb,	_vldrb,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26408 mCEF(vldrh,	_vldrh,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26409 mCEF(vldrw,	_vldrw,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26410 mCEF(vldrd,	_vldrd,	    2, (RMQ, ADDRMVE),			mve_vstr_vldr),
26411
26412 mCEF(vmovnt,	_vmovnt,    2, (RMQ, RMQ),			  mve_movn),
26413 mCEF(vmovnb,	_vmovnb,    2, (RMQ, RMQ),			  mve_movn),
26414 mCEF(vbrsr,	_vbrsr,     3, (RMQ, RMQ, RR),			  mve_vbrsr),
26415 mCEF(vaddlv,	_vaddlv,    3, (RRe, RRo, RMQ),			  mve_vaddlv),
26416 mCEF(vaddlva,	_vaddlva,   3, (RRe, RRo, RMQ),			  mve_vaddlv),
26417 mCEF(vaddv,	_vaddv,	    2, (RRe, RMQ),			  mve_vaddv),
26418 mCEF(vaddva,	_vaddva,    2, (RRe, RMQ),			  mve_vaddv),
26419 mCEF(vddup,	_vddup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
26420 mCEF(vdwdup,	_vdwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
26421 mCEF(vidup,	_vidup,	    3, (RMQ, RRe, EXPi),		  mve_viddup),
26422 mCEF(viwdup,	_viwdup,    4, (RMQ, RRe, RR, EXPi),		  mve_viddup),
26423 mToC("vmaxa",	ee330e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
26424 mToC("vmina",	ee331e81,   2, (RMQ, RMQ),			  mve_vmaxa_vmina),
26425 mCEF(vmaxv,	_vmaxv,	  2, (RR, RMQ),				  mve_vmaxv),
26426 mCEF(vmaxav,	_vmaxav,  2, (RR, RMQ),				  mve_vmaxv),
26427 mCEF(vminv,	_vminv,	  2, (RR, RMQ),				  mve_vmaxv),
26428 mCEF(vminav,	_vminav,  2, (RR, RMQ),				  mve_vmaxv),
26429
26430 mCEF(vmlaldav,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26431 mCEF(vmlaldava,  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26432 mCEF(vmlaldavx,  _vmlaldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26433 mCEF(vmlaldavax, _vmlaldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26434 mCEF(vmlalv,	  _vmlaldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26435 mCEF(vmlalva,	  _vmlaldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26436 mCEF(vmlsldav,	  _vmlsldav,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26437 mCEF(vmlsldava,  _vmlsldava,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26438 mCEF(vmlsldavx,  _vmlsldavx,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26439 mCEF(vmlsldavax, _vmlsldavax,	4, (RRe, RRo, RMQ, RMQ),	mve_vmlaldav),
26440 mToC("vrmlaldavh", ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26441 mToC("vrmlaldavha",ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26442 mCEF(vrmlaldavhx,  _vrmlaldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26443 mCEF(vrmlaldavhax, _vrmlaldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26444 mToC("vrmlalvh",   ee800f00,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26445 mToC("vrmlalvha",  ee800f20,	   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26446 mCEF(vrmlsldavh,   _vrmlsldavh,   4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26447 mCEF(vrmlsldavha,  _vrmlsldavha,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26448 mCEF(vrmlsldavhx,  _vrmlsldavhx,  4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26449 mCEF(vrmlsldavhax, _vrmlsldavhax, 4, (RRe, RR, RMQ, RMQ),  mve_vrmlaldavh),
26450
26451 mToC("vmlas",	  ee011e40,	3, (RMQ, RMQ, RR),		mve_vmlas),
26452 mToC("vmulh",	  ee010e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
26453 mToC("vrmulh",	  ee011e01,	3, (RMQ, RMQ, RMQ),		mve_vmulh),
26454 mToC("vpnot",	  fe310f4d,	0, (),				mve_vpnot),
26455 mToC("vpsel",	  fe310f01,	3, (RMQ, RMQ, RMQ),		mve_vpsel),
26456
26457 mToC("vqdmladh",  ee000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26458 mToC("vqdmladhx", ee001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26459 mToC("vqrdmladh", ee000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26460 mToC("vqrdmladhx",ee001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26461 mToC("vqdmlsdh",  fe000e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26462 mToC("vqdmlsdhx", fe001e00,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26463 mToC("vqrdmlsdh", fe000e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26464 mToC("vqrdmlsdhx",fe001e01,	3, (RMQ, RMQ, RMQ),		mve_vqdmladh),
26465 mToC("vqdmlah",   ee000e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26466 mToC("vqdmlash",  ee001e60,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26467 mToC("vqrdmlash", ee001e40,	3, (RMQ, RMQ, RR),		mve_vqdmlah),
26468 mToC("vqdmullt",  ee301f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
26469 mToC("vqdmullb",  ee300f00,	3, (RMQ, RMQ, RMQRR),		mve_vqdmull),
26470 mCEF(vqmovnt,	  _vqmovnt,	2, (RMQ, RMQ),			mve_vqmovn),
26471 mCEF(vqmovnb,	  _vqmovnb,	2, (RMQ, RMQ),			mve_vqmovn),
26472 mCEF(vqmovunt,	  _vqmovunt,	2, (RMQ, RMQ),			mve_vqmovn),
26473 mCEF(vqmovunb,	  _vqmovunb,	2, (RMQ, RMQ),			mve_vqmovn),
26474
26475 mCEF(vshrnt,	  _vshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26476 mCEF(vshrnb,	  _vshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26477 mCEF(vrshrnt,	  _vrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26478 mCEF(vrshrnb,	  _vrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26479 mCEF(vqshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26480 mCEF(vqshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26481 mCEF(vqshrunt,	  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26482 mCEF(vqshrunb,	  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26483 mCEF(vqrshrnt,	  _vqrshrnt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26484 mCEF(vqrshrnb,	  _vqrshrnb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26485 mCEF(vqrshrunt,  _vqrshrunt,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26486 mCEF(vqrshrunb,  _vqrshrunb,	3, (RMQ, RMQ, I32z),	mve_vshrn),
26487
26488 mToC("vshlc",	    eea00fc0,	   3, (RMQ, RR, I32z),	    mve_vshlc),
26489 mToC("vshllt",	    ee201e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
26490 mToC("vshllb",	    ee200e00,	   3, (RMQ, RMQ, I32),	    mve_vshll),
26491
26492 toU("dlstp",	_dlstp, 2, (LR, RR),      t_loloop),
26493 toU("wlstp",	_wlstp, 3, (LR, RR, EXP), t_loloop),
26494 toU("letp",	_letp,  2, (LR, EXP),	  t_loloop),
26495 toU("lctp",	_lctp,  0, (),		  t_loloop),
26496
26497#undef THUMB_VARIANT
26498#define THUMB_VARIANT & mve_fp_ext
26499 mToC("vcmul", ee300e00,   4, (RMQ, RMQ, RMQ, EXPi),		  mve_vcmul),
26500 mToC("vfmas", ee311e40,   3, (RMQ, RMQ, RR),			  mve_vfmas),
26501 mToC("vmaxnma", ee3f0e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
26502 mToC("vminnma", ee3f1e81, 2, (RMQ, RMQ),			  mve_vmaxnma_vminnma),
26503 mToC("vmaxnmv", eeee0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
26504 mToC("vmaxnmav",eeec0f00, 2, (RR, RMQ),			  mve_vmaxnmv),
26505 mToC("vminnmv", eeee0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
26506 mToC("vminnmav",eeec0f80, 2, (RR, RMQ),			  mve_vmaxnmv),
26507
26508#undef  ARM_VARIANT
26509#define ARM_VARIANT  & fpu_vfp_ext_v1
26510#undef  THUMB_VARIANT
26511#define THUMB_VARIANT  & arm_ext_v6t2
26512 mnCEF(vmla,     _vmla,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mac_maybe_scalar),
26513 mnCEF(vmul,     _vmul,    3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ_RR), neon_mul),
26514
26515 mcCE(fcpyd,	eb00b40, 2, (RVD, RVD),	      vfp_dp_rd_rm),
26516
26517#undef  ARM_VARIANT
26518#define ARM_VARIANT  & fpu_vfp_ext_v1xd
26519
26520 MNCE(vmov,   0,	1, (VMOV),	      neon_mov),
26521 mcCE(fmrs,	e100a10, 2, (RR, RVS),	      vfp_reg_from_sp),
26522 mcCE(fmsr,	e000a10, 2, (RVS, RR),	      vfp_sp_from_reg),
26523 mcCE(fcpys,	eb00a40, 2, (RVS, RVS),	      vfp_sp_monadic),
26524
26525 mCEF(vmullt, _vmullt,	3, (RNSDQMQ, oRNSDQMQ, RNSDQ_RNSC_MQ),	mve_vmull),
26526 mnCEF(vadd,  _vadd,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26527 mnCEF(vsub,  _vsub,	3, (RNSDQMQ, oRNSDQMQ, RNSDQMQR),	neon_addsub_if_i),
26528
26529 MNCEF(vabs,  1b10300,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26530 MNCEF(vneg,  1b10380,	2, (RNSDQMQ, RNSDQMQ),	neon_abs_neg),
26531
26532 mCEF(vmovlt, _vmovlt,	1, (VMOV),		mve_movl),
26533 mCEF(vmovlb, _vmovlb,	1, (VMOV),		mve_movl),
26534
26535 mnCE(vcmp,      _vcmp,    3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26536 mnCE(vcmpe,     _vcmpe,   3, (RVSD_COND, RSVDMQ_FI0, oRMQRZ),    vfp_nsyn_cmp),
26537
26538#undef  ARM_VARIANT
26539#define ARM_VARIANT  & fpu_vfp_ext_v2
26540
26541 mcCE(fmsrr,	c400a10, 3, (VRSLST, RR, RR), vfp_sp2_from_reg2),
26542 mcCE(fmrrs,	c500a10, 3, (RR, RR, VRSLST), vfp_reg2_from_sp2),
26543 mcCE(fmdrr,	c400b10, 3, (RVD, RR, RR),    vfp_dp_rm_rd_rn),
26544 mcCE(fmrrd,	c500b10, 3, (RR, RR, RVD),    vfp_dp_rd_rn_rm),
26545
26546#undef  ARM_VARIANT
26547#define ARM_VARIANT    & fpu_vfp_ext_armv8xd
26548 mnUF(vcvta,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvta),
26549 mnUF(vcvtp,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtp),
26550 mnUF(vcvtn,  _vcvta,  3, (RNSDQMQ, oRNSDQMQ, oI32z),	neon_cvtn),
26551 mnUF(vcvtm,  _vcvta,  2, (RNSDQMQ, oRNSDQMQ),		neon_cvtm),
26552 mnUF(vmaxnm, _vmaxnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26553 mnUF(vminnm, _vminnm, 3, (RNSDQMQ, oRNSDQMQ, RNSDQMQ),	vmaxnm),
26554
26555#undef	ARM_VARIANT
26556#define ARM_VARIANT & fpu_neon_ext_v1
26557 mnUF(vabd,      _vabd,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26558 mnUF(vabdl,     _vabdl,	  3, (RNQMQ, RNDMQ, RNDMQ),   neon_dyadic_long),
26559 mnUF(vaddl,     _vaddl,	  3, (RNSDQMQ, oRNSDMQ, RNSDMQR),  neon_dyadic_long),
26560 mnUF(vsubl,     _vsubl,	  3, (RNSDQMQ, oRNSDMQ, RNSDMQR),  neon_dyadic_long),
26561 mnUF(vand,      _vand,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26562 mnUF(vbic,      _vbic,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26563 mnUF(vorr,      _vorr,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26564 mnUF(vorn,      _vorn,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ_Ibig), neon_logic),
26565 mnUF(veor,      _veor,		  3, (RNDQMQ, oRNDQMQ, RNDQMQ),      neon_logic),
26566 MNUF(vcls,      1b00400,	  2, (RNDQMQ, RNDQMQ),		     neon_cls),
26567 MNUF(vclz,      1b00480,	  2, (RNDQMQ, RNDQMQ),		     neon_clz),
26568 mnCE(vdup,      _vdup,		  2, (RNDQMQ, RR_RNSC),		     neon_dup),
26569 MNUF(vhadd,     00000000,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26570 MNUF(vrhadd,    00000100,	  3, (RNDQMQ, oRNDQMQ, RNDQMQ),	  neon_dyadic_i_su),
26571 MNUF(vhsub,     00000200,	  3, (RNDQMQ, oRNDQMQ, RNDQMQR),  neon_dyadic_i_su),
26572 mnUF(vmin,      _vmin,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26573 mnUF(vmax,      _vmax,    3, (RNDQMQ, oRNDQMQ, RNDQMQ), neon_dyadic_if_su),
26574 MNUF(vqadd,     0000010,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26575 MNUF(vqsub,     0000210,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_dyadic_i64_su),
26576 mnUF(vmvn,      _vmvn,    2, (RNDQMQ, RNDQMQ_Ibig), neon_mvn),
26577 MNUF(vqabs,     1b00700,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26578 MNUF(vqneg,     1b00780,  2, (RNDQMQ, RNDQMQ),     neon_sat_abs_neg),
26579 mnUF(vqrdmlah,  _vqrdmlah,3, (RNDQMQ, oRNDQMQ, RNDQ_RNSC_RR), neon_qrdmlah),
26580 mnUF(vqdmulh,   _vqdmulh, 3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26581 mnUF(vqrdmulh,  _vqrdmulh,3, (RNDQMQ, oRNDQMQ, RNDQMQ_RNSC_RR), neon_qdmulh),
26582 MNUF(vqrshl,    0000510,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26583 MNUF(vrshl,     0000500,  3, (RNDQMQ, oRNDQMQ, RNDQMQR), neon_rshl),
26584 MNUF(vshr,      0800010,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26585 MNUF(vrshr,     0800210,  3, (RNDQMQ, oRNDQMQ, I64z), neon_rshift_round_imm),
26586 MNUF(vsli,      1800510,  3, (RNDQMQ, oRNDQMQ, I63),  neon_sli),
26587 MNUF(vsri,      1800410,  3, (RNDQMQ, oRNDQMQ, I64z), neon_sri),
26588 MNUF(vrev64,    1b00000,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26589 MNUF(vrev32,    1b00080,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26590 MNUF(vrev16,    1b00100,  2, (RNDQMQ, RNDQMQ),     neon_rev),
26591 mnUF(vshl,	 _vshl,    3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_shl),
26592 mnUF(vqshl,     _vqshl,   3, (RNDQMQ, oRNDQMQ, RNDQMQ_I63b_RR), neon_qshl),
26593 MNUF(vqshlu,    1800610,  3, (RNDQMQ, oRNDQMQ, I63),		 neon_qshlu_imm),
26594
26595#undef	ARM_VARIANT
26596#define ARM_VARIANT & arm_ext_v8_3
26597#undef	THUMB_VARIANT
26598#define	THUMB_VARIANT & arm_ext_v6t2_v8m
26599 MNUF (vcadd, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ, EXPi), vcadd),
26600 MNUF (vcmla, 0, 4, (RNDQMQ, RNDQMQ, RNDQMQ_RNSC, EXPi), vcmla),
26601
26602#undef	ARM_VARIANT
26603#define ARM_VARIANT &arm_ext_bf16
26604#undef	THUMB_VARIANT
26605#define	THUMB_VARIANT &arm_ext_bf16
26606 TUF ("vdot", c000d00, fc000d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vdot, vdot),
26607 TUF ("vmmla", c000c40, fc000c40, 3, (RNQ, RNQ, RNQ), vmmla, vmmla),
26608 TUF ("vfmab", c300810, fc300810, 3, (RNDQ, RNDQ, RNDQ_RNSC), bfloat_vfma, bfloat_vfma),
26609
26610#undef	ARM_VARIANT
26611#define ARM_VARIANT &arm_ext_i8mm
26612#undef	THUMB_VARIANT
26613#define	THUMB_VARIANT &arm_ext_i8mm
26614 TUF ("vsmmla", c200c40, fc200c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26615 TUF ("vummla", c200c50, fc200c50, 3, (RNQ, RNQ, RNQ), vummla, vummla),
26616 TUF ("vusmmla", ca00c40, fca00c40, 3, (RNQ, RNQ, RNQ), vsmmla, vsmmla),
26617 TUF ("vusdot", c800d00, fc800d00, 3, (RNDQ, RNDQ, RNDQ_RNSC), vusdot, vusdot),
26618 TUF ("vsudot", c800d10, fc800d10, 3, (RNDQ, RNDQ, RNSC), vsudot, vsudot),
26619
26620#undef	ARM_VARIANT
26621#undef	THUMB_VARIANT
26622#define	THUMB_VARIANT &arm_ext_cde
26623 ToC ("cx1", ee000000, 3, (RCP, APSR_RR, I8191), cx1),
26624 ToC ("cx1a", fe000000, 3, (RCP, APSR_RR, I8191), cx1a),
26625 ToC ("cx1d", ee000040, 4, (RCP, RR, APSR_RR, I8191), cx1d),
26626 ToC ("cx1da", fe000040, 4, (RCP, RR, APSR_RR, I8191), cx1da),
26627
26628 ToC ("cx2", ee400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2),
26629 ToC ("cx2a", fe400000, 4, (RCP, APSR_RR, APSR_RR, I511), cx2a),
26630 ToC ("cx2d", ee400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2d),
26631 ToC ("cx2da", fe400040, 5, (RCP, RR, APSR_RR, APSR_RR, I511), cx2da),
26632
26633 ToC ("cx3", ee800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3),
26634 ToC ("cx3a", fe800000, 5, (RCP, APSR_RR, APSR_RR, APSR_RR, I63), cx3a),
26635 ToC ("cx3d", ee800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3d),
26636 ToC ("cx3da", fe800040, 6, (RCP, RR, APSR_RR, APSR_RR, APSR_RR, I63), cx3da),
26637
26638 mToC ("vcx1", ec200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26639 mToC ("vcx1a", fc200000, 3, (RCP, RNSDMQ, I4095), vcx1),
26640
26641 mToC ("vcx2", ec300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26642 mToC ("vcx2a", fc300000, 4, (RCP, RNSDMQ, RNSDMQ, I127), vcx2),
26643
26644 mToC ("vcx3", ec800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26645 mToC ("vcx3a", fc800000, 5, (RCP, RNSDMQ, RNSDMQ, RNSDMQ, I15), vcx3),
26646};
26647
26648#undef ARM_VARIANT
26649#undef THUMB_VARIANT
26650#undef TCE
26651#undef TUE
26652#undef TUF
26653#undef TCC
26654#undef cCE
26655#undef cCL
26656#undef C3E
26657#undef C3
26658#undef CE
26659#undef CM
26660#undef CL
26661#undef UE
26662#undef UF
26663#undef UT
26664#undef NUF
26665#undef nUF
26666#undef NCE
26667#undef nCE
26668#undef OPS0
26669#undef OPS1
26670#undef OPS2
26671#undef OPS3
26672#undef OPS4
26673#undef OPS5
26674#undef OPS6
26675#undef do_0
26676#undef ToC
26677#undef toC
26678#undef ToU
26679#undef toU
26680
26681/* MD interface: bits in the object file.  */
26682
26683/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
26684   for use in the a.out file, and stores them in the array pointed to by buf.
26685   This knows about the endian-ness of the target machine and does
26686   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte)
26687   2 (short) and 4 (long)  Floating numbers are put out as a series of
26688   LITTLENUMS (shorts, here at least).	*/
26689
26690void
26691md_number_to_chars (char * buf, valueT val, int n)
26692{
26693  if (target_big_endian)
26694    number_to_chars_bigendian (buf, val, n);
26695  else
26696    number_to_chars_littleendian (buf, val, n);
26697}
26698
26699static valueT
26700md_chars_to_number (char * buf, int n)
26701{
26702  valueT result = 0;
26703  unsigned char * where = (unsigned char *) buf;
26704
26705  if (target_big_endian)
26706    {
26707      while (n--)
26708	{
26709	  result <<= 8;
26710	  result |= (*where++ & 255);
26711	}
26712    }
26713  else
26714    {
26715      while (n--)
26716	{
26717	  result <<= 8;
26718	  result |= (where[n] & 255);
26719	}
26720    }
26721
26722  return result;
26723}
26724
26725/* MD interface: Sections.  */
26726
26727/* Calculate the maximum variable size (i.e., excluding fr_fix)
26728   that an rs_machine_dependent frag may reach.  */
26729
26730unsigned int
26731arm_frag_max_var (fragS *fragp)
26732{
26733  /* We only use rs_machine_dependent for variable-size Thumb instructions,
26734     which are either THUMB_SIZE (2) or INSN_SIZE (4).
26735
26736     Note that we generate relaxable instructions even for cases that don't
26737     really need it, like an immediate that's a trivial constant.  So we're
26738     overestimating the instruction size for some of those cases.  Rather
26739     than putting more intelligence here, it would probably be better to
26740     avoid generating a relaxation frag in the first place when it can be
26741     determined up front that a short instruction will suffice.  */
26742
26743  gas_assert (fragp->fr_type == rs_machine_dependent);
26744  return INSN_SIZE;
26745}
26746
26747/* Estimate the size of a frag before relaxing.  Assume everything fits in
26748   2 bytes.  */
26749
26750int
26751md_estimate_size_before_relax (fragS * fragp,
26752			       segT    segtype ATTRIBUTE_UNUSED)
26753{
26754  fragp->fr_var = 2;
26755  return 2;
26756}
26757
/* Convert a machine dependent frag.  Relaxation has decided whether the
   Thumb instruction in FRAGP stays narrow (fr_var == 2) or must be
   widened to its 32-bit Thumb-2 form (fr_var == 4); rewrite the
   instruction accordingly and attach the fixup that will later resolve
   its immediate or branch offset.  */

void
md_convert_frag (bfd *abfd, segT asec ATTRIBUTE_UNUSED, fragS *fragp)
{
  unsigned long insn;
  unsigned long old_op;
  char *buf;
  expressionS exp;
  fixS *fixp;
  int reloc_type;
  int pc_rel;
  int opcode;

  /* The relaxable instruction sits at the start of the frag's variable
     part.  */
  buf = fragp->fr_literal + fragp->fr_fix;

  /* Read back the original 16-bit opcode so its register fields can be
     spliced into the wide encoding below.  */
  old_op = bfd_get_16(abfd, buf);
  if (fragp->fr_symbol)
    {
      exp.X_op = O_symbol;
      exp.X_add_symbol = fragp->fr_symbol;
    }
  else
    {
      exp.X_op = O_constant;
    }
  exp.X_add_number = fragp->fr_offset;
  opcode = fragp->fr_subtype;
  switch (opcode)
    {
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_pc2:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
    case T_MNEM_ldr:
    case T_MNEM_ldrb:
    case T_MNEM_ldrh:
    case T_MNEM_str:
    case T_MNEM_strb:
    case T_MNEM_strh:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Narrow opcodes 4xxx/9xxx (the PC-/SP-relative forms) keep
	     Rt in bits 8-10; the other forms carry Rt in bits 0-2 and
	     Rn in bits 3-5.  */
	  if ((old_op >> 12) == 4 || (old_op >> 12) == 9)
	    {
	      insn |= (old_op & 0x700) << 4;
	    }
	  else
	    {
	      insn |= (old_op & 7) << 12;
	      insn |= (old_op & 0x38) << 13;
	    }
	  insn |= 0x00000c00;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_OFFSET_IMM;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_OFFSET;
	}
      /* Only the PC-relative literal load is a pc-relative fixup.  */
      pc_rel = (opcode == T_MNEM_ldr_pc2);
      break;
    case T_MNEM_adr:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7 of the narrow form) into bits 8-11.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_ADD_PC12;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_ADD;
	  /* NOTE: the narrow reloc expects the addend without the
	     4-byte PC bias, hence the adjustment here.  */
	  exp.X_add_number -= 4;
	}
      pc_rel = 1;
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      if (fragp->fr_var == 4)
	{
	  /* mov/movs carry the register in the Rd field (shift 0 into
	     bits 8-10); cmp/cmn use the Rn field (shift 8).  */
	  int r0off = (opcode == T_MNEM_mov
		       || opcode == T_MNEM_movs) ? 0 : 8;
	  insn = THUMB_OP32 (opcode);
	  insn = (insn & 0xe1ffffff) | 0x10000000;
	  insn |= (old_op & 0x700) << r0off;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	{
	  reloc_type = BFD_RELOC_ARM_THUMB_IMM;
	}
      pc_rel = 0;
      break;
    case T_MNEM_b:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH25;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH12;
      pc_rel = 1;
      break;
    case T_MNEM_bcond:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32(opcode);
	  /* Move the condition code (bits 8-11) into bits 22-25.  */
	  insn |= (old_op & 0xf00) << 14;
	  put_thumb32_insn (buf, insn);
	  reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH20;
	}
      else
	reloc_type = BFD_RELOC_THUMB_PCREL_BRANCH9;
      pc_rel = 1;
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      if (fragp->fr_var == 4)
	{
	  /* ??? Choose between add and addw.  */
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7) into bits 8-11.  */
	  insn |= (old_op & 0xf0) << 4;
	  put_thumb32_insn (buf, insn);
	  if (opcode == T_MNEM_add_pc)
	    reloc_type = BFD_RELOC_ARM_T32_IMM12;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;

    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      if (fragp->fr_var == 4)
	{
	  insn = THUMB_OP32 (opcode);
	  /* Copy Rd (bits 4-7) into bits 8-11 and Rn (bits 0-3) into
	     bits 16-19.  */
	  insn |= (old_op & 0xf0) << 4;
	  insn |= (old_op & 0xf) << 16;
	  put_thumb32_insn (buf, insn);
	  /* Bit 20 is the S (set-flags) bit in the wide encoding.  */
	  if (insn & (1 << 20))
	    reloc_type = BFD_RELOC_ARM_T32_ADD_IMM;
	  else
	    reloc_type = BFD_RELOC_ARM_T32_IMMEDIATE;
	}
      else
	reloc_type = BFD_RELOC_ARM_THUMB_ADD;
      pc_rel = 0;
      break;
    default:
      abort ();
    }
  fixp = fix_new_exp (fragp, fragp->fr_fix, fragp->fr_var, &exp, pc_rel,
		      (enum bfd_reloc_code_real) reloc_type);
  fixp->fx_file = fragp->fr_file;
  fixp->fx_line = fragp->fr_line;
  fragp->fr_fix += fragp->fr_var;

  /* Set whether we use thumb-2 ISA based on final relaxation results.  */
  if (thumb_mode && fragp->fr_var == 4 && no_cpu_selected ()
      && !ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_t2))
    ARM_MERGE_FEATURE_SETS (arm_arch_used, thumb_arch_used, arm_ext_v6t2);
}
26931
26932/* Return the size of a relaxable immediate operand instruction.
26933   SHIFT and SIZE specify the form of the allowable immediate.  */
26934static int
26935relax_immediate (fragS *fragp, int size, int shift)
26936{
26937  offsetT offset;
26938  offsetT mask;
26939  offsetT low;
26940
26941  /* ??? Should be able to do better than this.  */
26942  if (fragp->fr_symbol)
26943    return 4;
26944
26945  low = (1 << shift) - 1;
26946  mask = (1 << (shift + size)) - (1 << shift);
26947  offset = fragp->fr_offset;
26948  /* Force misaligned offsets to 32-bit variant.  */
26949  if (offset & low)
26950    return 4;
26951  if (offset & ~mask)
26952    return 4;
26953  return 2;
26954}
26955
/* Get the address of a symbol during relaxation.  STRETCH is the net
   amount by which frags already processed in this pass have grown or
   shrunk; a symbol whose frag has not yet been reached is assumed to
   move by the same amount.  */
static addressT
relaxed_symbol_addr (fragS *fragp, long stretch)
{
  fragS *sym_frag;
  addressT addr;
  symbolS *sym;

  sym = fragp->fr_symbol;
  sym_frag = symbol_get_frag (sym);
  know (S_GET_SEGMENT (sym) != absolute_section
	|| sym_frag == &zero_address_frag);
  addr = S_GET_VALUE (sym) + fragp->fr_offset;

  /* If frag has yet to be reached on this pass, assume it will
     move by STRETCH just as we did.  If this is not so, it will
     be because some frag between grows, and that will force
     another pass.  */

  if (stretch != 0
      && sym_frag->relax_marker != fragp->relax_marker)
    {
      fragS *f;

      /* Adjust stretch for any alignment frag.  Note that if have
	 been expanding the earlier code, the symbol may be
	 defined in what appears to be an earlier frag.  FIXME:
	 This doesn't handle the fr_subtype field, which specifies
	 a maximum number of bytes to skip when doing an
	 alignment.  */
      for (f = fragp; f != NULL && f != sym_frag; f = f->fr_next)
	{
	  if (f->fr_type == rs_align || f->fr_type == rs_align_code)
	    {
	      /* Round the stretch towards zero to a multiple of the
		 alignment: an intervening alignment frag absorbs the
		 remainder, so only the rounded part reaches SYM.  */
	      if (stretch < 0)
		stretch = - ((- stretch)
			     & ~ ((1 << (int) f->fr_offset) - 1));
	      else
		stretch &= ~ ((1 << (int) f->fr_offset) - 1);
	      if (stretch == 0)
		break;
	    }
	}
      /* F == NULL means SYM_FRAG was not found ahead of FRAGP, so the
	 symbol will not move with us; only apply STRETCH when its frag
	 lies downstream.  */
      if (f != NULL)
	addr += stretch;
    }

  return addr;
}
27005
/* Return the size (2 or 4 bytes) of a relaxable adr pseudo-instruction
   or PC-relative load.  STRETCH is the running size change of this
   relaxation pass, passed through to relaxed_symbol_addr.  */
static int
relax_adr (fragS *fragp, asection *sec, long stretch)
{
  addressT addr;
  offsetT val;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (fragp->fr_symbol == NULL
      || !S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  addr = fragp->fr_address + fragp->fr_fix;
  /* The base address is the instruction's PC (address + 4) rounded
     down to a word boundary.  */
  addr = (addr + 4) & ~3;
  /* Force misaligned targets to 32-bit variant.  */
  if (val & 3)
    return 4;
  val -= addr;
  /* The narrow form takes an unsigned 8-bit word offset: 0..1020
     bytes.  */
  if (val < 0 || val > 1020)
    return 4;
  return 2;
}
27032
27033/* Return the size of a relaxable add/sub immediate instruction.  */
27034static int
27035relax_addsub (fragS *fragp, asection *sec)
27036{
27037  char *buf;
27038  int op;
27039
27040  buf = fragp->fr_literal + fragp->fr_fix;
27041  op = bfd_get_16(sec->owner, buf);
27042  if ((op & 0xf) == ((op >> 4) & 0xf))
27043    return relax_immediate (fragp, 8, 0);
27044  else
27045    return relax_immediate (fragp, 3, 0);
27046}
27047
27048/* Return TRUE iff the definition of symbol S could be pre-empted
27049   (overridden) at link or load time.  */
27050static bfd_boolean
27051symbol_preemptible (symbolS *s)
27052{
27053  /* Weak symbols can always be pre-empted.  */
27054  if (S_IS_WEAK (s))
27055    return TRUE;
27056
27057  /* Non-global symbols cannot be pre-empted. */
27058  if (! S_IS_EXTERNAL (s))
27059    return FALSE;
27060
27061#ifdef OBJ_ELF
27062  /* In ELF, a global symbol can be marked protected, or private.  In that
27063     case it can't be pre-empted (other definitions in the same link unit
27064     would violate the ODR).  */
27065  if (ELF_ST_VISIBILITY (S_GET_OTHER (s)) > STV_DEFAULT)
27066    return FALSE;
27067#endif
27068
27069  /* Other global symbols might be pre-empted.  */
27070  return TRUE;
27071}
27072
/* Return the size (2 or 4 bytes) of a relaxable branch instruction.
   BITS is the size of the offset field in the narrow instruction;
   STRETCH is the running size change of this relaxation pass.  */

static int
relax_branch (fragS *fragp, asection *sec, int bits, long stretch)
{
  addressT addr;
  offsetT val;
  offsetT limit;

  /* Assume worst case for symbols not known to be in the same section.  */
  if (!S_IS_DEFINED (fragp->fr_symbol)
      || sec != S_GET_SEGMENT (fragp->fr_symbol)
      || S_IS_WEAK (fragp->fr_symbol))
    return 4;

#ifdef OBJ_ELF
  /* A branch to a function in ARM state will require interworking.  */
  if (S_IS_DEFINED (fragp->fr_symbol)
      && ARM_IS_FUNC (fragp->fr_symbol))
      return 4;
#endif

  /* A target that may be overridden at link time cannot be resolved to
     a short branch now.  */
  if (symbol_preemptible (fragp->fr_symbol))
    return 4;

  val = relaxed_symbol_addr (fragp, stretch);
  /* The branch offset is relative to the instruction's address + 4.  */
  addr = fragp->fr_address + fragp->fr_fix + 4;
  val -= addr;

  /* Offset is a signed value *2 */
  limit = 1 << bits;
  if (val >= limit || val < -limit)
    return 4;
  return 2;
}
27109
27110
/* Relax a machine dependent frag.  This returns the amount by which
   the current size of the frag should change.  */

int
arm_relax_frag (asection *sec, fragS *fragp, long stretch)
{
  int oldsize;
  int newsize;

  oldsize = fragp->fr_var;
  /* Dispatch on the opcode stored in fr_subtype; the relax_immediate
     arguments below are the width of the narrow immediate field and
     the scale (left shift) it is stored with.  */
  switch (fragp->fr_subtype)
    {
    case T_MNEM_ldr_pc2:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_ldr_pc:
    case T_MNEM_ldr_sp:
    case T_MNEM_str_sp:
      /* 8-bit offset, word-scaled.  */
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_ldr:
    case T_MNEM_str:
      /* 5-bit offset, word-scaled.  */
      newsize = relax_immediate (fragp, 5, 2);
      break;
    case T_MNEM_ldrh:
    case T_MNEM_strh:
      /* 5-bit offset, halfword-scaled.  */
      newsize = relax_immediate (fragp, 5, 1);
      break;
    case T_MNEM_ldrb:
    case T_MNEM_strb:
      /* 5-bit offset, unscaled.  */
      newsize = relax_immediate (fragp, 5, 0);
      break;
    case T_MNEM_adr:
      newsize = relax_adr (fragp, sec, stretch);
      break;
    case T_MNEM_mov:
    case T_MNEM_movs:
    case T_MNEM_cmp:
    case T_MNEM_cmn:
      /* 8-bit immediate, unscaled.  */
      newsize = relax_immediate (fragp, 8, 0);
      break;
    case T_MNEM_b:
      /* 11-bit branch offset field.  */
      newsize = relax_branch (fragp, sec, 11, stretch);
      break;
    case T_MNEM_bcond:
      /* 8-bit branch offset field.  */
      newsize = relax_branch (fragp, sec, 8, stretch);
      break;
    case T_MNEM_add_sp:
    case T_MNEM_add_pc:
      newsize = relax_immediate (fragp, 8, 2);
      break;
    case T_MNEM_inc_sp:
    case T_MNEM_dec_sp:
      /* 7-bit offset, word-scaled.  */
      newsize = relax_immediate (fragp, 7, 2);
      break;
    case T_MNEM_addi:
    case T_MNEM_addis:
    case T_MNEM_subi:
    case T_MNEM_subis:
      newsize = relax_addsub (fragp, sec);
      break;
    default:
      abort ();
    }

  fragp->fr_var = newsize;
  /* Freeze wide instructions that are at or before the same location as
     in the previous pass.  This avoids infinite loops.
     Don't freeze them unconditionally because targets may be artificially
     misaligned by the expansion of preceding frags.  */
  if (stretch <= 0 && newsize > 2)
    {
      md_convert_frag (sec->owner, sec, fragp);
      frag_wane (fragp);
    }

  return newsize - oldsize;
}
27189
/* Round up a section size to the appropriate boundary.  ARM requires
   no extra rounding, so SIZE is returned unchanged.  */

valueT
md_section_align (segT	 segment ATTRIBUTE_UNUSED,
		  valueT size)
{
  return size;
}
27198
/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
   of an rs_align_code fragment: pad with the NOP encoding that matches
   the ARM/Thumb mode recorded in the frag and the selected
   architecture.  Bytes that cannot hold a whole NOP are zero-filled
   (and, on ELF, marked as data via a mapping symbol).  */

void
arm_handle_align (fragS * fragP)
{
  /* NOP encodings indexed by [architecture][endianness].  */
  static unsigned char const arm_noop[2][2][4] =
    {
      {  /* ARMv1 */
	{0x00, 0x00, 0xa0, 0xe1},  /* LE */
	{0xe1, 0xa0, 0x00, 0x00},  /* BE */
      },
      {  /* ARMv6k */
	{0x00, 0xf0, 0x20, 0xe3},  /* LE */
	{0xe3, 0x20, 0xf0, 0x00},  /* BE */
      },
    };
  static unsigned char const thumb_noop[2][2][2] =
    {
      {  /* Thumb-1 */
	{0xc0, 0x46},  /* LE */
	{0x46, 0xc0},  /* BE */
      },
      {  /* Thumb-2 */
	{0x00, 0xbf},  /* LE */
	{0xbf, 0x00}   /* BE */
      }
    };
  static unsigned char const wide_thumb_noop[2][4] =
    {  /* Wide Thumb-2 */
      {0xaf, 0xf3, 0x00, 0x80},  /* LE */
      {0xf3, 0xaf, 0x80, 0x00},  /* BE */
    };

  unsigned bytes, fix, noop_size;
  char * p;
  const unsigned char * noop;
  const unsigned char *narrow_noop = NULL;
#ifdef OBJ_ELF
  enum mstate state;
#endif

  if (fragP->fr_type != rs_align_code)
    return;

  /* Number of padding bytes this alignment frag must supply.  */
  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
  p = fragP->fr_literal + fragP->fr_fix;
  fix = 0;

  if (bytes > MAX_MEM_FOR_RS_ALIGN_CODE)
    bytes &= MAX_MEM_FOR_RS_ALIGN_CODE;

  gas_assert ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) != 0);

  if (fragP->tc_frag_data.thumb_mode & (~ MODE_RECORDED))
    {
      /* Thumb code.  With Thumb-2 available, prefer wide (4-byte)
	 NOPs, keeping a narrow one to handle an odd halfword count.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
			       ? selected_cpu : arm_arch_none, arm_ext_v6t2))
	{
	  narrow_noop = thumb_noop[1][target_big_endian];
	  noop = wide_thumb_noop[target_big_endian];
	}
      else
	noop = thumb_noop[0][target_big_endian];
      noop_size = 2;
#ifdef OBJ_ELF
      state = MAP_THUMB;
#endif
    }
  else
    {
      /* ARM code: the architected NOP on v6k and later, a no-op
	 MOV r0, r0 otherwise.  */
      noop = arm_noop[ARM_CPU_HAS_FEATURE (selected_cpu_name[0]
					   ? selected_cpu : arm_arch_none,
					   arm_ext_v6k) != 0]
		     [target_big_endian];
      noop_size = 4;
#ifdef OBJ_ELF
      state = MAP_ARM;
#endif
    }

  fragP->fr_var = noop_size;

  if (bytes & (noop_size - 1))
    {
      /* Zero-fill until the remaining count is a multiple of the NOP
	 size.  */
      fix = bytes & (noop_size - 1);
#ifdef OBJ_ELF
      insert_data_mapping_symbol (state, fragP->fr_fix, fragP, fix);
#endif
      memset (p, 0, fix);
      p += fix;
      bytes -= fix;
    }

  if (narrow_noop)
    {
      if (bytes & noop_size)
	{
	  /* Insert a narrow noop.  */
	  memcpy (p, narrow_noop, noop_size);
	  p += noop_size;
	  bytes -= noop_size;
	  fix += noop_size;
	}

      /* Use wide noops for the remainder */
      noop_size = 4;
    }

  while (bytes >= noop_size)
    {
      memcpy (p, noop, noop_size);
      p += noop_size;
      bytes -= noop_size;
      fix += noop_size;
    }

  fragP->fr_fix += fix;
}
27318
27319/* Called from md_do_align.  Used to create an alignment
27320   frag in a code section.  */
27321
27322void
27323arm_frag_align_code (int n, int max)
27324{
27325  char * p;
27326
27327  /* We assume that there will never be a requirement
27328     to support alignments greater than MAX_MEM_FOR_RS_ALIGN_CODE bytes.  */
27329  if (max > MAX_MEM_FOR_RS_ALIGN_CODE)
27330    {
27331      char err_msg[128];
27332
27333      sprintf (err_msg,
27334	_("alignments greater than %d bytes not supported in .text sections."),
27335	MAX_MEM_FOR_RS_ALIGN_CODE + 1);
27336      as_fatal ("%s", err_msg);
27337    }
27338
27339  p = frag_var (rs_align_code,
27340		MAX_MEM_FOR_RS_ALIGN_CODE,
27341		1,
27342		(relax_substateT) max,
27343		(symbolS *) NULL,
27344		(offsetT) n,
27345		(char *) NULL);
27346  *p = 0;
27347}
27348
27349/* Perform target specific initialisation of a frag.
27350   Note - despite the name this initialisation is not done when the frag
27351   is created, but only when its type is assigned.  A frag can be created
27352   and used a long time before its type is set, so beware of assuming that
27353   this initialisation is performed first.  */
27354
27355#ifndef OBJ_ELF
/* Non-ELF variant: no mapping symbols are needed, so only the mode
   needs recording.  */
void
arm_init_frag (fragS * fragP, int max_chars ATTRIBUTE_UNUSED)
{
  /* Record whether this frag is in an ARM or a THUMB area.  */
  fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
}
27362
27363#else /* OBJ_ELF is defined.  */
27364void
27365arm_init_frag (fragS * fragP, int max_chars)
27366{
27367  bfd_boolean frag_thumb_mode;
27368
27369  /* If the current ARM vs THUMB mode has not already
27370     been recorded into this frag then do so now.  */
27371  if ((fragP->tc_frag_data.thumb_mode & MODE_RECORDED) == 0)
27372    fragP->tc_frag_data.thumb_mode = thumb_mode | MODE_RECORDED;
27373
27374  /* PR 21809: Do not set a mapping state for debug sections
27375     - it just confuses other tools.  */
27376  if (bfd_section_flags (now_seg) & SEC_DEBUGGING)
27377    return;
27378
27379  frag_thumb_mode = fragP->tc_frag_data.thumb_mode ^ MODE_RECORDED;
27380
27381  /* Record a mapping symbol for alignment frags.  We will delete this
27382     later if the alignment ends up empty.  */
27383  switch (fragP->fr_type)
27384    {
27385    case rs_align:
27386    case rs_align_test:
27387    case rs_fill:
27388      mapping_state_2 (MAP_DATA, max_chars);
27389      break;
27390    case rs_align_code:
27391      mapping_state_2 (frag_thumb_mode ? MAP_THUMB : MAP_ARM, max_chars);
27392      break;
27393    default:
27394      break;
27395    }
27396}
27397
27398/* When we change sections we need to issue a new mapping symbol.  */
27399
void
arm_elf_change_section (void)
{
  /* Link an unlinked unwind index table section to the .text section.	*/
  /* SHT_ARM_EXIDX sections carry an sh_link to the code section they
     describe; default it to .text when none has been established yet.  */
  if (elf_section_type (now_seg) == SHT_ARM_EXIDX
      && elf_linked_to_section (now_seg) == NULL)
    elf_linked_to_section (now_seg) = text_section;
}
27408
27409int
27410arm_elf_section_type (const char * str, size_t len)
27411{
27412  if (len == 5 && strncmp (str, "exidx", 5) == 0)
27413    return SHT_ARM_EXIDX;
27414
27415  return -1;
27416}
27417
27418/* Code to deal with unwinding tables.	*/
27419
27420static void add_unwind_adjustsp (offsetT);
27421
27422/* Generate any deferred unwind frame offset.  */
27423
static void
flush_pending_unwind (void)
{
  offsetT offset;

  /* Consume the accumulated stack-pointer adjustment and emit it as
     unwind opcodes.  Clear the pending value before calling
     add_unwind_adjustsp: that path goes through add_unwind_opcode,
     which would otherwise call back into this function.  */
  offset = unwind.pending_offset;
  unwind.pending_offset = 0;
  if (offset != 0)
    add_unwind_adjustsp (offset);
}
27434
27435/* Add an opcode to this list for this function.  Two-byte opcodes should
27436   be passed as op[0] << 8 | op[1].  The list of opcodes is built in reverse
27437   order.  */
27438
static void
add_unwind_opcode (valueT op, int length)
{
  /* Add any deferred stack adjustment.	 */
  if (unwind.pending_offset)
    flush_pending_unwind ();

  /* Emitting a new opcode invalidates any previously recorded
     "sp already restored" state.  */
  unwind.sp_restored = 0;

  /* Grow the opcode buffer in ARM_OPCODE_CHUNK_SIZE increments when the
     new opcode will not fit.  */
  if (unwind.opcode_count + length > unwind.opcode_alloc)
    {
      unwind.opcode_alloc += ARM_OPCODE_CHUNK_SIZE;
      if (unwind.opcodes)
	unwind.opcodes = XRESIZEVEC (unsigned char, unwind.opcodes,
				     unwind.opcode_alloc);
      else
	unwind.opcodes = XNEWVEC (unsigned char, unwind.opcode_alloc);
    }
  /* Append the opcode bytes least-significant byte first.  Since the
     list as a whole is built in reverse order, this gives MSB-first
     once the list is reversed by create_unwind_entry.  */
  while (length > 0)
    {
      length--;
      unwind.opcodes[unwind.opcode_count] = op & 0xff;
      op >>= 8;
      unwind.opcode_count++;
    }
}
27465
27466/* Add unwind opcodes to adjust the stack pointer.  */
27467
static void
add_unwind_adjustsp (offsetT offset)
{
  valueT op;

  if (offset > 0x200)
    {
      /* We need at most 5 bytes to hold a 32-bit value in a uleb128.  */
      char bytes[5];
      int n;
      valueT o;

      /* Long form: 0xb2, uleb128.  */
      /* This might not fit in a word so add the individual bytes,
	 remembering the list is built in reverse order.  */
      o = (valueT) ((offset - 0x204) >> 2);
      if (o == 0)
	add_unwind_opcode (0, 1);

      /* Calculate the uleb128 encoding of the offset.	*/
      n = 0;
      while (o)
	{
	  bytes[n] = o & 0x7f;
	  o >>= 7;
	  if (o)
	    /* Continuation bit on all but the final byte.  */
	    bytes[n] |= 0x80;
	  n++;
	}
      /* Add the insn.	*/
      for (; n; n--)
	add_unwind_opcode (bytes[n - 1], 1);
      add_unwind_opcode (0xb2, 1);
    }
  else if (offset > 0x100)
    {
      /* Two short opcodes.  The first (0x3f) is the largest single
	 "vsp +=" short adjustment; the second covers the remainder.  */
      add_unwind_opcode (0x3f, 1);
      op = (offset - 0x104) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset > 0)
    {
      /* Short opcode.	*/
      op = (offset - 4) >> 2;
      add_unwind_opcode (op, 1);
    }
  else if (offset < 0)
    {
      /* Negative adjustment: emit "vsp -=" opcodes (0x40..0x7f), each
	 covering at most 0x100 bytes, until the remainder fits in one.  */
      offset = -offset;
      while (offset > 0x100)
	{
	  add_unwind_opcode (0x7f, 1);
	  offset -= 0x100;
	}
      op = ((offset - 4) >> 2) | 0x40;
      add_unwind_opcode (op, 1);
    }
}
27527
27528/* Finish the list of unwind opcodes for this function.	 */
27529
static void
finish_unwind_opcodes (void)
{
  valueT op;

  if (unwind.fp_used)
    {
      /* Adjust sp as necessary.  */
      unwind.pending_offset += unwind.fp_offset - unwind.frame_size;
      flush_pending_unwind ();

      /* After restoring sp from the frame pointer.  */
      /* EHABI opcode 0x90 | reg: "vsp = r[reg]".  */
      op = 0x90 | unwind.fp_reg;
      add_unwind_opcode (op, 1);
    }
  else
    flush_pending_unwind ();
}
27548
27549
27550/* Start an exception table entry.  If idx is nonzero this is an index table
27551   entry.  */
27552
static void
start_unwind_section (const segT text_seg, int idx)
{
  const char * text_name;
  const char * prefix;
  const char * prefix_once;
  struct elf_section_match match;
  char * sec_name;
  int type;
  int flags;
  int linkonce;

  /* Select the section-name prefix and ELF section type: index table
     entries (IDX != 0) go into SHT_ARM_EXIDX sections, unwind descriptor
     data into ordinary SHT_PROGBITS sections.  */
  if (idx)
    {
      prefix = ELF_STRING_ARM_unwind;
      prefix_once = ELF_STRING_ARM_unwind_once;
      type = SHT_ARM_EXIDX;
    }
  else
    {
      prefix = ELF_STRING_ARM_unwind_info;
      prefix_once = ELF_STRING_ARM_unwind_info_once;
      type = SHT_PROGBITS;
    }

  /* Derive the unwind section name from the name of the code section it
     describes; ".text" itself maps to the bare prefix.  */
  text_name = segment_name (text_seg);
  if (streq (text_name, ".text"))
    text_name = "";

  /* Old-style linkonce code sections use the "_once" prefix with the
     ".gnu.linkonce.t." part of the name stripped.  */
  if (strncmp (text_name, ".gnu.linkonce.t.",
	       strlen (".gnu.linkonce.t.")) == 0)
    {
      prefix = prefix_once;
      text_name += strlen (".gnu.linkonce.t.");
    }

  sec_name = concat (prefix, text_name, (char *) NULL);

  flags = SHF_ALLOC;
  linkonce = 0;
  memset (&match, 0, sizeof (match));

  /* Handle COMDAT group.  The unwind section joins the same group as the
     code section it describes.  */
  if (prefix != prefix_once && (text_seg->flags & SEC_LINK_ONCE) != 0)
    {
      match.group_name = elf_group_name (text_seg);
      if (match.group_name == NULL)
	{
	  as_bad (_("Group section `%s' has no group signature"),
		  segment_name (text_seg));
	  ignore_rest_of_line ();
	  return;
	}
      flags |= SHF_GROUP;
      linkonce = 1;
    }

  obj_elf_change_section (sec_name, type, flags, 0, &match,
			  linkonce, 0);

  /* Set the section link for index tables.  */
  if (idx)
    elf_linked_to_section (now_seg) = text_seg;
}
27617
27618
27619/* Start an unwind table entry.	 HAVE_DATA is nonzero if we have additional
27620   personality routine data.  Returns zero, or the index table value for
27621   an inline entry.  */
27622
static valueT
create_unwind_entry (int have_data)
{
  /* Number of 32-bit data words needed (after rounding, below).  */
  int size;
  addressT where;
  char *ptr;
  /* The current word of data.	*/
  valueT data;
  /* The number of bytes left in this word.  */
  int n;

  finish_unwind_opcodes ();

  /* Remember the current text section.	 */
  unwind.saved_seg = now_seg;
  unwind.saved_subseg = now_subseg;

  start_unwind_section (now_seg, 0);

  if (unwind.personality_routine == NULL)
    {
      if (unwind.personality_index == -2)
	{
	  if (have_data)
	    as_bad (_("handlerdata in cantunwind frame"));
	  return 1; /* EXIDX_CANTUNWIND.  */
	}

      /* Use a default personality routine if none is specified.  */
      if (unwind.personality_index == -1)
	{
	  if (unwind.opcode_count > 3)
	    unwind.personality_index = 1;
	  else
	    unwind.personality_index = 0;
	}

      /* Space for the personality routine entry.  */
      if (unwind.personality_index == 0)
	{
	  if (unwind.opcode_count > 3)
	    as_bad (_("too many unwind opcodes for personality routine 0"));

	  if (!have_data)
	    {
	      /* All the data is inline in the index table.  */
	      data = 0x80;
	      n = 3;
	      while (unwind.opcode_count > 0)
		{
		  unwind.opcode_count--;
		  data = (data << 8) | unwind.opcodes[unwind.opcode_count];
		  n--;
		}

	      /* Pad with "finish" opcodes.  */
	      while (n--)
		data = (data << 8) | 0xb0;

	      return data;
	    }
	  size = 0;
	}
      else
	/* We get two opcodes "free" in the first word.	 */
	size = unwind.opcode_count - 2;
    }
  else
    {
      /* PR 16765: Missing or misplaced unwind directives can trigger this.  */
      if (unwind.personality_index != -1)
	{
	  as_bad (_("attempt to recreate an unwind entry"));
	  return 1;
	}

      /* An extra byte is required for the opcode count.	*/
      size = unwind.opcode_count + 1;
    }

  /* Round up to a whole number of 32-bit words.  */
  size = (size + 3) >> 2;
  if (size > 0xff)
    as_bad (_("too many unwind opcodes"));

  /* The table entry must be word-aligned.  */
  frag_align (2, 0, 0);
  record_alignment (now_seg, 2);
  unwind.table_entry = expr_build_dot ();

  /* Allocate the table entry.	(+4 covers the leading word that holds
     the personality information.)  */
  ptr = frag_more ((size << 2) + 4);
  /* PR 13449: Zero the table entries in case some of them are not used.  */
  memset (ptr, 0, (size << 2) + 4);
  where = frag_now_fix () - ((size << 2) + 4);

  switch (unwind.personality_index)
    {
    case -1:
      /* ??? Should this be a PLT generating relocation?  */
      /* Custom personality routine.  */
      fix_new (frag_now, where, 4, unwind.personality_routine, 0, 1,
	       BFD_RELOC_ARM_PREL31);

      where += 4;
      ptr += 4;

      /* Set the first byte to the number of additional words.	*/
      data = size > 0 ? size - 1 : 0;
      n = 3;
      break;

    /* ABI defined personality routines.  */
    case 0:
      /* Three opcodes bytes are packed into the first word.  */
      data = 0x80;
      n = 3;
      break;

    case 1:
    case 2:
      /* The size and first two opcode bytes go in the first word.  */
      data = ((0x80 + unwind.personality_index) << 8) | size;
      n = 2;
      break;

    default:
      /* Should never happen.  */
      abort ();
    }

  /* Pack the opcodes into words (MSB first), reversing the list at the same
     time.  */
  while (unwind.opcode_count > 0)
    {
      if (n == 0)
	{
	  md_number_to_chars (ptr, data, 4);
	  ptr += 4;
	  n = 4;
	  data = 0;
	}
      unwind.opcode_count--;
      n--;
      data = (data << 8) | unwind.opcodes[unwind.opcode_count];
    }

  /* Finish off the last word.	*/
  if (n < 4)
    {
      /* Pad with "finish" opcodes.  */
      while (n--)
	data = (data << 8) | 0xb0;

      md_number_to_chars (ptr, data, 4);
    }

  if (!have_data)
    {
      /* Add an empty descriptor if there is no user-specified data.   */
      ptr = frag_more (4);
      md_number_to_chars (ptr, 0, 4);
    }

  return 0;
}
27787
27788
27789/* Initialize the DWARF-2 unwind information for this procedure.  */
27790
void
tc_arm_frame_initial_instructions (void)
{
  /* At function entry the CFA is the value of the stack pointer
     (offset 0).  */
  cfi_add_CFA_def_cfa (REG_SP, 0);
}
27796#endif /* OBJ_ELF */
27797
27798/* Convert REGNAME to a DWARF-2 register number.  */
27799
27800int
27801tc_arm_regname_to_dw2regnum (char *regname)
27802{
27803  int reg = arm_reg_parse (&regname, REG_TYPE_RN);
27804  if (reg != FAIL)
27805    return reg;
27806
27807  /* PR 16694: Allow VFP registers as well.  */
27808  reg = arm_reg_parse (&regname, REG_TYPE_VFS);
27809  if (reg != FAIL)
27810    return 64 + reg;
27811
27812  reg = arm_reg_parse (&regname, REG_TYPE_VFD);
27813  if (reg != FAIL)
27814    return reg + 256;
27815
27816  return FAIL;
27817}
27818
27819#ifdef TE_PE
void
tc_pe_dwarf2_emit_offset (symbolS *symbol, unsigned int size)
{
  expressionS exp;

  /* Emit a SIZE-byte section-relative (O_secrel) reference to SYMBOL,
     as PE targets use for DWARF debug-section offsets.  */
  exp.X_op = O_secrel;
  exp.X_add_symbol = symbol;
  exp.X_add_number = 0;
  emit_expr (&exp, size);
}
27830#endif
27831
27832/* MD interface: Symbol and relocation handling.  */
27833
27834/* Return the address within the segment that a PC-relative fixup is
27835   relative to.  For ARM, PC-relative fixups applied to instructions
27836   are generally relative to the location of the fixup plus 8 bytes.
27837   Thumb branches are offset by 4, and Thumb loads relative to PC
27838   require special handling.  */
27839
long
md_pcrel_from_section (fixS * fixP, segT seg)
{
  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;

  /* If this is pc-relative and we are going to emit a relocation
     then we just want to put out any pipeline compensation that the linker
     will need.  Otherwise we want to use the calculated base.
     For WinCE we skip the bias for externals as well, since this
     is how the MS ARM-CE assembler behaves and we want to be compatible.  */
  if (fixP->fx_pcrel
      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
	  || (arm_force_relocation (fixP)
#ifdef TE_WINCE
	      && !S_IS_EXTERNAL (fixP->fx_addsy)
#endif
	      )))
    base = 0;


  /* Apply the per-relocation pipeline bias on top of BASE.  */
  switch (fixP->fx_r_type)
    {
      /* PC relative addressing on the Thumb is slightly odd as the
	 bottom two bits of the PC are forced to zero for the
	 calculation.  This happens *after* application of the
	 pipeline offset.  However, Thumb adrl already adjusts for
	 this, so we need not do it again.  */
    case BFD_RELOC_ARM_THUMB_ADD:
      return base & ~3;

    case BFD_RELOC_ARM_THUMB_OFFSET:
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
    case BFD_RELOC_ARM_T32_ADD_PC12:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
      return (base + 4) & ~3;

      /* Thumb branches are simply offset by +4.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      return base + 4;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* Undo the BASE = 0 adjustment above when the target is a function
	 in the same section and the CPU has v5T interworking: the branch
	 may be converted, so it must be computed from the real address.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
       return base + 4;

      /* BLX is like branches above, but forces the low two bits of PC to
	 zero.  */
    case BFD_RELOC_THUMB_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return (base + 4) & ~3;

      /* ARM mode branches are offset by +8.  However, the Windows CE
	 loader expects the relocation not to take this into account.  */
    case BFD_RELOC_ARM_PCREL_BLX:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_CALL:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && THUMB_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	base = fixP->fx_where + fixP->fx_frag->fr_address;
      return base + 8;

    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_PLT32:
#ifdef TE_WINCE
      /* When handling fixups immediately, because we have already
	 discovered the value of a symbol, or the address of the frag involved
	 we must account for the offset by +8, as the OS loader will never see the reloc.
	 see fixup_segment() in write.c
	 The S_IS_EXTERNAL test handles the case of global symbols.
	 Those need the calculated base, not just the pipe compensation the linker will need.  */
      if (fixP->fx_pcrel
	  && fixP->fx_addsy != NULL
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && (S_IS_EXTERNAL (fixP->fx_addsy) || !arm_force_relocation (fixP)))
	return base + 8;
      return base;
#else
      return base + 8;
#endif


      /* ARM mode loads relative to PC are also offset by +8.  Unlike
	 branches, the Windows CE loader *does* expect the relocation
	 to take this into account.  */
    case BFD_RELOC_ARM_OFFSET_IMM:
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_CP_OFF_IMM:
      return base + 8;


      /* Other PC-relative relocations are un-offset.  */
    default:
      return base;
    }
}
27967
27968static bfd_boolean flag_warn_syms = TRUE;
27969
27970bfd_boolean
27971arm_tc_equal_in_insn (int c ATTRIBUTE_UNUSED, char * name)
27972{
27973  /* PR 18347 - Warn if the user attempts to create a symbol with the same
27974     name as an ARM instruction.  Whilst strictly speaking it is allowed, it
27975     does mean that the resulting code might be very confusing to the reader.
27976     Also this warning can be triggered if the user omits an operand before
27977     an immediate address, eg:
27978
27979       LDR =foo
27980
27981     GAS treats this as an assignment of the value of the symbol foo to a
27982     symbol LDR, and so (without this code) it will not issue any kind of
27983     warning or error message.
27984
27985     Note - ARM instructions are case-insensitive but the strings in the hash
27986     table are all stored in lower case, so we must first ensure that name is
27987     lower case too.  */
27988  if (flag_warn_syms && arm_ops_hsh)
27989    {
27990      char * nbuf = strdup (name);
27991      char * p;
27992
27993      for (p = nbuf; *p; p++)
27994	*p = TOLOWER (*p);
27995      if (str_hash_find (arm_ops_hsh, nbuf) != NULL)
27996	{
27997	  static htab_t  already_warned = NULL;
27998
27999	  if (already_warned == NULL)
28000	    already_warned = str_htab_create ();
28001	  /* Only warn about the symbol once.  To keep the code
28002	     simple we let str_hash_insert do the lookup for us.  */
28003	  if (str_hash_find (already_warned, nbuf) == NULL)
28004	    {
28005	      as_warn (_("[-mwarn-syms]: Assignment makes a symbol match an ARM instruction: %s"), name);
28006	      str_hash_insert (already_warned, nbuf, NULL, 0);
28007	    }
28008	}
28009      else
28010	free (nbuf);
28011    }
28012
28013  return FALSE;
28014}
28015
28016/* Under ELF we need to default _GLOBAL_OFFSET_TABLE.
28017   Otherwise we have no need to default values of symbols.  */
28018
symbolS *
md_undefined_symbol (char * name ATTRIBUTE_UNUSED)
{
#ifdef OBJ_ELF
  /* Cheap first-two-character check before the full string compare.  */
  if (name[0] == '_' && name[1] == 'G'
      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
    {
      /* Create the GOT symbol lazily, on first reference.  */
      if (!GOT_symbol)
	{
	  if (symbol_find (name))
	    as_bad (_("GOT already in the symbol table"));

	  GOT_symbol = symbol_new (name, undefined_section,
				   &zero_address_frag, 0);
	}

      return GOT_symbol;
    }
#endif

  return NULL;
}
28041
28042/* Subroutine of md_apply_fix.	 Check to see if an immediate can be
28043   computed as two separate immediate values, added together.  We
28044   already know that this value cannot be computed by just one ARM
28045   instruction.	 */
28046
static unsigned int
validate_immediate_twopart (unsigned int   val,
			    unsigned int * highpart)
{
  unsigned int a;
  unsigned int i;

  /* Try every even rotation amount, looking for a rotation that leaves
     VAL as two 8-bit chunks, each representable as an ARM rotated
     immediate.  The returned encodings are imm8 | (rotation/2 << 8);
     since I is the rotation amount, (i << 7) == (i/2 << 8).  */
  for (i = 0; i < 32; i += 2)
    if (((a = rotate_left (val, i)) & 0xff) != 0)
      {
	if (a & 0xff00)
	  {
	    /* Low byte plus second byte; fail if anything higher is set.  */
	    if (a & ~ 0xffff)
	      continue;
	    * highpart = (a  >> 8) | ((i + 24) << 7);
	  }
	else if (a & 0xff0000)
	  {
	    if (a & 0xff000000)
	      continue;
	    * highpart = (a >> 16) | ((i + 16) << 7);
	  }
	else
	  {
	    gas_assert (a & 0xff000000);
	    * highpart = (a >> 24) | ((i + 8) << 7);
	  }

	/* Return the encoding of the low chunk; *HIGHPART already holds
	   the encoding of the high chunk.  */
	return (a & 0xff) | (i << 7);
      }

  return FAIL;
}
28080
28081static int
28082validate_offset_imm (unsigned int val, int hwse)
28083{
28084  if ((hwse && val > 255) || val > 4095)
28085    return FAIL;
28086  return val;
28087}
28088
28089/* Subroutine of md_apply_fix.	 Do those data_ops which can take a
28090   negative immediate constant by altering the instruction.  A bit of
28091   a hack really.
28092	MOV <-> MVN
28093	AND <-> BIC
28094	ADC <-> SBC
28095	by inverting the second operand, and
28096	ADD <-> SUB
28097	CMP <-> CMN
28098	by negating the second operand.	 */
28099
static int
negate_data_op (unsigned long * instruction,
		unsigned long	value)
{
  int op, new_inst;
  unsigned long negated, inverted;

  /* Candidate immediates for the negated and inverted forms; either may
     be FAIL if not representable as an ARM rotated immediate.  */
  negated = encode_arm_immediate (-value);
  inverted = encode_arm_immediate (~value);

  /* Select the replacement opcode and the matching transformed value.  */
  op = (*instruction >> DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* First negates.	 */
    case OPCODE_SUB:		 /* ADD <-> SUB	 */
      new_inst = OPCODE_ADD;
      value = negated;
      break;

    case OPCODE_ADD:
      new_inst = OPCODE_SUB;
      value = negated;
      break;

    case OPCODE_CMP:		 /* CMP <-> CMN	 */
      new_inst = OPCODE_CMN;
      value = negated;
      break;

    case OPCODE_CMN:
      new_inst = OPCODE_CMP;
      value = negated;
      break;

      /* Now Inverted ops.  */
    case OPCODE_MOV:		 /* MOV <-> MVN	 */
      new_inst = OPCODE_MVN;
      value = inverted;
      break;

    case OPCODE_MVN:
      new_inst = OPCODE_MOV;
      value = inverted;
      break;

    case OPCODE_AND:		 /* AND <-> BIC	 */
      new_inst = OPCODE_BIC;
      value = inverted;
      break;

    case OPCODE_BIC:
      new_inst = OPCODE_AND;
      value = inverted;
      break;

    case OPCODE_ADC:		  /* ADC <-> SBC  */
      new_inst = OPCODE_SBC;
      value = inverted;
      break;

    case OPCODE_SBC:
      new_inst = OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.	 */
    default:
      return FAIL;
    }

  if (value == (unsigned) FAIL)
    return FAIL;

  /* Substitute the replacement opcode into the instruction and return
     the new immediate encoding.  */
  *instruction &= OPCODE_MASK;
  *instruction |= new_inst << DATA_OP_SHIFT;
  return value;
}
28177
28178/* Like negate_data_op, but for Thumb-2.   */
28179
static unsigned int
thumb32_negate_data_op (valueT *instruction, unsigned int value)
{
  unsigned int op, new_inst;
  unsigned int rd;
  unsigned int negated, inverted;

  /* Candidate immediates for the negated and inverted forms; either may
     be FAIL if not representable as a Thumb-2 modified immediate.  */
  negated = encode_thumb32_immediate (-value);
  inverted = encode_thumb32_immediate (~value);

  rd = (*instruction >> 8) & 0xf;
  op = (*instruction >> T2_DATA_OP_SHIFT) & 0xf;
  switch (op)
    {
      /* ADD <-> SUB.  Includes CMP <-> CMN.  */
    case T2_OPCODE_SUB:
      new_inst = T2_OPCODE_ADD;
      value = negated;
      break;

    case T2_OPCODE_ADD:
      new_inst = T2_OPCODE_SUB;
      value = negated;
      break;

      /* ORR <-> ORN.  Includes MOV <-> MVN.  */
    case T2_OPCODE_ORR:
      new_inst = T2_OPCODE_ORN;
      value = inverted;
      break;

    case T2_OPCODE_ORN:
      new_inst = T2_OPCODE_ORR;
      value = inverted;
      break;

      /* AND <-> BIC.  TST has no inverted equivalent.  */
    case T2_OPCODE_AND:
      new_inst = T2_OPCODE_BIC;
      /* Rd == 15 means this is TST (AND with the result discarded), for
	 which no inverted form exists.  */
      if (rd == 15)
	value = FAIL;
      else
	value = inverted;
      break;

    case T2_OPCODE_BIC:
      new_inst = T2_OPCODE_AND;
      value = inverted;
      break;

      /* ADC <-> SBC  */
    case T2_OPCODE_ADC:
      new_inst = T2_OPCODE_SBC;
      value = inverted;
      break;

    case T2_OPCODE_SBC:
      new_inst = T2_OPCODE_ADC;
      value = inverted;
      break;

      /* We cannot do anything.	 */
    default:
      return FAIL;
    }

  if (value == (unsigned int)FAIL)
    return FAIL;

  /* Substitute the replacement opcode into the instruction and return
     the new immediate encoding.  */
  *instruction &= T2_OPCODE_MASK;
  *instruction |= new_inst << T2_DATA_OP_SHIFT;
  return value;
}
28253
28254/* Read a 32-bit thumb instruction from buf.  */
28255
28256static unsigned long
28257get_thumb32_insn (char * buf)
28258{
28259  unsigned long insn;
28260  insn = md_chars_to_number (buf, THUMB_SIZE) << 16;
28261  insn |= md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
28262
28263  return insn;
28264}
28265
28266/* We usually want to set the low bit on the address of thumb function
28267   symbols.  In particular .word foo - . should have the low bit set.
28268   Generic code tries to fold the difference of two symbols to
28269   a constant.  Prevent this and force a relocation when the first symbols
28270   is a thumb function.  */
28271
bfd_boolean
arm_optimize_expr (expressionS *l, operatorT op, expressionS *r)
{
  /* Turn sym1 - sym2 into a single O_subtract expression (rather than
     letting generic code fold it to a constant) when sym1 is a Thumb
     function, so the low-bit handling is preserved via a relocation.  */
  if (op == O_subtract
      && l->X_op == O_symbol
      && r->X_op == O_symbol
      && THUMB_IS_FUNC (l->X_add_symbol))
    {
      l->X_op = O_subtract;
      l->X_op_symbol = r->X_add_symbol;
      l->X_add_number -= r->X_add_number;
      /* Returning TRUE tells the caller the expression was consumed.  */
      return TRUE;
    }

  /* Process as normal.  */
  return FALSE;
}
28289
28290/* Encode Thumb2 unconditional branches and calls. The encoding
28291   for the 2 are identical for the immediate values.  */
28292
static void
encode_thumb2_b_bl_offset (char * buf, offsetT value)
{
#define T2I1I2MASK  ((1 << 13) | (1 << 11))
  offsetT newval;
  offsetT newval2;
  addressT S, I1, I2, lo, hi;

  /* Split the branch offset into the fields of the 32-bit T2 encoding:
     S (sign, bit 24), I1/I2 (bits 23/22), a 10-bit high part and an
     11-bit low part.  Bit 0 is discarded (halfword alignment).  */
  S = (value >> 24) & 0x01;
  I1 = (value >> 23) & 0x01;
  I2 = (value >> 22) & 0x01;
  hi = (value >> 12) & 0x3ff;
  lo = (value >> 1) & 0x7ff;
  newval   = md_chars_to_number (buf, THUMB_SIZE);
  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
  newval  |= (S << 10) | hi;
  newval2 &=  ~T2I1I2MASK;
  /* The encoding stores J1 = I1 EOR (NOT S) and J2 = I2 EOR (NOT S);
     the final XOR with T2I1I2MASK implements the NOT.  */
  newval2 |= (((I1 ^ S) << 13) | ((I2 ^ S) << 11) | lo) ^ T2I1I2MASK;
  md_number_to_chars (buf, newval, THUMB_SIZE);
  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
}
28314
28315void
28316md_apply_fix (fixS *	fixP,
28317	       valueT * valP,
28318	       segT	seg)
28319{
28320  valueT	 value = * valP;
28321  valueT	 newval;
28322  unsigned int	 newimm;
28323  unsigned long	 temp;
28324  int		 sign;
28325  char *	 buf = fixP->fx_where + fixP->fx_frag->fr_literal;
28326
28327  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
28328
28329  /* Note whether this will delete the relocation.  */
28330
28331  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
28332    fixP->fx_done = 1;
28333
28334  /* On a 64-bit host, silently truncate 'value' to 32 bits for
28335     consistency with the behaviour on 32-bit hosts.  Remember value
28336     for emit_reloc.  */
28337  value &= 0xffffffff;
28338  value ^= 0x80000000;
28339  value -= 0x80000000;
28340
28341  *valP = value;
28342  fixP->fx_addnumber = value;
28343
28344  /* Same treatment for fixP->fx_offset.  */
28345  fixP->fx_offset &= 0xffffffff;
28346  fixP->fx_offset ^= 0x80000000;
28347  fixP->fx_offset -= 0x80000000;
28348
28349  switch (fixP->fx_r_type)
28350    {
28351    case BFD_RELOC_NONE:
28352      /* This will need to go in the object file.  */
28353      fixP->fx_done = 0;
28354      break;
28355
28356    case BFD_RELOC_ARM_IMMEDIATE:
28357      /* We claim that this fixup has been processed here,
28358	 even if in fact we generate an error because we do
28359	 not have a reloc for it, so tc_gen_reloc will reject it.  */
28360      fixP->fx_done = 1;
28361
28362      if (fixP->fx_addsy)
28363	{
28364	  const char *msg = 0;
28365
28366	  if (! S_IS_DEFINED (fixP->fx_addsy))
28367	    msg = _("undefined symbol %s used as an immediate value");
28368	  else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
28369	    msg = _("symbol %s is in a different section");
28370	  else if (S_IS_WEAK (fixP->fx_addsy))
28371	    msg = _("symbol %s is weak and may be overridden later");
28372
28373	  if (msg)
28374	    {
28375	      as_bad_where (fixP->fx_file, fixP->fx_line,
28376			    msg, S_GET_NAME (fixP->fx_addsy));
28377	      break;
28378	    }
28379	}
28380
28381      temp = md_chars_to_number (buf, INSN_SIZE);
28382
28383      /* If the offset is negative, we should use encoding A2 for ADR.  */
28384      if ((temp & 0xfff0000) == 0x28f0000 && (offsetT) value < 0)
28385	newimm = negate_data_op (&temp, value);
28386      else
28387	{
28388	  newimm = encode_arm_immediate (value);
28389
28390	  /* If the instruction will fail, see if we can fix things up by
28391	     changing the opcode.  */
28392	  if (newimm == (unsigned int) FAIL)
28393	    newimm = negate_data_op (&temp, value);
28394	  /* MOV accepts both ARM modified immediate (A1 encoding) and
28395	     UINT16 (A2 encoding) when possible, MOVW only accepts UINT16.
28396	     When disassembling, MOV is preferred when there is no encoding
28397	     overlap.  */
28398	  if (newimm == (unsigned int) FAIL
28399	      && ((temp >> DATA_OP_SHIFT) & 0xf) == OPCODE_MOV
28400	      && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)
28401	      && !((temp >> SBIT_SHIFT) & 0x1)
28402	      && value <= 0xffff)
28403	    {
28404	      /* Clear bits[23:20] to change encoding from A1 to A2.  */
28405	      temp &= 0xff0fffff;
28406	      /* Encoding high 4bits imm.  Code below will encode the remaining
28407		 low 12bits.  */
28408	      temp |= (value & 0x0000f000) << 4;
28409	      newimm = value & 0x00000fff;
28410	    }
28411	}
28412
28413      if (newimm == (unsigned int) FAIL)
28414	{
28415	  as_bad_where (fixP->fx_file, fixP->fx_line,
28416			_("invalid constant (%lx) after fixup"),
28417			(unsigned long) value);
28418	  break;
28419	}
28420
28421      newimm |= (temp & 0xfffff000);
28422      md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);
28423      break;
28424
    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      /* ADRL pseudo-instruction: materialise a PC-relative address using
	 one data-processing insn plus a second insn (or a NOP when one
	 suffices).  The two-instruction split is computed here at fixup
	 time once the final offset is known.  */
      {
	unsigned int highpart = 0;
	unsigned int newinsn  = 0xe1a00000; /* nop.  */

	/* The offset must be fully resolvable now: reject undefined,
	   out-of-section, or weak (preemptible) symbols.  */
	if (fixP->fx_addsy)
	  {
	    const char *msg = 0;

	    if (! S_IS_DEFINED (fixP->fx_addsy))
	      msg = _("undefined symbol %s used as an immediate value");
	    else if (S_GET_SEGMENT (fixP->fx_addsy) != seg)
	      msg = _("symbol %s is in a different section");
	    else if (S_IS_WEAK (fixP->fx_addsy))
	      msg = _("symbol %s is weak and may be overridden later");

	    if (msg)
	      {
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      msg, S_GET_NAME (fixP->fx_addsy));
		break;
	      }
	  }

	newimm = encode_arm_immediate (value);
	temp = md_chars_to_number (buf, INSN_SIZE);

	/* If the instruction will fail, see if we can fix things up by
	   changing the opcode.	 */
	if (newimm == (unsigned int) FAIL
	    && (newimm = negate_data_op (& temp, value)) == (unsigned int) FAIL)
	  {
	    /* No ?  OK - try using two ADD instructions to generate
	       the value.  */
	    newimm = validate_immediate_twopart (value, & highpart);

	    /* Yes - then make sure that the second instruction is
	       also an add.  */
	    if (newimm != (unsigned int) FAIL)
	      newinsn = temp;
	    /* Still No ?  Try using a negated value.  */
	    else if ((newimm = validate_immediate_twopart (- value, & highpart)) != (unsigned int) FAIL)
	      temp = newinsn = (temp & OPCODE_MASK) | OPCODE_SUB << DATA_OP_SHIFT;
	    /* Otherwise - give up.  */
	    else
	      {
		as_bad_where (fixP->fx_file, fixP->fx_line,
			      _("unable to compute ADRL instructions for PC offset of 0x%lx"),
			      (long) value);
		break;
	      }

	    /* Replace the first operand in the 2nd instruction (which
	       is the PC) with the destination register.  We have
	       already added in the PC in the first instruction and we
	       do not want to do it again.  */
	    newinsn &= ~ 0xf0000;
	    newinsn |= ((newinsn & 0x0f000) << 4);
	  }

	/* First insn: opcode/condition bits from TEMP, low immediate.  */
	newimm |= (temp & 0xfffff000);
	md_number_to_chars (buf, (valueT) newimm, INSN_SIZE);

	/* Second insn: high immediate part, or the NOP chosen above.  */
	highpart |= (newinsn & 0xfffff000);
	md_number_to_chars (buf + INSN_SIZE, (valueT) highpart, INSN_SIZE);
      }
      break;
28492
    case BFD_RELOC_ARM_OFFSET_IMM:
      if (!fixP->fx_done && seg->use_rela_p)
	value = 0;
      /* Fall through.  */

    case BFD_RELOC_ARM_LITERAL:
      /* ARM 12-bit load/store offset.  The magnitude goes in the
	 immediate field; the sign is carried by the U (add/subtract
	 index) bit rather than by the immediate itself.  */
      sign = (offsetT) value > 0;

      if ((offsetT) value < 0)
	value = - value;

      if (validate_offset_imm (value, 0) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_LITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	newval &= 0xfffff000;
      else
	{
	  /* Clear the old 12-bit offset and the U bit, then insert
	     the new magnitude and direction.  */
	  newval &= 0xff7ff000;
	  newval |= value | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
28526
    case BFD_RELOC_ARM_OFFSET_IMM8:
    case BFD_RELOC_ARM_HWLITERAL:
      /* ARM 8-bit load/store offset (halfword/doubleword forms); the
	 immediate is split into two nibbles and the sign goes in the
	 U bit, as for the 12-bit case above.  */
      sign = (offsetT) value > 0;

      if ((offsetT) value < 0)
	value = - value;

      if (validate_offset_imm (value, 1) == FAIL)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_HWLITERAL)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid literal constant: pool needs to be closer"));
	  else
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("bad immediate value for 8-bit offset (%ld)"),
			  (long) value);
	  break;
	}

      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value == 0)
	newval &= 0xfffff0f0;
      else
	{
	  /* High nibble to bits [11:8], low nibble to bits [3:0].  */
	  newval &= 0xff7ff0f0;
	  newval |= ((value >> 4) << 8) | (value & 0xf) | (sign ? INDEX_UP : 0);
	}
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
28556
    case BFD_RELOC_ARM_T32_OFFSET_U8:
      /* Thumb unsigned 8-bit offset, scaled by 4 (range 0..1020),
	 stored in the second halfword of the instruction.  */
      if (value > 1020 || value % 4 != 0)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("bad immediate value for offset (%ld)"), (long) value);
      value /= 4;

      newval = md_chars_to_number (buf+2, THUMB_SIZE);
      newval |= value;
      md_number_to_chars (buf+2, newval, THUMB_SIZE);
      break;
28567
    case BFD_RELOC_ARM_T32_OFFSET_IMM:
      /* This is a complicated relocation used for all varieties of Thumb32
	 load/store instruction with immediate offset:

	 1110 100P u1WL NNNN XXXX YYYY iiii iiii - +/-(U) pre/post(P) 8-bit,
						   *4, optional writeback(W)
						   (doubleword load/store)

	 1111 100S uTTL 1111 XXXX iiii iiii iiii - +/-(U) 12-bit PC-rel
	 1111 100S 0TTL NNNN XXXX 1Pu1 iiii iiii - +/-(U) pre/post(P) 8-bit
	 1111 100S 0TTL NNNN XXXX 1110 iiii iiii - positive 8-bit (T instruction)
	 1111 100S 1TTL NNNN XXXX iiii iiii iiii - positive 12-bit
	 1111 100S 0TTL NNNN XXXX 1100 iiii iiii - negative 8-bit

	 Uppercase letters indicate bits that are already encoded at
	 this point.  Lowercase letters are our problem.  For the
	 second block of instructions, the secondary opcode nybble
	 (bits 8..11) is present, and bit 23 is zero, even if this is
	 a PC-relative operation.  */
      /* Assemble the full 32-bit encoding from the two halfwords so the
	 variant tests below can inspect it as one word.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+THUMB_SIZE, THUMB_SIZE);

      if ((newval & 0xf0000000) == 0xe0000000)
	{
	  /* Doubleword load/store: 8-bit offset, scaled by 4.  */
	  if ((offsetT) value >= 0)
	    newval |= (1 << 23);
	  else
	    value = -value;
	  if (value % 4 != 0)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset not a multiple of 4"));
	      break;
	    }
	  value /= 4;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x000f0000) == 0x000f0000)
	{
	  /* PC-relative, 12-bit offset.  */
	  if ((offsetT) value >= 0)
	    newval |= (1 << 23);
	  else
	    value = -value;
	  if (value > 0xfff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xfff;
	}
      else if ((newval & 0x00000100) == 0x00000100)
	{
	  /* Writeback: 8-bit, +/- offset.  */
	  if ((offsetT) value >= 0)
	    newval |= (1 << 9);
	  else
	    value = -value;
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	}
      else if ((newval & 0x00000f00) == 0x00000e00)
	{
	  /* T-instruction: positive 8-bit offset.  */
	  if (value > 0xff)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~0xff;
	  newval |= value;
	}
      else
	{
	  /* Positive 12-bit or negative 8-bit offset.  */
	  unsigned int limit;
	  if ((offsetT) value >= 0)
	    {
	      newval |= (1 << 23);
	      limit = 0xfff;
	    }
	  else
	    {
	      value = -value;
	      limit = 0xff;
	    }
	  if (value > limit)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("offset out of range"));
	      break;
	    }
	  newval &= ~limit;
	}

      /* Insert the (possibly scaled) magnitude and write the insn
	 back as two halfwords.  */
      newval |= value;
      md_number_to_chars (buf, (newval >> 16) & 0xffff, THUMB_SIZE);
      md_number_to_chars (buf + THUMB_SIZE, newval & 0xffff, THUMB_SIZE);
      break;
28682
    case BFD_RELOC_ARM_SHIFT_IMM:
      /* 5-bit shift amount in bits [11:7].  A shift of 32 is encoded
	 as 0 for LSR/ASR (the 0x60 test picks out the shift type);
	 LSL/ROR cannot encode 32.  */
      newval = md_chars_to_number (buf, INSN_SIZE);
      if (value > 32
	  || (value == 32
	      && (((newval & 0x60) == 0) || (newval & 0x60) == 0x60)))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("shift expression is too large"));
	  break;
	}

      if (value == 0)
	/* Shifts of zero must be done as lsl.	*/
	newval &= ~0x60;
      else if (value == 32)
	value = 0;
      newval &= 0xfffff07f;
      newval |= (value & 0x1f) << 7;
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
28703
    case BFD_RELOC_ARM_T32_IMMEDIATE:
    case BFD_RELOC_ARM_T32_ADD_IMM:
    case BFD_RELOC_ARM_T32_IMM12:
    case BFD_RELOC_ARM_T32_ADD_PC12:
      /* Thumb32 modified-immediate / 12-bit immediate fixups.  Tries a
	 T2 modified immediate first, then opcode negation, then the
	 plain 12-bit (ADDW/SUBW or MOVW) fallbacks.  */
      /* We claim that this fixup has been processed here,
	 even if in fact we generate an error because we do
	 not have a reloc for it, so tc_gen_reloc will reject it.  */
      fixP->fx_done = 1;

      if (fixP->fx_addsy
	  && ! S_IS_DEFINED (fixP->fx_addsy))
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("undefined symbol %s used as an immediate value"),
			S_GET_NAME (fixP->fx_addsy));
	  break;
	}

      /* Reassemble the 32-bit encoding from its two halfwords.  */
      newval = md_chars_to_number (buf, THUMB_SIZE);
      newval <<= 16;
      newval |= md_chars_to_number (buf+2, THUMB_SIZE);

      newimm = FAIL;
      if ((fixP->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
	   /* ARMv8-M Baseline MOV will reach here, but it doesn't support
	      Thumb2 modified immediate encoding (T2).  */
	   && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2))
	  || fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
	{
	  newimm = encode_thumb32_immediate (value);
	  if (newimm == (unsigned int) FAIL)
	    newimm = thumb32_negate_data_op (&newval, value);
	}
      if (newimm == (unsigned int) FAIL)
	{
	  if (fixP->fx_r_type != BFD_RELOC_ARM_T32_IMMEDIATE)
	    {
	      /* Turn add/sum into addw/subw.  */
	      if (fixP->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM)
		newval = (newval & 0xfeffffff) | 0x02000000;
	      /* No flat 12-bit imm encoding for addsw/subsw.  */
	      if ((newval & 0x00100000) == 0)
		{
		  /* 12 bit immediate for addw/subw.  */
		  if ((offsetT) value < 0)
		    {
		      value = -value;
		      newval ^= 0x00a00000;
		    }
		  if (value > 0xfff)
		    newimm = (unsigned int) FAIL;
		  else
		    newimm = value;
		}
	    }
	  else
	    {
	      /* MOV accepts both Thumb2 modified immediate (T2 encoding) and
		 UINT16 (T3 encoding), MOVW only accepts UINT16.  When
		 disassembling, MOV is preferred when there is no encoding
		 overlap.  */
	      if (((newval >> T2_DATA_OP_SHIFT) & 0xf) == T2_OPCODE_ORR
		  /* NOTE: MOV uses the ORR opcode in Thumb 2 mode
		     but with the Rn field [19:16] set to 1111.  */
		  && (((newval >> 16) & 0xf) == 0xf)
		  && ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2_v8m)
		  && !((newval >> T2_SBIT_SHIFT) & 0x1)
		  && value <= 0xffff)
		{
		  /* Toggle bit[25] to change encoding from T2 to T3.  */
		  newval ^= 1 << 25;
		  /* Clear bits[19:16].  */
		  newval &= 0xfff0ffff;
		  /* Encoding high 4bits imm.  Code below will encode the
		     remaining low 12bits.  */
		  newval |= (value & 0x0000f000) << 4;
		  newimm = value & 0x00000fff;
		}
	    }
	}

      if (newimm == (unsigned int)FAIL)
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("invalid constant (%lx) after fixup"),
			(unsigned long) value);
	  break;
	}

      /* Scatter the immediate into the i:imm3:imm8 fields.  */
      newval |= (newimm & 0x800) << 15;
      newval |= (newimm & 0x700) << 4;
      newval |= (newimm & 0x0ff);

      md_number_to_chars (buf,   (valueT) ((newval >> 16) & 0xffff), THUMB_SIZE);
      md_number_to_chars (buf+2, (valueT) (newval & 0xffff), THUMB_SIZE);
      break;
28800
    case BFD_RELOC_ARM_SMC:
      /* SMC takes a 4-bit immediate in bits [3:0].  */
      if (value > 0xf)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid smc expression"));

      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;

    case BFD_RELOC_ARM_HVC:
      /* HVC takes a 16-bit immediate split across bits [19:8] and [3:0].  */
      if (value > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid hvc expression"));
      newval = md_chars_to_number (buf, INSN_SIZE);
      newval |= (value & 0xf) | ((value & 0xfff0) << 4);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
28819
    case BFD_RELOC_ARM_SWI:
      /* tc_fix_data non-zero marks the Thumb form (8-bit comment field);
	 otherwise this is the ARM form with a 24-bit comment field.  */
      if (fixP->tc_fix_data != 0)
	{
	  if (value > 0xff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (value > 0x00ffffff)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid swi expression"));
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= value;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;

    case BFD_RELOC_ARM_MULTI:
      /* Load/store multiple register mask: 16 bits, one per register.  */
      if (value > 0xffff)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("invalid expression in load/store multiple"));
      newval = value | md_chars_to_number (buf, INSN_SIZE);
      md_number_to_chars (buf, newval, INSN_SIZE);
      break;
28848
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PCREL_CALL:

      /* BL to a Thumb function in the same section: rewrite as BLX so
	 the state change happens in the instruction, not via a stub.  */
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	/* Flip the bl to blx. This is a simple flip
	   bit here because we generate PCREL_CALL for
	   unconditional bls.  */
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval = newval | 0x10000000;
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 1;
	  fixP->fx_done = 1;
	}
      else
	temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_JUMP:
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  /* This would map to a bl<cond>, b<cond>,
	     b<always> to a Thumb function. We
	     need to force a relocation for this particular
	     case.  */
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  fixP->fx_done = 0;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_PLT32:
#endif
    case BFD_RELOC_ARM_PCREL_BRANCH:
      temp = 3;
      goto arm_branch_common;

    case BFD_RELOC_ARM_PCREL_BLX:

      temp = 1;
      if (ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
	  && fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && ARM_IS_FUNC (fixP->fx_addsy))
	{
	  /* Flip the blx to a bl and warn.  */
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  newval = 0xeb000000;
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to '%s' an ARM ISA state function changed to bl"),
			  name);
	  md_number_to_chars (buf, newval, INSN_SIZE);
	  temp = 3;
	  fixP->fx_done = 1;
	}

#ifdef OBJ_ELF
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	 fixP->fx_r_type = BFD_RELOC_ARM_PCREL_CALL;
#endif

    arm_branch_common:
      /* We are going to store value (shifted right by two) in the
	 instruction, in a 24 bit, signed field.  Bits 26 through 32 either
	 all clear or all set and bit 0 must be clear.  For B/BL bit 1 must
	 also be clear.  */
      /* TEMP is the alignment mask: 3 for B/BL (word-aligned target),
	 1 for BLX (halfword-aligned target).  */
      if (value & temp)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("misaligned branch destination"));
      if ((value & 0xfe000000) != 0
	  && (value & 0xfe000000) != 0xfe000000)
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, INSN_SIZE);
	  newval |= (value >> 2) & 0x00ffffff;
	  /* Set the H bit on BLX instructions.  */
	  if (temp == 1)
	    {
	      if (value & 2)
		newval |= 0x01000000;
	      else
		newval &= ~0x01000000;
	    }
	  md_number_to_chars (buf, newval, INSN_SIZE);
	}
      break;
28945
    case BFD_RELOC_THUMB_PCREL_BRANCH7: /* CBZ */
      /* CBZ can only branch forward.  */

      /* Attempts to use CBZ to branch to the next instruction
	 (which, strictly speaking, are prohibited) will be turned into
	 no-ops.

	 FIXME: It may be better to remove the instruction completely and
	 perform relaxation.  */
      if ((offsetT) value == -2)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval = 0xbf00; /* NOP encoding T1 */
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      else
	{
	  if (value & ~0x7e)
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

	  if (fixP->fx_done || !seg->use_rela_p)
	    {
	      newval = md_chars_to_number (buf, THUMB_SIZE);
	      /* Offset split as i:imm5 into bits [9] and [7:3].  */
	      newval |= ((value & 0x3e) << 2) | ((value & 0x40) << 3);
	      md_number_to_chars (buf, newval, THUMB_SIZE);
	    }
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH9: /* Conditional branch.	*/
      if (out_of_range_p (value, 8))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0x1ff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH12: /* Unconditional branch.  */
      if (out_of_range_p (value, 11))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, THUMB_SIZE);
	  newval |= (value & 0xfff) >> 1;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	}
      break;
28998
    /* This relocation is misnamed, it should be BRANCH21.  */
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Force a relocation for a branch 20 bits wide.  */
	  fixP->fx_done = 0;
	}
      if (out_of_range_p (value, 20))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("conditional branch out of range"));

      if (fixP->fx_done || !seg->use_rela_p)
	{
	  offsetT newval2;
	  addressT S, J1, J2, lo, hi;

	  /* Split the offset into the S:J2:J1:imm6:imm11 fields of the
	     Thumb2 conditional-branch encoding.  */
	  S  = (value & 0x00100000) >> 20;
	  J2 = (value & 0x00080000) >> 19;
	  J1 = (value & 0x00040000) >> 18;
	  hi = (value & 0x0003f000) >> 12;
	  lo = (value & 0x00000ffe) >> 1;

	  newval   = md_chars_to_number (buf, THUMB_SIZE);
	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval  |= (S << 10) | hi;
	  newval2 |= (J1 << 13) | (J2 << 11) | lo;
	  md_number_to_chars (buf, newval, THUMB_SIZE);
	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
	}
      break;
29033
    case BFD_RELOC_THUMB_PCREL_BLX:
      /* If there is a blx from a thumb state function to
	 another thumb function flip this to a bl and warn
	 about it.  */

      if (fixP->fx_addsy
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && THUMB_IS_FUNC (fixP->fx_addsy))
	{
	  const char *name = S_GET_NAME (fixP->fx_addsy);
	  as_warn_where (fixP->fx_file, fixP->fx_line,
			 _("blx to Thumb func '%s' from Thumb ISA state changed to bl"),
			 name);
	  /* Setting bit 12 in the second halfword converts BLX to BL.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval | 0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
	  fixP->fx_done = 1;
	}


      goto thumb_bl_common;

    case BFD_RELOC_THUMB_PCREL_BRANCH23:
      /* A bl from Thumb state ISA to an internal ARM state function
	 is converted to a blx.  */
      if (fixP->fx_addsy
	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
	  && ARM_IS_FUNC (fixP->fx_addsy)
	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t))
	{
	  /* Clearing bit 12 in the second halfword converts BL to BLX.  */
	  newval = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
	  newval = newval & ~0x1000;
	  md_number_to_chars (buf+THUMB_SIZE, newval, THUMB_SIZE);
	  fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BLX;
	  fixP->fx_done = 1;
	}

    thumb_bl_common:

      if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	/* For a BLX instruction, make sure that the relocation is rounded up
	   to a word boundary.  This follows the semantics of the instruction
	   which specifies that bit 1 of the target address will come from bit
	   1 of the base address.  */
	value = (value + 3) & ~ 3;

#ifdef OBJ_ELF
       if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4
	   && fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BLX)
	 fixP->fx_r_type = BFD_RELOC_THUMB_PCREL_BRANCH23;
#endif

      /* Pre-v6T2 cores only reach +/-4MB (22-bit); v6T2 and later have
	 the wider 24-bit encoding.  */
      if (out_of_range_p (value, 22))
	{
	  if (!(ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v6t2)))
	    as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);
	  else if (out_of_range_p (value, 24))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("Thumb2 branch out of range"));
	}

      if (fixP->fx_done || !seg->use_rela_p)
	encode_thumb2_b_bl_offset (buf, value);

      break;

    case BFD_RELOC_THUMB_PCREL_BRANCH25:
      if (out_of_range_p (value, 24))
	as_bad_where (fixP->fx_file, fixP->fx_line, BAD_RANGE);

      if (fixP->fx_done || !seg->use_rela_p)
	  encode_thumb2_b_bl_offset (buf, value);

      break;
29111
    case BFD_RELOC_8:
      /* Plain 8-bit data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	*buf = value;
      break;

    case BFD_RELOC_16:
      /* Plain 16-bit data relocation.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 2);
      break;
29121
#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDO32:
      /* TLS relocations only mark the symbol thread-local here; the
	 actual resolution is left to the linker.  */
      S_SET_THREAD_LOCAL (fixP->fx_addsy);
      break;

      /* Same handling as above, but with the arm_fdpic guard.  */
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      if (arm_fdpic)
	{
	  S_SET_THREAD_LOCAL (fixP->fx_addsy);
	}
      else
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("Relocation supported only in FDPIC mode"));
	}
      break;

    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
      /* Resolved entirely by the linker; nothing to patch here.  */
      break;

    case BFD_RELOC_ARM_GOT_PREL:
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, value, 4);
      break;

    case BFD_RELOC_ARM_TARGET2:
      /* TARGET2 is not partial-inplace, so we need to write the
	 addend here for REL targets, because it won't be written out
	 during reloc processing later.  */
      if (fixP->fx_done || !seg->use_rela_p)
	md_number_to_chars (buf, fixP->fx_offset, 4);
      break;

      /* Relocations for FDPIC.  */
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
      if (arm_fdpic)
	{
	  if (fixP->fx_done || !seg->use_rela_p)
	    md_number_to_chars (buf, 0, 4);
	}
      else
	{
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			_("Relocation supported only in FDPIC mode"));
      }
      break;
#endif
29184
    case BFD_RELOC_RVA:
    case BFD_RELOC_32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_32_PCREL:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      /* Generic 32-bit data relocations.  */
      if (fixP->fx_done || !seg->use_rela_p)
#ifdef TE_WINCE
	/* For WinCE we only do this for pcrel fixups.  */
	if (fixP->fx_done || fixP->fx_pcrel)
#endif
	  md_number_to_chars (buf, value, 4);
      break;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_PREL31:
      /* 31-bit PC-relative value with the top bit preserved (used by
	 the exception-handling tables).  */
      if (fixP->fx_done || !seg->use_rela_p)
	{
	  newval = md_chars_to_number (buf, 4) & 0x80000000;
	  /* Overflow iff bits 30 and 31 of the offset disagree.  */
	  if ((value ^ (value >> 1)) & 0x40000000)
	    {
	      as_bad_where (fixP->fx_file, fixP->fx_line,
			    _("rel31 relocation overflow"));
	    }
	  newval |= value & 0x7fffffff;
	  md_number_to_chars (buf, newval, 4);
	}
      break;
#endif
29217
    case BFD_RELOC_ARM_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM:
    case BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM:
      /* Coprocessor load/store offsets.  The stored immediate is the
	 offset divided by 4 (or by 2 for the fp16 vldr/vstr form, which
	 is compensated for by the extra left shift below); the sign is
	 carried in the U bit.  */
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if ((newval & 0x0f200f00) == 0x0d000900)
	{
	  /* This is a fp16 vstr/vldr.  The immediate offset in the mnemonic
	     has permitted values that are multiples of 2, in the range -510
	     to 510.  */
	  /* The "value + K > K + K" form checks the range without risking
	     signed overflow on VALUE itself.  */
	  if (value + 510 > 510 + 510 || (value & 1))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("co-processor offset out of range"));
	}
      else if ((newval & 0xfe001f80) == 0xec000f80)
	{
	  if (value + 511 > 512 + 511 || (value & 3))
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("co-processor offset out of range"));
	}
      else if (value + 1023 > 1023 + 1023 || (value & 3))
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
    cp_off_common:
      sign = (offsetT) value > 0;
      if ((offsetT) value < 0)
	value = -value;
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	newval = md_chars_to_number (buf, INSN_SIZE);
      else
	newval = get_thumb32_insn (buf);
      if (value == 0)
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
	    newval &= 0xffffff80;
	  else
	    newval &= 0xffffff00;
	}
      else
	{
	  if (fixP->fx_r_type == BFD_RELOC_ARM_T32_VLDR_VSTR_OFF_IMM)
	    newval &= 0xff7fff80;
	  else
	    newval &= 0xff7fff00;
	  if ((newval & 0x0f200f00) == 0x0d000900)
	    {
	      /* This is a fp16 vstr/vldr.

		 It requires the immediate offset in the instruction is shifted
		 left by 1 to be a half-word offset.

		 Here, left shift by 1 first, and later right shift by 2
		 should get the right offset.  */
	      value <<= 1;
	    }
	  newval |= (value >> 2) | (sign ? INDEX_UP : 0);
	}
      if (fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
	  || fixP->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2)
	md_number_to_chars (buf, newval, INSN_SIZE);
      else
	put_thumb32_insn (buf, newval);
      break;

    case BFD_RELOC_ARM_CP_OFF_IMM_S2:
    case BFD_RELOC_ARM_T32_CP_OFF_IMM_S2:
      /* Offset expressed in words; scale to bytes and share the common
	 insertion path above.  */
      if (value + 255 > 255 + 255)
	as_bad_where (fixP->fx_file, fixP->fx_line,
		      _("co-processor offset out of range"));
      value *= 4;
      goto cp_off_common;
29292
    case BFD_RELOC_ARM_THUMB_OFFSET:
      newval = md_chars_to_number (buf, THUMB_SIZE);
      /* Exactly what ranges, and where the offset is inserted depends
	 on the type of instruction, we can establish this from the
	 top 4 bits.  */
      switch (newval >> 12)
	{
	case 4: /* PC load.  */
	  /* Thumb PC loads are somewhat odd, bit 1 of the PC is
	     forced to zero for these loads; md_pcrel_from has already
	     compensated for this.  */
	  if (value & 3)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, target not word aligned (0x%08lX)"),
			  (((unsigned long) fixP->fx_frag->fr_address
			    + (unsigned long) fixP->fx_where) & ~3)
			  + (unsigned long) value);
	  else if (get_recorded_alignment (seg) < 2)
	    as_warn_where (fixP->fx_file, fixP->fx_line,
			   _("section does not have enough alignment to ensure safe PC-relative loads"));

	  if (value & ~0x3fc)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);

	  newval |= value >> 2;
	  break;

	case 9: /* SP load/store.  */
	  if (value & ~0x3fc)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value >> 2;
	  break;

	case 6: /* Word load/store.  */
	  if (value & ~0x7c)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 4; /* 6 - 2.  */
	  break;

	case 7: /* Byte load/store.  */
	  if (value & ~0x1f)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 6;
	  break;

	case 8: /* Halfword load/store.	 */
	  if (value & ~0x3e)
	    as_bad_where (fixP->fx_file, fixP->fx_line,
			  _("invalid offset, value too big (0x%08lX)"),
			  (long) value);
	  newval |= value << 5; /* 6 - 1.  */
	  break;

	default:
	  as_bad_where (fixP->fx_file, fixP->fx_line,
			"Unable to process relocation for thumb opcode: %lx",
			(unsigned long) newval);
	  break;
	}
      md_number_to_chars (buf, newval, THUMB_SIZE);
      break;
29362
29363    case BFD_RELOC_ARM_THUMB_ADD:
29364      /* This is a complicated relocation, since we use it for all of
29365	 the following immediate relocations:
29366
29367	    3bit ADD/SUB
29368	    8bit ADD/SUB
29369	    9bit ADD/SUB SP word-aligned
29370	   10bit ADD PC/SP word-aligned
29371
29372	 The type of instruction being processed is encoded in the
29373	 instruction field:
29374
29375	   0x8000  SUB
29376	   0x00F0  Rd
29377	   0x000F  Rs
29378      */
29379      newval = md_chars_to_number (buf, THUMB_SIZE);
29380      {
29381	int rd = (newval >> 4) & 0xf;
29382	int rs = newval & 0xf;
29383	int subtract = !!(newval & 0x8000);
29384
29385	/* Check for HI regs, only very restricted cases allowed:
29386	   Adjusting SP, and using PC or SP to get an address.	*/
29387	if ((rd > 7 && (rd != REG_SP || rs != REG_SP))
29388	    || (rs > 7 && rs != REG_SP && rs != REG_PC))
29389	  as_bad_where (fixP->fx_file, fixP->fx_line,
29390			_("invalid Hi register with immediate"));
29391
29392	/* If value is negative, choose the opposite instruction.  */
29393	if ((offsetT) value < 0)
29394	  {
29395	    value = -value;
29396	    subtract = !subtract;
29397	    if ((offsetT) value < 0)
29398	      as_bad_where (fixP->fx_file, fixP->fx_line,
29399			    _("immediate value out of range"));
29400	  }
29401
29402	if (rd == REG_SP)
29403	  {
29404 	    if (value & ~0x1fc)
29405	      as_bad_where (fixP->fx_file, fixP->fx_line,
29406			    _("invalid immediate for stack address calculation"));
29407	    newval = subtract ? T_OPCODE_SUB_ST : T_OPCODE_ADD_ST;
29408	    newval |= value >> 2;
29409	  }
29410	else if (rs == REG_PC || rs == REG_SP)
29411	  {
29412	    /* PR gas/18541.  If the addition is for a defined symbol
29413	       within range of an ADR instruction then accept it.  */
29414	    if (subtract
29415		&& value == 4
29416		&& fixP->fx_addsy != NULL)
29417	      {
29418		subtract = 0;
29419
29420		if (! S_IS_DEFINED (fixP->fx_addsy)
29421		    || S_GET_SEGMENT (fixP->fx_addsy) != seg
29422		    || S_IS_WEAK (fixP->fx_addsy))
29423		  {
29424		    as_bad_where (fixP->fx_file, fixP->fx_line,
29425				  _("address calculation needs a strongly defined nearby symbol"));
29426		  }
29427		else
29428		  {
29429		    offsetT v = fixP->fx_where + fixP->fx_frag->fr_address;
29430
29431		    /* Round up to the next 4-byte boundary.  */
29432		    if (v & 3)
29433		      v = (v + 3) & ~ 3;
29434		    else
29435		      v += 4;
29436		    v = S_GET_VALUE (fixP->fx_addsy) - v;
29437
29438		    if (v & ~0x3fc)
29439		      {
29440			as_bad_where (fixP->fx_file, fixP->fx_line,
29441				      _("symbol too far away"));
29442		      }
29443		    else
29444		      {
29445			fixP->fx_done = 1;
29446			value = v;
29447		      }
29448		  }
29449	      }
29450
29451	    if (subtract || value & ~0x3fc)
29452	      as_bad_where (fixP->fx_file, fixP->fx_line,
29453			    _("invalid immediate for address calculation (value = 0x%08lX)"),
29454			    (unsigned long) (subtract ? - value : value));
29455	    newval = (rs == REG_PC ? T_OPCODE_ADD_PC : T_OPCODE_ADD_SP);
29456	    newval |= rd << 8;
29457	    newval |= value >> 2;
29458	  }
29459	else if (rs == rd)
29460	  {
29461	    if (value & ~0xff)
29462	      as_bad_where (fixP->fx_file, fixP->fx_line,
29463			    _("immediate value out of range"));
29464	    newval = subtract ? T_OPCODE_SUB_I8 : T_OPCODE_ADD_I8;
29465	    newval |= (rd << 8) | value;
29466	  }
29467	else
29468	  {
29469	    if (value & ~0x7)
29470	      as_bad_where (fixP->fx_file, fixP->fx_line,
29471			    _("immediate value out of range"));
29472	    newval = subtract ? T_OPCODE_SUB_I3 : T_OPCODE_ADD_I3;
29473	    newval |= rd | (rs << 3) | (value << 6);
29474	  }
29475      }
29476      md_number_to_chars (buf, newval, THUMB_SIZE);
29477      break;
29478
29479    case BFD_RELOC_ARM_THUMB_IMM:
29480      newval = md_chars_to_number (buf, THUMB_SIZE);
29481      if (value > 255)
29482	as_bad_where (fixP->fx_file, fixP->fx_line,
29483		      _("invalid immediate: %ld is out of range"),
29484		      (long) value);
29485      newval |= value;
29486      md_number_to_chars (buf, newval, THUMB_SIZE);
29487      break;
29488
29489    case BFD_RELOC_ARM_THUMB_SHIFT:
29490      /* 5bit shift value (0..32).  LSL cannot take 32.	 */
29491      newval = md_chars_to_number (buf, THUMB_SIZE) & 0xf83f;
29492      temp = newval & 0xf800;
29493      if (value > 32 || (value == 32 && temp == T_OPCODE_LSL_I))
29494	as_bad_where (fixP->fx_file, fixP->fx_line,
29495		      _("invalid shift value: %ld"), (long) value);
29496      /* Shifts of zero must be encoded as LSL.	 */
29497      if (value == 0)
29498	newval = (newval & 0x003f) | T_OPCODE_LSL_I;
29499      /* Shifts of 32 are encoded as zero.  */
29500      else if (value == 32)
29501	value = 0;
29502      newval |= value << 6;
29503      md_number_to_chars (buf, newval, THUMB_SIZE);
29504      break;
29505
29506    case BFD_RELOC_VTABLE_INHERIT:
29507    case BFD_RELOC_VTABLE_ENTRY:
29508      fixP->fx_done = 0;
29509      return;
29510
29511    case BFD_RELOC_ARM_MOVW:
29512    case BFD_RELOC_ARM_MOVT:
29513    case BFD_RELOC_ARM_THUMB_MOVW:
29514    case BFD_RELOC_ARM_THUMB_MOVT:
29515      if (fixP->fx_done || !seg->use_rela_p)
29516	{
29517	  /* REL format relocations are limited to a 16-bit addend.  */
29518	  if (!fixP->fx_done)
29519	    {
29520	      if (value + 0x8000 > 0x7fff + 0x8000)
29521		  as_bad_where (fixP->fx_file, fixP->fx_line,
29522				_("offset out of range"));
29523	    }
29524	  else if (fixP->fx_r_type == BFD_RELOC_ARM_MOVT
29525		   || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29526	    {
29527	      value >>= 16;
29528	    }
29529
29530	  if (fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
29531	      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT)
29532	    {
29533	      newval = get_thumb32_insn (buf);
29534	      newval &= 0xfbf08f00;
29535	      newval |= (value & 0xf000) << 4;
29536	      newval |= (value & 0x0800) << 15;
29537	      newval |= (value & 0x0700) << 4;
29538	      newval |= (value & 0x00ff);
29539	      put_thumb32_insn (buf, newval);
29540	    }
29541	  else
29542	    {
29543	      newval = md_chars_to_number (buf, 4);
29544	      newval &= 0xfff0f000;
29545	      newval |= value & 0x0fff;
29546	      newval |= (value & 0xf000) << 4;
29547	      md_number_to_chars (buf, newval, 4);
29548	    }
29549	}
29550      return;
29551
29552   case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
29553   case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
29554   case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
29555   case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
29556      gas_assert (!fixP->fx_done);
29557      {
29558	bfd_vma insn;
29559	bfd_boolean is_mov;
29560	bfd_vma encoded_addend = value;
29561
29562	/* Check that addend can be encoded in instruction.  */
29563	if (!seg->use_rela_p && value > 255)
29564	  as_bad_where (fixP->fx_file, fixP->fx_line,
29565			_("the offset 0x%08lX is not representable"),
29566			(unsigned long) encoded_addend);
29567
29568	/* Extract the instruction.  */
29569	insn = md_chars_to_number (buf, THUMB_SIZE);
29570	is_mov = (insn & 0xf800) == 0x2000;
29571
29572	/* Encode insn.  */
29573	if (is_mov)
29574	  {
29575	    if (!seg->use_rela_p)
29576	      insn |= encoded_addend;
29577	  }
29578	else
29579	  {
29580	    int rd, rs;
29581
29582	    /* Extract the instruction.  */
29583	     /* Encoding is the following
29584		0x8000  SUB
29585		0x00F0  Rd
29586		0x000F  Rs
29587	     */
29588	     /* The following conditions must be true :
29589		- ADD
29590		- Rd == Rs
29591		- Rd <= 7
29592	     */
29593	    rd = (insn >> 4) & 0xf;
29594	    rs = insn & 0xf;
29595	    if ((insn & 0x8000) || (rd != rs) || rd > 7)
29596	      as_bad_where (fixP->fx_file, fixP->fx_line,
29597			_("Unable to process relocation for thumb opcode: %lx"),
29598			(unsigned long) insn);
29599
29600	    /* Encode as ADD immediate8 thumb 1 code.  */
29601	    insn = 0x3000 | (rd << 8);
29602
29603	    /* Place the encoded addend into the first 8 bits of the
29604	       instruction.  */
29605	    if (!seg->use_rela_p)
29606	      insn |= encoded_addend;
29607	  }
29608
29609	/* Update the instruction.  */
29610	md_number_to_chars (buf, insn, THUMB_SIZE);
29611      }
29612      break;
29613
29614   case BFD_RELOC_ARM_ALU_PC_G0_NC:
29615   case BFD_RELOC_ARM_ALU_PC_G0:
29616   case BFD_RELOC_ARM_ALU_PC_G1_NC:
29617   case BFD_RELOC_ARM_ALU_PC_G1:
29618   case BFD_RELOC_ARM_ALU_PC_G2:
29619   case BFD_RELOC_ARM_ALU_SB_G0_NC:
29620   case BFD_RELOC_ARM_ALU_SB_G0:
29621   case BFD_RELOC_ARM_ALU_SB_G1_NC:
29622   case BFD_RELOC_ARM_ALU_SB_G1:
29623   case BFD_RELOC_ARM_ALU_SB_G2:
29624     gas_assert (!fixP->fx_done);
29625     if (!seg->use_rela_p)
29626       {
29627	 bfd_vma insn;
29628	 bfd_vma encoded_addend;
29629	 bfd_vma addend_abs = llabs ((offsetT) value);
29630
29631	 /* Check that the absolute value of the addend can be
29632	    expressed as an 8-bit constant plus a rotation.  */
29633	 encoded_addend = encode_arm_immediate (addend_abs);
29634	 if (encoded_addend == (unsigned int) FAIL)
29635	   as_bad_where (fixP->fx_file, fixP->fx_line,
29636			 _("the offset 0x%08lX is not representable"),
29637			 (unsigned long) addend_abs);
29638
29639	 /* Extract the instruction.  */
29640	 insn = md_chars_to_number (buf, INSN_SIZE);
29641
29642	 /* If the addend is positive, use an ADD instruction.
29643	    Otherwise use a SUB.  Take care not to destroy the S bit.  */
29644	 insn &= 0xff1fffff;
29645	 if ((offsetT) value < 0)
29646	   insn |= 1 << 22;
29647	 else
29648	   insn |= 1 << 23;
29649
29650	 /* Place the encoded addend into the first 12 bits of the
29651	    instruction.  */
29652	 insn &= 0xfffff000;
29653	 insn |= encoded_addend;
29654
29655	 /* Update the instruction.  */
29656	 md_number_to_chars (buf, insn, INSN_SIZE);
29657       }
29658     break;
29659
29660    case BFD_RELOC_ARM_LDR_PC_G0:
29661    case BFD_RELOC_ARM_LDR_PC_G1:
29662    case BFD_RELOC_ARM_LDR_PC_G2:
29663    case BFD_RELOC_ARM_LDR_SB_G0:
29664    case BFD_RELOC_ARM_LDR_SB_G1:
29665    case BFD_RELOC_ARM_LDR_SB_G2:
29666      gas_assert (!fixP->fx_done);
29667      if (!seg->use_rela_p)
29668	{
29669	  bfd_vma insn;
29670	  bfd_vma addend_abs = llabs ((offsetT) value);
29671
29672	  /* Check that the absolute value of the addend can be
29673	     encoded in 12 bits.  */
29674	  if (addend_abs >= 0x1000)
29675	    as_bad_where (fixP->fx_file, fixP->fx_line,
29676			  _("bad offset 0x%08lX (only 12 bits available for the magnitude)"),
29677			  (unsigned long) addend_abs);
29678
29679	  /* Extract the instruction.  */
29680	  insn = md_chars_to_number (buf, INSN_SIZE);
29681
29682	  /* If the addend is negative, clear bit 23 of the instruction.
29683	     Otherwise set it.  */
29684	  if ((offsetT) value < 0)
29685	    insn &= ~(1 << 23);
29686	  else
29687	    insn |= 1 << 23;
29688
29689	  /* Place the absolute value of the addend into the first 12 bits
29690	     of the instruction.  */
29691	  insn &= 0xfffff000;
29692	  insn |= addend_abs;
29693
29694	  /* Update the instruction.  */
29695	  md_number_to_chars (buf, insn, INSN_SIZE);
29696	}
29697      break;
29698
29699    case BFD_RELOC_ARM_LDRS_PC_G0:
29700    case BFD_RELOC_ARM_LDRS_PC_G1:
29701    case BFD_RELOC_ARM_LDRS_PC_G2:
29702    case BFD_RELOC_ARM_LDRS_SB_G0:
29703    case BFD_RELOC_ARM_LDRS_SB_G1:
29704    case BFD_RELOC_ARM_LDRS_SB_G2:
29705      gas_assert (!fixP->fx_done);
29706      if (!seg->use_rela_p)
29707	{
29708	  bfd_vma insn;
29709	  bfd_vma addend_abs = llabs ((offsetT) value);
29710
29711	  /* Check that the absolute value of the addend can be
29712	     encoded in 8 bits.  */
29713	  if (addend_abs >= 0x100)
29714	    as_bad_where (fixP->fx_file, fixP->fx_line,
29715			  _("bad offset 0x%08lX (only 8 bits available for the magnitude)"),
29716			  (unsigned long) addend_abs);
29717
29718	  /* Extract the instruction.  */
29719	  insn = md_chars_to_number (buf, INSN_SIZE);
29720
29721	  /* If the addend is negative, clear bit 23 of the instruction.
29722	     Otherwise set it.  */
29723	  if ((offsetT) value < 0)
29724	    insn &= ~(1 << 23);
29725	  else
29726	    insn |= 1 << 23;
29727
29728	  /* Place the first four bits of the absolute value of the addend
29729	     into the first 4 bits of the instruction, and the remaining
29730	     four into bits 8 .. 11.  */
29731	  insn &= 0xfffff0f0;
29732	  insn |= (addend_abs & 0xf) | ((addend_abs & 0xf0) << 4);
29733
29734	  /* Update the instruction.  */
29735	  md_number_to_chars (buf, insn, INSN_SIZE);
29736	}
29737      break;
29738
29739    case BFD_RELOC_ARM_LDC_PC_G0:
29740    case BFD_RELOC_ARM_LDC_PC_G1:
29741    case BFD_RELOC_ARM_LDC_PC_G2:
29742    case BFD_RELOC_ARM_LDC_SB_G0:
29743    case BFD_RELOC_ARM_LDC_SB_G1:
29744    case BFD_RELOC_ARM_LDC_SB_G2:
29745      gas_assert (!fixP->fx_done);
29746      if (!seg->use_rela_p)
29747	{
29748	  bfd_vma insn;
29749	  bfd_vma addend_abs = llabs ((offsetT) value);
29750
29751	  /* Check that the absolute value of the addend is a multiple of
29752	     four and, when divided by four, fits in 8 bits.  */
29753	  if (addend_abs & 0x3)
29754	    as_bad_where (fixP->fx_file, fixP->fx_line,
29755			  _("bad offset 0x%08lX (must be word-aligned)"),
29756			  (unsigned long) addend_abs);
29757
29758	  if ((addend_abs >> 2) > 0xff)
29759	    as_bad_where (fixP->fx_file, fixP->fx_line,
29760			  _("bad offset 0x%08lX (must be an 8-bit number of words)"),
29761			  (unsigned long) addend_abs);
29762
29763	  /* Extract the instruction.  */
29764	  insn = md_chars_to_number (buf, INSN_SIZE);
29765
29766	  /* If the addend is negative, clear bit 23 of the instruction.
29767	     Otherwise set it.  */
29768	  if ((offsetT) value < 0)
29769	    insn &= ~(1 << 23);
29770	  else
29771	    insn |= 1 << 23;
29772
29773	  /* Place the addend (divided by four) into the first eight
29774	     bits of the instruction.  */
29775	  insn &= 0xfffffff0;
29776	  insn |= addend_abs >> 2;
29777
29778	  /* Update the instruction.  */
29779	  md_number_to_chars (buf, insn, INSN_SIZE);
29780	}
29781      break;
29782
29783    case BFD_RELOC_THUMB_PCREL_BRANCH5:
29784      if (fixP->fx_addsy
29785	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29786	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29787	  && ARM_IS_FUNC (fixP->fx_addsy)
29788	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29789	{
29790	  /* Force a relocation for a branch 5 bits wide.  */
29791	  fixP->fx_done = 0;
29792	}
29793      if (v8_1_branch_value_check (value, 5, FALSE) == FAIL)
29794	as_bad_where (fixP->fx_file, fixP->fx_line,
29795		      BAD_BRANCH_OFF);
29796
29797      if (fixP->fx_done || !seg->use_rela_p)
29798	{
29799	  addressT boff = value >> 1;
29800
29801	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29802	  newval |= (boff << 7);
29803	  md_number_to_chars (buf, newval, THUMB_SIZE);
29804	}
29805      break;
29806
29807    case BFD_RELOC_THUMB_PCREL_BFCSEL:
29808      if (fixP->fx_addsy
29809	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29810	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29811	  && ARM_IS_FUNC (fixP->fx_addsy)
29812	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29813	{
29814	  fixP->fx_done = 0;
29815	}
29816      if ((value & ~0x7f) && ((value & ~0x3f) != (valueT) ~0x3f))
29817	as_bad_where (fixP->fx_file, fixP->fx_line,
29818		      _("branch out of range"));
29819
29820      if (fixP->fx_done || !seg->use_rela_p)
29821	{
29822	  newval  = md_chars_to_number (buf, THUMB_SIZE);
29823
29824	  addressT boff = ((newval & 0x0780) >> 7) << 1;
29825	  addressT diff = value - boff;
29826
29827	  if (diff == 4)
29828	    {
29829	      newval |= 1 << 1; /* T bit.  */
29830	    }
29831	  else if (diff != 2)
29832	    {
29833	      as_bad_where (fixP->fx_file, fixP->fx_line,
29834			    _("out of range label-relative fixup value"));
29835	    }
29836	  md_number_to_chars (buf, newval, THUMB_SIZE);
29837	}
29838      break;
29839
29840    case BFD_RELOC_ARM_THUMB_BF17:
29841      if (fixP->fx_addsy
29842	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29843	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29844	  && ARM_IS_FUNC (fixP->fx_addsy)
29845	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29846	{
29847	  /* Force a relocation for a branch 17 bits wide.  */
29848	  fixP->fx_done = 0;
29849	}
29850
29851      if (v8_1_branch_value_check (value, 17, TRUE) == FAIL)
29852	as_bad_where (fixP->fx_file, fixP->fx_line,
29853		      BAD_BRANCH_OFF);
29854
29855      if (fixP->fx_done || !seg->use_rela_p)
29856	{
29857	  offsetT newval2;
29858	  addressT immA, immB, immC;
29859
29860	  immA = (value & 0x0001f000) >> 12;
29861	  immB = (value & 0x00000ffc) >> 2;
29862	  immC = (value & 0x00000002) >> 1;
29863
29864	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29865	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29866	  newval  |= immA;
29867	  newval2 |= (immC << 11) | (immB << 1);
29868	  md_number_to_chars (buf, newval, THUMB_SIZE);
29869	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29870	}
29871      break;
29872
29873    case BFD_RELOC_ARM_THUMB_BF19:
29874      if (fixP->fx_addsy
29875	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29876	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29877	  && ARM_IS_FUNC (fixP->fx_addsy)
29878	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29879	{
29880	  /* Force a relocation for a branch 19 bits wide.  */
29881	  fixP->fx_done = 0;
29882	}
29883
29884      if (v8_1_branch_value_check (value, 19, TRUE) == FAIL)
29885	as_bad_where (fixP->fx_file, fixP->fx_line,
29886		      BAD_BRANCH_OFF);
29887
29888      if (fixP->fx_done || !seg->use_rela_p)
29889	{
29890	  offsetT newval2;
29891	  addressT immA, immB, immC;
29892
29893	  immA = (value & 0x0007f000) >> 12;
29894	  immB = (value & 0x00000ffc) >> 2;
29895	  immC = (value & 0x00000002) >> 1;
29896
29897	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29898	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29899	  newval  |= immA;
29900	  newval2 |= (immC << 11) | (immB << 1);
29901	  md_number_to_chars (buf, newval, THUMB_SIZE);
29902	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29903	}
29904      break;
29905
29906    case BFD_RELOC_ARM_THUMB_BF13:
29907      if (fixP->fx_addsy
29908	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29909	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29910	  && ARM_IS_FUNC (fixP->fx_addsy)
29911	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29912	{
29913	  /* Force a relocation for a branch 13 bits wide.  */
29914	  fixP->fx_done = 0;
29915	}
29916
29917      if (v8_1_branch_value_check (value, 13, TRUE) == FAIL)
29918	as_bad_where (fixP->fx_file, fixP->fx_line,
29919		      BAD_BRANCH_OFF);
29920
29921      if (fixP->fx_done || !seg->use_rela_p)
29922	{
29923	  offsetT newval2;
29924	  addressT immA, immB, immC;
29925
29926	  immA = (value & 0x00001000) >> 12;
29927	  immB = (value & 0x00000ffc) >> 2;
29928	  immC = (value & 0x00000002) >> 1;
29929
29930	  newval   = md_chars_to_number (buf, THUMB_SIZE);
29931	  newval2  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29932	  newval  |= immA;
29933	  newval2 |= (immC << 11) | (immB << 1);
29934	  md_number_to_chars (buf, newval, THUMB_SIZE);
29935	  md_number_to_chars (buf + THUMB_SIZE, newval2, THUMB_SIZE);
29936	}
29937      break;
29938
29939    case BFD_RELOC_ARM_THUMB_LOOP12:
29940      if (fixP->fx_addsy
29941	  && (S_GET_SEGMENT (fixP->fx_addsy) == seg)
29942	  && !S_FORCE_RELOC (fixP->fx_addsy, TRUE)
29943	  && ARM_IS_FUNC (fixP->fx_addsy)
29944	  && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v8_1m_main))
29945	{
29946	  /* Force a relocation for a branch 12 bits wide.  */
29947	  fixP->fx_done = 0;
29948	}
29949
29950      bfd_vma insn = get_thumb32_insn (buf);
29951      /* le lr, <label>, le <label> or letp lr, <label> */
29952      if (((insn & 0xffffffff) == 0xf00fc001)
29953	  || ((insn & 0xffffffff) == 0xf02fc001)
29954	  || ((insn & 0xffffffff) == 0xf01fc001))
29955	value = -value;
29956
29957      if (v8_1_branch_value_check (value, 12, FALSE) == FAIL)
29958	as_bad_where (fixP->fx_file, fixP->fx_line,
29959		      BAD_BRANCH_OFF);
29960      if (fixP->fx_done || !seg->use_rela_p)
29961	{
29962	  addressT imml, immh;
29963
29964	  immh = (value & 0x00000ffc) >> 2;
29965	  imml = (value & 0x00000002) >> 1;
29966
29967	  newval  = md_chars_to_number (buf + THUMB_SIZE, THUMB_SIZE);
29968	  newval |= (imml << 11) | (immh << 1);
29969	  md_number_to_chars (buf + THUMB_SIZE, newval, THUMB_SIZE);
29970	}
29971      break;
29972
29973    case BFD_RELOC_ARM_V4BX:
29974      /* This will need to go in the object file.  */
29975      fixP->fx_done = 0;
29976      break;
29977
29978    case BFD_RELOC_UNUSED:
29979    default:
29980      as_bad_where (fixP->fx_file, fixP->fx_line,
29981		    _("bad relocation fixup type (%d)"), fixP->fx_r_type);
29982    }
29983}
29984
29985/* Translate internal representation of relocation info to BFD target
29986   format.  */
29987
arelent *
tc_gen_reloc (asection *section, fixS *fixp)
{
  arelent * reloc;
  bfd_reloc_code_real_type code;

  reloc = XNEW (arelent);

  /* Record the symbol and the location of the fixup within the output
     section.  */
  reloc->sym_ptr_ptr = XNEW (asymbol *);
  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;

  /* For PC-relative fixups the addend convention differs between the
     two relocation formats: RELA targets remove the PC bias here,
     whereas REL targets record the relocation's own address as the
     addend (the linker recomputes the bias).  */
  if (fixp->fx_pcrel)
    {
      if (section->use_rela_p)
	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
      else
	fixp->fx_offset = reloc->address;
    }
  reloc->addend = fixp->fx_offset;

  /* Translate the internal fixup type into the BFD relocation code.
     Returns NULL (after reporting an error) for fixups that should
     have been resolved internally and cannot be emitted.  */
  switch (fixp->fx_r_type)
    {
    case BFD_RELOC_8:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_8_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_16:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_16_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_32:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_32_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVW:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVW_PCREL;
	  break;
	}
      /* Fall through.  */

    case BFD_RELOC_ARM_THUMB_MOVT:
      if (fixp->fx_pcrel)
	{
	  code = BFD_RELOC_ARM_THUMB_MOVT_PCREL;
	  break;
	}
      /* Fall through.  */

    /* These relocation types map straight through unchanged.  */
    case BFD_RELOC_NONE:
    case BFD_RELOC_ARM_PCREL_BRANCH:
    case BFD_RELOC_ARM_PCREL_BLX:
    case BFD_RELOC_RVA:
    case BFD_RELOC_THUMB_PCREL_BRANCH7:
    case BFD_RELOC_THUMB_PCREL_BRANCH9:
    case BFD_RELOC_THUMB_PCREL_BRANCH12:
    case BFD_RELOC_THUMB_PCREL_BRANCH20:
    case BFD_RELOC_THUMB_PCREL_BRANCH23:
    case BFD_RELOC_THUMB_PCREL_BRANCH25:
    case BFD_RELOC_VTABLE_ENTRY:
    case BFD_RELOC_VTABLE_INHERIT:
#ifdef TE_PE
    case BFD_RELOC_32_SECREL:
#endif
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_THUMB_PCREL_BLX:
#ifdef OBJ_ELF
      /* EABI v4 and later deprecates the BLX relocation; emit a plain
	 Thumb branch relocation instead and let the linker insert any
	 needed interworking.  */
      if (EF_ARM_EABI_VERSION (meabi_flags) >= EF_ARM_EABI_VER4)
	code = BFD_RELOC_THUMB_PCREL_BRANCH23;
      else
#endif
	code = BFD_RELOC_THUMB_PCREL_BLX;
      break;

    case BFD_RELOC_ARM_LITERAL:
    case BFD_RELOC_ARM_HWLITERAL:
      /* If this is called then a literal has
	 been referenced across a section boundary.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("literal referenced across section boundary"));
      return NULL;

#ifdef OBJ_ELF
    case BFD_RELOC_ARM_TLS_CALL:
    case BFD_RELOC_ARM_THM_TLS_CALL:
    case BFD_RELOC_ARM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_THM_TLS_DESCSEQ:
    case BFD_RELOC_ARM_GOT32:
    case BFD_RELOC_ARM_GOTOFF:
    case BFD_RELOC_ARM_GOT_PREL:
    case BFD_RELOC_ARM_PLT32:
    case BFD_RELOC_ARM_TARGET1:
    case BFD_RELOC_ARM_ROSEGREL32:
    case BFD_RELOC_ARM_SBREL32:
    case BFD_RELOC_ARM_PREL31:
    case BFD_RELOC_ARM_TARGET2:
    case BFD_RELOC_ARM_TLS_LDO32:
    case BFD_RELOC_ARM_PCREL_CALL:
    case BFD_RELOC_ARM_PCREL_JUMP:
    case BFD_RELOC_ARM_ALU_PC_G0_NC:
    case BFD_RELOC_ARM_ALU_PC_G0:
    case BFD_RELOC_ARM_ALU_PC_G1_NC:
    case BFD_RELOC_ARM_ALU_PC_G1:
    case BFD_RELOC_ARM_ALU_PC_G2:
    case BFD_RELOC_ARM_LDR_PC_G0:
    case BFD_RELOC_ARM_LDR_PC_G1:
    case BFD_RELOC_ARM_LDR_PC_G2:
    case BFD_RELOC_ARM_LDRS_PC_G0:
    case BFD_RELOC_ARM_LDRS_PC_G1:
    case BFD_RELOC_ARM_LDRS_PC_G2:
    case BFD_RELOC_ARM_LDC_PC_G0:
    case BFD_RELOC_ARM_LDC_PC_G1:
    case BFD_RELOC_ARM_LDC_PC_G2:
    case BFD_RELOC_ARM_ALU_SB_G0_NC:
    case BFD_RELOC_ARM_ALU_SB_G0:
    case BFD_RELOC_ARM_ALU_SB_G1_NC:
    case BFD_RELOC_ARM_ALU_SB_G1:
    case BFD_RELOC_ARM_ALU_SB_G2:
    case BFD_RELOC_ARM_LDR_SB_G0:
    case BFD_RELOC_ARM_LDR_SB_G1:
    case BFD_RELOC_ARM_LDR_SB_G2:
    case BFD_RELOC_ARM_LDRS_SB_G0:
    case BFD_RELOC_ARM_LDRS_SB_G1:
    case BFD_RELOC_ARM_LDRS_SB_G2:
    case BFD_RELOC_ARM_LDC_SB_G0:
    case BFD_RELOC_ARM_LDC_SB_G1:
    case BFD_RELOC_ARM_LDC_SB_G2:
    case BFD_RELOC_ARM_V4BX:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G1_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G2_NC:
    case BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC:
    case BFD_RELOC_ARM_GOTFUNCDESC:
    case BFD_RELOC_ARM_GOTOFFFUNCDESC:
    case BFD_RELOC_ARM_FUNCDESC:
    case BFD_RELOC_ARM_THUMB_BF17:
    case BFD_RELOC_ARM_THUMB_BF19:
    case BFD_RELOC_ARM_THUMB_BF13:
      code = fixp->fx_r_type;
      break;

    case BFD_RELOC_ARM_TLS_GOTDESC:
    case BFD_RELOC_ARM_TLS_GD32:
    case BFD_RELOC_ARM_TLS_GD32_FDPIC:
    case BFD_RELOC_ARM_TLS_LE32:
    case BFD_RELOC_ARM_TLS_IE32:
    case BFD_RELOC_ARM_TLS_IE32_FDPIC:
    case BFD_RELOC_ARM_TLS_LDM32:
    case BFD_RELOC_ARM_TLS_LDM32_FDPIC:
      /* BFD will include the symbol's address in the addend.
	 But we don't want that, so subtract it out again here.  */
      if (!S_IS_COMMON (fixp->fx_addsy))
	reloc->addend -= (*reloc->sym_ptr_ptr)->value;
      code = fixp->fx_r_type;
      break;
#endif

    case BFD_RELOC_ARM_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal relocation (type: IMMEDIATE) not fixed up"));
      return NULL;

    case BFD_RELOC_ARM_ADRL_IMMEDIATE:
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("ADRL used for a symbol not defined in the same file"));
      return NULL;

    case BFD_RELOC_THUMB_PCREL_BRANCH5:
    case BFD_RELOC_THUMB_PCREL_BFCSEL:
    case BFD_RELOC_ARM_THUMB_LOOP12:
      /* These short-range branches must resolve at assembly time;
	 reaching here means the target was not in this file.  */
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("%s used for a symbol not defined in the same file"),
		    bfd_get_reloc_code_name (fixp->fx_r_type));
      return NULL;

    case BFD_RELOC_ARM_OFFSET_IMM:
      if (section->use_rela_p)
	{
	  code = fixp->fx_r_type;
	  break;
	}

      if (fixp->fx_addsy != NULL
	  && !S_IS_DEFINED (fixp->fx_addsy)
	  && S_IS_LOCAL (fixp->fx_addsy))
	{
	  as_bad_where (fixp->fx_file, fixp->fx_line,
			_("undefined local label `%s'"),
			S_GET_NAME (fixp->fx_addsy));
	  return NULL;
	}

      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("internal_relocation (type: OFFSET_IMM) not fixed up"));
      return NULL;

    default:
      {
	/* Any other internal fixup type cannot be represented in the
	   object file; name it in the diagnostic if we recognise it.  */
	const char * type;

	switch (fixp->fx_r_type)
	  {
	  case BFD_RELOC_NONE:		   type = "NONE";	  break;
	  case BFD_RELOC_ARM_OFFSET_IMM8:  type = "OFFSET_IMM8";  break;
	  case BFD_RELOC_ARM_SHIFT_IMM:	   type = "SHIFT_IMM";	  break;
	  case BFD_RELOC_ARM_SMC:	   type = "SMC";	  break;
	  case BFD_RELOC_ARM_SWI:	   type = "SWI";	  break;
	  case BFD_RELOC_ARM_MULTI:	   type = "MULTI";	  break;
	  case BFD_RELOC_ARM_CP_OFF_IMM:   type = "CP_OFF_IMM";	  break;
	  case BFD_RELOC_ARM_T32_OFFSET_IMM: type = "T32_OFFSET_IMM"; break;
	  case BFD_RELOC_ARM_T32_CP_OFF_IMM: type = "T32_CP_OFF_IMM"; break;
	  case BFD_RELOC_ARM_THUMB_ADD:	   type = "THUMB_ADD";	  break;
	  case BFD_RELOC_ARM_THUMB_SHIFT:  type = "THUMB_SHIFT";  break;
	  case BFD_RELOC_ARM_THUMB_IMM:	   type = "THUMB_IMM";	  break;
	  case BFD_RELOC_ARM_THUMB_OFFSET: type = "THUMB_OFFSET"; break;
	  default:			   type = _("<unknown>"); break;
	  }
	as_bad_where (fixp->fx_file, fixp->fx_line,
		      _("cannot represent %s relocation in this object file format"),
		      type);
	return NULL;
      }
    }

#ifdef OBJ_ELF
  /* A 32-bit reference to the GOT symbol becomes a GOTPC relocation.  */
  if ((code == BFD_RELOC_32_PCREL || code == BFD_RELOC_32)
      && GOT_symbol
      && fixp->fx_addsy == GOT_symbol)
    {
      code = BFD_RELOC_ARM_GOTPC;
      reloc->addend = fixp->fx_offset = reloc->address;
    }
#endif

  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);

  if (reloc->howto == NULL)
    {
      as_bad_where (fixp->fx_file, fixp->fx_line,
		    _("cannot represent %s relocation in this object file format"),
		    bfd_get_reloc_code_name (code));
      return NULL;
    }

  /* HACK: Since arm ELF uses Rel instead of Rela, encode the
     vtable entry to be used in the relocation's section offset.  */
  if (fixp->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
    reloc->address = fixp->fx_offset;

  return reloc;
}
30271
30272/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
30273
30274void
30275cons_fix_new_arm (fragS *	frag,
30276		  int		where,
30277		  int		size,
30278		  expressionS * exp,
30279		  bfd_reloc_code_real_type reloc)
30280{
30281  int pcrel = 0;
30282
30283  /* Pick a reloc.
30284     FIXME: @@ Should look at CPU word size.  */
30285  switch (size)
30286    {
30287    case 1:
30288      reloc = BFD_RELOC_8;
30289      break;
30290    case 2:
30291      reloc = BFD_RELOC_16;
30292      break;
30293    case 4:
30294    default:
30295      reloc = BFD_RELOC_32;
30296      break;
30297    case 8:
30298      reloc = BFD_RELOC_64;
30299      break;
30300    }
30301
30302#ifdef TE_PE
30303  if (exp->X_op == O_secrel)
30304  {
30305    exp->X_op = O_symbol;
30306    reloc = BFD_RELOC_32_SECREL;
30307  }
30308#endif
30309
30310  fix_new_exp (frag, where, size, exp, pcrel, reloc);
30311}
30312
30313#if defined (OBJ_COFF)
30314void
30315arm_validate_fix (fixS * fixP)
30316{
30317  /* If the destination of the branch is a defined symbol which does not have
30318     the THUMB_FUNC attribute, then we must be calling a function which has
30319     the (interfacearm) attribute.  We look for the Thumb entry point to that
30320     function and change the branch to refer to that function instead.	*/
30321  if (fixP->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH23
30322      && fixP->fx_addsy != NULL
30323      && S_IS_DEFINED (fixP->fx_addsy)
30324      && ! THUMB_IS_FUNC (fixP->fx_addsy))
30325    {
30326      fixP->fx_addsy = find_real_start (fixP->fx_addsy);
30327    }
30328}
30329#endif
30330
30331
30332int
30333arm_force_relocation (struct fix * fixp)
30334{
30335#if defined (OBJ_COFF) && defined (TE_PE)
30336  if (fixp->fx_r_type == BFD_RELOC_RVA)
30337    return 1;
30338#endif
30339
30340  /* In case we have a call or a branch to a function in ARM ISA mode from
30341     a thumb function or vice-versa force the relocation. These relocations
30342     are cleared off for some cores that might have blx and simple transformations
30343     are possible.  */
30344
30345#ifdef OBJ_ELF
30346  switch (fixp->fx_r_type)
30347    {
30348    case BFD_RELOC_ARM_PCREL_JUMP:
30349    case BFD_RELOC_ARM_PCREL_CALL:
30350    case BFD_RELOC_THUMB_PCREL_BLX:
30351      if (THUMB_IS_FUNC (fixp->fx_addsy))
30352	return 1;
30353      break;
30354
30355    case BFD_RELOC_ARM_PCREL_BLX:
30356    case BFD_RELOC_THUMB_PCREL_BRANCH25:
30357    case BFD_RELOC_THUMB_PCREL_BRANCH20:
30358    case BFD_RELOC_THUMB_PCREL_BRANCH23:
30359      if (ARM_IS_FUNC (fixp->fx_addsy))
30360	return 1;
30361      break;
30362
30363    default:
30364      break;
30365    }
30366#endif
30367
30368  /* Resolve these relocations even if the symbol is extern or weak.
30369     Technically this is probably wrong due to symbol preemption.
30370     In practice these relocations do not have enough range to be useful
30371     at dynamic link time, and some code (e.g. in the Linux kernel)
30372     expects these references to be resolved.  */
30373  if (fixp->fx_r_type == BFD_RELOC_ARM_IMMEDIATE
30374      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM
30375      || fixp->fx_r_type == BFD_RELOC_ARM_OFFSET_IMM8
30376      || fixp->fx_r_type == BFD_RELOC_ARM_ADRL_IMMEDIATE
30377      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM
30378      || fixp->fx_r_type == BFD_RELOC_ARM_CP_OFF_IMM_S2
30379      || fixp->fx_r_type == BFD_RELOC_ARM_THUMB_OFFSET
30380      || fixp->fx_r_type == BFD_RELOC_THUMB_PCREL_BRANCH12
30381      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_IMM
30382      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMMEDIATE
30383      || fixp->fx_r_type == BFD_RELOC_ARM_T32_IMM12
30384      || fixp->fx_r_type == BFD_RELOC_ARM_T32_OFFSET_IMM
30385      || fixp->fx_r_type == BFD_RELOC_ARM_T32_ADD_PC12
30386      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM
30387      || fixp->fx_r_type == BFD_RELOC_ARM_T32_CP_OFF_IMM_S2)
30388    return 0;
30389
30390  /* Always leave these relocations for the linker.  */
30391  if ((fixp->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30392       && fixp->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30393      || fixp->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30394    return 1;
30395
30396  /* Always generate relocations against function symbols.  */
30397  if (fixp->fx_r_type == BFD_RELOC_32
30398      && fixp->fx_addsy
30399      && (symbol_get_bfdsym (fixp->fx_addsy)->flags & BSF_FUNCTION))
30400    return 1;
30401
30402  return generic_force_reloc (fixp);
30403}
30404
30405#if defined (OBJ_ELF) || defined (OBJ_COFF)
30406/* Relocations against function names must be left unadjusted,
30407   so that the linker can use this information to generate interworking
30408   stubs.  The MIPS version of this function
30409   also prevents relocations that are mips-16 specific, but I do not
30410   know why it does this.
30411
30412   FIXME:
30413   There is one other problem that ought to be addressed here, but
30414   which currently is not:  Taking the address of a label (rather
30415   than a function) and then later jumping to that address.  Such
30416   addresses also ought to have their bottom bit set (assuming that
30417   they reside in Thumb code), but at the moment they will not.	 */
30418
30419bfd_boolean
30420arm_fix_adjustable (fixS * fixP)
30421{
30422  if (fixP->fx_addsy == NULL)
30423    return 1;
30424
30425  /* Preserve relocations against symbols with function type.  */
30426  if (symbol_get_bfdsym (fixP->fx_addsy)->flags & BSF_FUNCTION)
30427    return FALSE;
30428
30429  if (THUMB_IS_FUNC (fixP->fx_addsy)
30430      && fixP->fx_subsy == NULL)
30431    return FALSE;
30432
30433  /* We need the symbol name for the VTABLE entries.  */
30434  if (	 fixP->fx_r_type == BFD_RELOC_VTABLE_INHERIT
30435      || fixP->fx_r_type == BFD_RELOC_VTABLE_ENTRY)
30436    return FALSE;
30437
30438  /* Don't allow symbols to be discarded on GOT related relocs.	 */
30439  if (fixP->fx_r_type == BFD_RELOC_ARM_PLT32
30440      || fixP->fx_r_type == BFD_RELOC_ARM_GOT32
30441      || fixP->fx_r_type == BFD_RELOC_ARM_GOTOFF
30442      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32
30443      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GD32_FDPIC
30444      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LE32
30445      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32
30446      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_IE32_FDPIC
30447      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32
30448      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDM32_FDPIC
30449      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_LDO32
30450      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_GOTDESC
30451      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_CALL
30452      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_CALL
30453      || fixP->fx_r_type == BFD_RELOC_ARM_TLS_DESCSEQ
30454      || fixP->fx_r_type == BFD_RELOC_ARM_THM_TLS_DESCSEQ
30455      || fixP->fx_r_type == BFD_RELOC_ARM_TARGET2)
30456    return FALSE;
30457
30458  /* Similarly for group relocations.  */
30459  if ((fixP->fx_r_type >= BFD_RELOC_ARM_ALU_PC_G0_NC
30460       && fixP->fx_r_type <= BFD_RELOC_ARM_LDC_SB_G2)
30461      || fixP->fx_r_type == BFD_RELOC_ARM_LDR_PC_G0)
30462    return FALSE;
30463
30464  /* MOVW/MOVT REL relocations have limited offsets, so keep the symbols.  */
30465  if (fixP->fx_r_type == BFD_RELOC_ARM_MOVW
30466      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT
30467      || fixP->fx_r_type == BFD_RELOC_ARM_MOVW_PCREL
30468      || fixP->fx_r_type == BFD_RELOC_ARM_MOVT_PCREL
30469      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW
30470      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT
30471      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVW_PCREL
30472      || fixP->fx_r_type == BFD_RELOC_ARM_THUMB_MOVT_PCREL)
30473    return FALSE;
30474
30475  /* BFD_RELOC_ARM_THUMB_ALU_ABS_Gx_NC relocations have VERY limited
30476     offsets, so keep these symbols.  */
30477  if (fixP->fx_r_type >= BFD_RELOC_ARM_THUMB_ALU_ABS_G0_NC
30478      && fixP->fx_r_type <= BFD_RELOC_ARM_THUMB_ALU_ABS_G3_NC)
30479    return FALSE;
30480
30481  return TRUE;
30482}
30483#endif /* defined (OBJ_ELF) || defined (OBJ_COFF) */
30484
30485#ifdef OBJ_ELF
30486const char *
30487elf32_arm_target_format (void)
30488{
30489#ifdef TE_SYMBIAN
30490  return (target_big_endian
30491	  ? "elf32-bigarm-symbian"
30492	  : "elf32-littlearm-symbian");
30493#elif defined (TE_VXWORKS)
30494  return (target_big_endian
30495	  ? "elf32-bigarm-vxworks"
30496	  : "elf32-littlearm-vxworks");
30497#elif defined (TE_NACL)
30498  return (target_big_endian
30499	  ? "elf32-bigarm-nacl"
30500	  : "elf32-littlearm-nacl");
30501#else
30502  if (arm_fdpic)
30503    {
30504      if (target_big_endian)
30505	return "elf32-bigarm-fdpic";
30506      else
30507	return "elf32-littlearm-fdpic";
30508    }
30509  else
30510    {
30511      if (target_big_endian)
30512	return "elf32-bigarm";
30513      else
30514	return "elf32-littlearm";
30515    }
30516#endif
30517}
30518
/* Per-symbol hook run at write-out time; delegates entirely to the
   generic ELF symbol frobbing.  PUNTP is set by the callee when the
   symbol should be discarded.  */
void
armelf_frob_symbol (symbolS * symp,
		    int *     puntp)
{
  elf_frob_symbol (symp, puntp);
}
30525#endif
30526
30527/* MD interface: Finalization.	*/
30528
30529void
30530arm_cleanup (void)
30531{
30532  literal_pool * pool;
30533
30534  /* Ensure that all the predication blocks are properly closed.  */
30535  check_pred_blocks_finished ();
30536
30537  for (pool = list_of_pools; pool; pool = pool->next)
30538    {
30539      /* Put it at the end of the relevant section.  */
30540      subseg_set (pool->section, pool->sub_section);
30541#ifdef OBJ_ELF
30542      arm_elf_change_section ();
30543#endif
30544      s_ltorg (0);
30545    }
30546}
30547
30548#ifdef OBJ_ELF
30549/* Remove any excess mapping symbols generated for alignment frags in
30550   SEC.  We may have created a mapping symbol before a zero byte
30551   alignment; remove it if there's a mapping symbol after the
30552   alignment.  */
static void
check_mapping_symbols (bfd *abfd ATTRIBUTE_UNUSED, asection *sec,
		       void *dummy ATTRIBUTE_UNUSED)
{
  segment_info_type *seginfo = seg_info (sec);
  fragS *fragp;

  /* Nothing to do for sections without a frag chain.  */
  if (seginfo == NULL || seginfo->frchainP == NULL)
    return;

  for (fragp = seginfo->frchainP->frch_root;
       fragp != NULL;
       fragp = fragp->fr_next)
    {
      /* Last mapping symbol recorded in this frag, if any.  */
      symbolS *sym = fragp->tc_frag_data.last_map;
      fragS *next = fragp->fr_next;

      /* Variable-sized frags have been converted to fixed size by
	 this point.  But if this was variable-sized to start with,
	 there will be a fixed-size frag after it.  So don't handle
	 next == NULL.  */
      if (sym == NULL || next == NULL)
	continue;

      if (S_GET_VALUE (sym) < next->fr_address)
	/* Not at the end of this frag.  */
	continue;
      know (S_GET_VALUE (sym) == next->fr_address);

      /* SYM sits exactly at the start of NEXT; scan forward over empty
	 frags to decide whether it is redundant.  */
      do
	{
	  if (next->tc_frag_data.first_map != NULL)
	    {
	      /* Next frag starts with a mapping symbol.  Discard this
		 one.  */
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  if (next->fr_next == NULL)
	    {
	      /* This mapping symbol is at the end of the section.  Discard
		 it.  */
	      know (next->fr_fix == 0 && next->fr_var == 0);
	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
	      break;
	    }

	  /* As long as we have empty frags without any mapping symbols,
	     keep looking.  */
	  /* If the next frag is non-empty and does not start with a
	     mapping symbol, then this mapping symbol is required.  */
	  if (next->fr_address != next->fr_next->fr_address)
	    break;

	  next = next->fr_next;
	}
      while (next != NULL);
    }
}
30613#endif
30614
30615/* Adjust the symbol table.  This marks Thumb symbols as distinct from
30616   ARM ones.  */
30617
void
arm_adjust_symtab (void)
{
#ifdef OBJ_COFF
  symbolS * sym;

  /* COFF: rewrite the storage class of every Thumb symbol to its
     Thumb-specific counterpart so the disassembler/linker can tell
     Thumb from ARM code.  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  if (THUMB_IS_FUNC (sym))
	    {
	      /* Mark the symbol as a Thumb function.  */
	      if (   S_GET_STORAGE_CLASS (sym) == C_STAT
		  || S_GET_STORAGE_CLASS (sym) == C_LABEL)  /* This can happen!	 */
		S_SET_STORAGE_CLASS (sym, C_THUMBSTATFUNC);

	      else if (S_GET_STORAGE_CLASS (sym) == C_EXT)
		S_SET_STORAGE_CLASS (sym, C_THUMBEXTFUNC);
	      else
		as_bad (_("%s: unexpected function type: %d"),
			S_GET_NAME (sym), S_GET_STORAGE_CLASS (sym));
	    }
	  else switch (S_GET_STORAGE_CLASS (sym))
	    {
	    /* Non-function Thumb symbols: map each storage class onto
	       its Thumb equivalent.  */
	    case C_EXT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBEXT);
	      break;
	    case C_STAT:
	      S_SET_STORAGE_CLASS (sym, C_THUMBSTAT);
	      break;
	    case C_LABEL:
	      S_SET_STORAGE_CLASS (sym, C_THUMBLABEL);
	      break;
	    default:
	      /* Do nothing.  */
	      break;
	    }
	}

      /* Flag interworking-capable symbols in the native COFF entry.  */
      if (ARM_IS_INTERWORK (sym))
	coffsymbol (symbol_get_bfdsym (sym))->native->u.syment.n_flags = 0xFF;
    }
#endif
#ifdef OBJ_ELF
  symbolS * sym;
  char	    bind;

  /* ELF: tag Thumb symbols via st_target_internal (for .thumb_func) or
     the STT_ARM_16BIT symbol type (pre-EABIv4 labels).  */
  for (sym = symbol_rootP; sym != NULL; sym = symbol_next (sym))
    {
      if (ARM_IS_THUMB (sym))
	{
	  elf_symbol_type * elf_sym;

	  elf_sym = elf_symbol (symbol_get_bfdsym (sym));
	  bind = ELF_ST_BIND (elf_sym->internal_elf_sym.st_info);

	  /* ARM special symbols (such as mapping symbols) are left
	     untouched.  */
	  if (! bfd_is_arm_special_symbol_name (elf_sym->symbol.name,
		BFD_ARM_SPECIAL_SYM_TYPE_ANY))
	    {
	      /* If it's a .thumb_func, declare it as so,
		 otherwise tag label as .code 16.  */
	      if (THUMB_IS_FUNC (sym))
		ARM_SET_SYM_BRANCH_TYPE (elf_sym->internal_elf_sym.st_target_internal,
					 ST_BRANCH_TO_THUMB);
	      else if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
		elf_sym->internal_elf_sym.st_info =
		  ELF_ST_INFO (bind, STT_ARM_16BIT);
	    }
	}
    }

  /* Remove any overlapping mapping symbols generated by alignment frags.  */
  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
  /* Now do generic ELF adjustments.  */
  elf_adjust_symtab ();
#endif
}
30696
30697/* MD interface: Initialization.  */
30698
30699static void
30700set_constant_flonums (void)
30701{
30702  int i;
30703
30704  for (i = 0; i < NUM_FLOAT_VALS; i++)
30705    if (atof_ieee ((char *) fp_const[i], 'x', fp_values[i]) == NULL)
30706      abort ();
30707}
30708
30709/* Auto-select Thumb mode if it's the only available instruction set for the
30710   given architecture.  */
30711
static void
autoselect_thumb_from_cpu_variant (void)
{
  /* If the selected core lacks the base ARM instruction set
     (arm_ext_v1), only Thumb is available, so switch the assembler to
     16-bit opcodes.  */
  if (!ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v1))
    opcode_select (16);
}
30718
void
md_begin (void)
{
  unsigned mach;
  unsigned int i;

  /* Build the lookup tables used during assembly: mnemonics, condition
     codes, shift names, PSR names, register names, relocation
     specifiers and barrier options.  */
  arm_ops_hsh = str_htab_create ();
  arm_cond_hsh = str_htab_create ();
  arm_vcond_hsh = str_htab_create ();
  arm_shift_hsh = str_htab_create ();
  arm_psr_hsh = str_htab_create ();
  arm_v7m_psr_hsh = str_htab_create ();
  arm_reg_hsh = str_htab_create ();
  arm_reloc_hsh = str_htab_create ();
  arm_barrier_opt_hsh = str_htab_create ();

  /* Only the first opcode-table entry for each mnemonic is inserted;
     later entries with the same name are reached through it.  */
  for (i = 0; i < sizeof (insns) / sizeof (struct asm_opcode); i++)
    if (str_hash_find (arm_ops_hsh, insns[i].template_name) == NULL)
      str_hash_insert (arm_ops_hsh, insns[i].template_name, insns + i, 0);
  for (i = 0; i < sizeof (conds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_cond_hsh, conds[i].template_name, conds + i, 0);
  for (i = 0; i < sizeof (vconds) / sizeof (struct asm_cond); i++)
    str_hash_insert (arm_vcond_hsh, vconds[i].template_name, vconds + i, 0);
  for (i = 0; i < sizeof (shift_names) / sizeof (struct asm_shift_name); i++)
    str_hash_insert (arm_shift_hsh, shift_names[i].name, shift_names + i, 0);
  for (i = 0; i < sizeof (psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_psr_hsh, psrs[i].template_name, psrs + i, 0);
  for (i = 0; i < sizeof (v7m_psrs) / sizeof (struct asm_psr); i++)
    str_hash_insert (arm_v7m_psr_hsh, v7m_psrs[i].template_name,
		     v7m_psrs + i, 0);
  for (i = 0; i < sizeof (reg_names) / sizeof (struct reg_entry); i++)
    str_hash_insert (arm_reg_hsh, reg_names[i].name, reg_names + i, 0);
  for (i = 0;
       i < sizeof (barrier_opt_names) / sizeof (struct asm_barrier_opt);
       i++)
    str_hash_insert (arm_barrier_opt_hsh, barrier_opt_names[i].template_name,
		     barrier_opt_names + i, 0);
#ifdef OBJ_ELF
  for (i = 0; i < ARRAY_SIZE (reloc_names); i++)
    {
      struct reloc_entry * entry = reloc_names + i;

      if (arm_is_eabi() && entry->reloc == BFD_RELOC_ARM_PLT32)
	/* This makes encode_branch() use the EABI versions of this relocation.  */
	entry->reloc = BFD_RELOC_UNUSED;

      str_hash_insert (arm_reloc_hsh, entry->name, entry, 0);
    }
#endif

  set_constant_flonums ();

  /* Set the cpu variant based on the command-line options.  We prefer
     -mcpu= over -march= if both are set (as for GCC); and we prefer
     -mfpu= over any other way of setting the floating point unit.
     Use of legacy options together with new options is faulted.  */
  if (legacy_cpu)
    {
      if (mcpu_cpu_opt || march_cpu_opt)
	as_bad (_("use of old and new-style options to set CPU type"));

      selected_arch = *legacy_cpu;
    }
  else if (mcpu_cpu_opt)
    {
      selected_arch = *mcpu_cpu_opt;
      selected_ext = *mcpu_ext_opt;
    }
  else if (march_cpu_opt)
    {
      selected_arch = *march_cpu_opt;
      selected_ext = *march_ext_opt;
    }
  ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);

  if (legacy_fpu)
    {
      if (mfpu_opt)
	as_bad (_("use of old and new-style options to set FPU type"));

      selected_fpu = *legacy_fpu;
    }
  else if (mfpu_opt)
    selected_fpu = *mfpu_opt;
  else
    {
#if !(defined (EABI_DEFAULT) || defined (TE_LINUX) \
	|| defined (TE_NetBSD) || defined (TE_VXWORKS))
      /* Some environments specify a default FPU.  If they don't, infer it
	 from the processor.  */
      if (mcpu_fpu_opt)
	selected_fpu = *mcpu_fpu_opt;
      else if (march_fpu_opt)
	selected_fpu = *march_fpu_opt;
#else
      selected_fpu = fpu_default;
#endif
    }

  /* Still no FPU selected: fall back to the environment default when a
     CPU was chosen, otherwise to FPA.  */
  if (ARM_FEATURE_ZERO (selected_fpu))
    {
      if (!no_cpu_selected ())
	selected_fpu = fpu_default;
      else
	selected_fpu = fpu_arch_fpa;
    }

#ifdef CPU_DEFAULT
  if (ARM_FEATURE_ZERO (selected_arch))
    {
      selected_arch = cpu_default;
      selected_cpu = selected_arch;
    }
  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#else
  /*  Autodetection of feature mode: allow all features in cpu_variant but
      leave selected_cpu unset.  It will be set in
      aeabi_set_public_attributes () after all instructions have been
      processed and we can decide what CPU should be selected.  */
  if (ARM_FEATURE_ZERO (selected_arch))
    ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
  else
    ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
#endif

  autoselect_thumb_from_cpu_variant ();

  arm_arch_used = thumb_arch_used = arm_arch_none;

  /* Compute the object-file private flags from the selected ABI and
     floating-point options.  */
#if defined OBJ_COFF || defined OBJ_ELF
  {
    unsigned int flags = 0;

#if defined OBJ_ELF
    flags = meabi_flags;

    switch (meabi_flags)
      {
      case EF_ARM_EABI_UNKNOWN:
#endif
	/* Set the flags in the private structure.  */
	if (uses_apcs_26)      flags |= F_APCS26;
	if (support_interwork) flags |= F_INTERWORK;
	if (uses_apcs_float)   flags |= F_APCS_FLOAT;
	if (pic_code)	       flags |= F_PIC;
	if (!ARM_CPU_HAS_FEATURE (cpu_variant, fpu_any_hard))
	  flags |= F_SOFT_FLOAT;

	switch (mfloat_abi_opt)
	  {
	  case ARM_FLOAT_ABI_SOFT:
	  case ARM_FLOAT_ABI_SOFTFP:
	    flags |= F_SOFT_FLOAT;
	    break;

	  case ARM_FLOAT_ABI_HARD:
	    if (flags & F_SOFT_FLOAT)
	      as_bad (_("hard-float conflicts with specified fpu"));
	    break;
	  }

	/* Using pure-endian doubles (even if soft-float).	*/
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_endian_pure))
	  flags |= F_VFP_FLOAT;

#if defined OBJ_ELF
	if (ARM_CPU_HAS_FEATURE (cpu_variant, fpu_arch_maverick))
	    flags |= EF_ARM_MAVERICK_FLOAT;
	break;

      case EF_ARM_EABI_VER4:
      case EF_ARM_EABI_VER5:
	/* No additional flags to set.	*/
	break;

      default:
	abort ();
      }
#endif
    bfd_set_private_flags (stdoutput, flags);

    /* We have run out of flags in the COFF header to encode the
       status of ATPCS support, so instead we create a dummy,
       empty, debug section called .arm.atpcs.	*/
    if (atpcs)
      {
	asection * sec;

	sec = bfd_make_section (stdoutput, ".arm.atpcs");

	if (sec != NULL)
	  {
	    bfd_set_section_flags (sec, SEC_READONLY | SEC_DEBUGGING);
	    bfd_set_section_size (sec, 0);
	    bfd_set_section_contents (stdoutput, sec, NULL, 0, 0);
	  }
      }
  }
#endif

  /* Record the CPU type as well.  Checks run from most specific
     (coprocessor extensions) to least specific (base architectures).  */
  if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt2))
    mach = bfd_mach_arm_iWMMXt2;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_iwmmxt))
    mach = bfd_mach_arm_iWMMXt;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_xscale))
    mach = bfd_mach_arm_XScale;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_cext_maverick))
    mach = bfd_mach_arm_ep9312;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5e))
    mach = bfd_mach_arm_5TE;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v5))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_5T;
      else
	mach = bfd_mach_arm_5;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4))
    {
      if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v4t))
	mach = bfd_mach_arm_4T;
      else
	mach = bfd_mach_arm_4;
    }
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3m))
    mach = bfd_mach_arm_3M;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v3))
    mach = bfd_mach_arm_3;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2s))
    mach = bfd_mach_arm_2a;
  else if (ARM_CPU_HAS_FEATURE (cpu_variant, arm_ext_v2))
    mach = bfd_mach_arm_2;
  else
    mach = bfd_mach_arm_unknown;

  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
}
30957
30958/* Command line processing.  */
30959
30960/* md_parse_option
30961      Invocation line includes a switch not recognized by the base assembler.
30962      See if it's a processor-specific option.
30963
30964      This routine is somewhat complicated by the need for backwards
30965      compatibility (since older releases of gcc can't be changed).
30966      The new options try to make the interface as compatible as
30967      possible with GCC.
30968
30969      New options (supported) are:
30970
30971	      -mcpu=<cpu name>		 Assemble for selected processor
30972	      -march=<architecture name> Assemble for selected architecture
30973	      -mfpu=<fpu architecture>	 Assemble for selected FPU.
30974	      -EB/-mbig-endian		 Big-endian
30975	      -EL/-mlittle-endian	 Little-endian
30976	      -k			 Generate PIC code
30977	      -mthumb			 Start in Thumb mode
30978	      -mthumb-interwork		 Code supports ARM/Thumb interworking
30979
30980	      -m[no-]warn-deprecated     Warn about deprecated features
30981	      -m[no-]warn-syms		 Warn when symbols match instructions
30982
      For now we will also provide support for:

	      -mapcs-32			 32-bit Program counter
	      -mapcs-26			 26-bit Program counter
	      -mapcs-float		 Floats passed in FP registers
	      -mapcs-reentrant		 Reentrant code
	      -matpcs
      (sometimes these will probably be replaced with -mapcs=<list of options>
      and -matpcs=<list of options>)

      The remaining options are only supported for backwards compatibility.
30994      Cpu variants, the arm part is optional:
30995	      -m[arm]1		      Currently not supported.
30996	      -m[arm]2, -m[arm]250    Arm 2 and Arm 250 processor
30997	      -m[arm]3		      Arm 3 processor
30998	      -m[arm]6[xx],	      Arm 6 processors
30999	      -m[arm]7[xx][t][[d]m]   Arm 7 processors
31000	      -m[arm]8[10]	      Arm 8 processors
31001	      -m[arm]9[20][tdmi]      Arm 9 processors
31002	      -mstrongarm[110[0]]     StrongARM processors
31003	      -mxscale		      XScale processors
31004	      -m[arm]v[2345[t[e]]]    Arm architectures
31005	      -mall		      All (except the ARM1)
31006      FP variants:
31007	      -mfpa10, -mfpa11	      FPA10 and 11 co-processor instructions
31008	      -mfpe-old		      (No float load/store multiples)
31009	      -mvfpxd		      VFP Single precision
31010	      -mvfp		      All VFP
31011	      -mno-fpu		      Disable all floating point instructions
31012
31013      The following CPU names are recognized:
31014	      arm1, arm2, arm250, arm3, arm6, arm600, arm610, arm620,
31015	      arm7, arm7m, arm7d, arm7dm, arm7di, arm7dmi, arm70, arm700,
31016	      arm700i, arm710 arm710t, arm720, arm720t, arm740t, arm710c,
31017	      arm7100, arm7500, arm7500fe, arm7tdmi, arm8, arm810, arm9,
31018	      arm920, arm920t, arm940t, arm946, arm966, arm9tdmi, arm9e,
31019	      arm10t arm10e, arm1020t, arm1020e, arm10200e,
31020	      strongarm, strongarm110, strongarm1100, strongarm1110, xscale.
31021
31022      */
31023
/* Single-character options: -m<arg> (takes an argument) and -k.  */
const char * md_shortopts = "m:k";
31025
/* Values for the long options below.  Bi-endian targets accept both
   -EB and -EL; single-endian targets accept only the flag matching
   their configured byte order.  */
#ifdef ARM_BI_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#define OPTION_EL (OPTION_MD_BASE + 1)
#else
#if TARGET_BYTES_BIG_ENDIAN
#define OPTION_EB (OPTION_MD_BASE + 0)
#else
#define OPTION_EL (OPTION_MD_BASE + 1)
#endif
#endif
#define OPTION_FIX_V4BX (OPTION_MD_BASE + 2)
/* --fdpic is only registered for ELF targets (see md_longopts).  */
#define OPTION_FDPIC (OPTION_MD_BASE + 3)
31038
/* Long options recognized by the ARM backend; see the OPTION_*
   macros above for the returned values.  */
struct option md_longopts[] =
{
#ifdef OPTION_EB
  {"EB", no_argument, NULL, OPTION_EB},
#endif
#ifdef OPTION_EL
  {"EL", no_argument, NULL, OPTION_EL},
#endif
  {"fix-v4bx", no_argument, NULL, OPTION_FIX_V4BX},
#ifdef OBJ_ELF
  {"fdpic", no_argument, NULL, OPTION_FDPIC},
#endif
  {NULL, no_argument, NULL, 0}
};

size_t md_longopts_size = sizeof (md_longopts);
31055
/* Describes a simple boolean command-line option: matching OPTION sets
   *VAR to VALUE (see arm_opts below).  */
struct arm_option_table
{
  const char *  option;		/* Option name to match.  */
  const char *  help;		/* Help information.  */
  int *         var;		/* Variable to change.	*/
  int	        value;		/* What to change it to.  */
  const char *  deprecated;	/* If non-null, print this message.  */
};
31064
31065struct arm_option_table arm_opts[] =
31066{
31067  {"k",	     N_("generate PIC code"),	   &pic_code,	 1, NULL},
31068  {"mthumb", N_("assemble Thumb code"),	   &thumb_mode,	 1, NULL},
31069  {"mthumb-interwork", N_("support ARM/Thumb interworking"),
31070   &support_interwork, 1, NULL},
31071  {"mapcs-32", N_("code uses 32-bit program counter"), &uses_apcs_26, 0, NULL},
31072  {"mapcs-26", N_("code uses 26-bit program counter"), &uses_apcs_26, 1, NULL},
31073  {"mapcs-float", N_("floating point args are in fp regs"), &uses_apcs_float,
31074   1, NULL},
31075  {"mapcs-reentrant", N_("re-entrant code"), &pic_code, 1, NULL},
31076  {"matpcs", N_("code is ATPCS conformant"), &atpcs, 1, NULL},
31077  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
31078  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
31079   NULL},
31080
31081  /* These are recognized by the assembler, but have no affect on code.	 */
31082  {"mapcs-frame", N_("use frame pointer"), NULL, 0, NULL},
31083  {"mapcs-stack-check", N_("use stack size checking"), NULL, 0, NULL},
31084
31085  {"mwarn-deprecated", NULL, &warn_on_deprecated, 1, NULL},
31086  {"mno-warn-deprecated", N_("do not warn on use of deprecated feature"),
31087   &warn_on_deprecated, 0, NULL},
31088
31089  {"mwarn-restrict-it", N_("warn about performance deprecated IT instructions"
31090   " in ARMv8-A and ARMv8-R"), &warn_on_restrict_it, 1, NULL},
31091  {"mno-warn-restrict-it", NULL, &warn_on_restrict_it, 0, NULL},
31092
31093  {"mwarn-syms", N_("warn about symbols that match instruction names [default]"), (int *) (& flag_warn_syms), TRUE, NULL},
31094  {"mno-warn-syms", N_("disable warnings about symobls that match instructions"), (int *) (& flag_warn_syms), FALSE, NULL},
31095  {NULL, NULL, NULL, 0, NULL}
31096};
31097
/* Describes a deprecated legacy CPU/FPU option: matching OPTION makes
   *VAR point at VALUE, and DEPRECATED (when non-null) names the
   replacement option (see arm_legacy_opts below).  */
struct arm_legacy_option_table
{
  const char *              option;		/* Option name to match.  */
  const arm_feature_set	**  var;		/* Variable to change.	*/
  const arm_feature_set	    value;		/* What to change it to.  */
  const char *              deprecated;		/* If non-null, print this message.  */
};
31105
31106const struct arm_legacy_option_table arm_legacy_opts[] =
31107{
31108  /* DON'T add any new processors to this list -- we want the whole list
31109     to go away...  Add them to the processors table instead.  */
31110  {"marm1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
31111  {"m1",	 &legacy_cpu, ARM_ARCH_V1,  N_("use -mcpu=arm1")},
31112  {"marm2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
31113  {"m2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -mcpu=arm2")},
31114  {"marm250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
31115  {"m250",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm250")},
31116  {"marm3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
31117  {"m3",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -mcpu=arm3")},
31118  {"marm6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
31119  {"m6",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm6")},
31120  {"marm600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
31121  {"m600",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm600")},
31122  {"marm610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
31123  {"m610",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm610")},
31124  {"marm620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
31125  {"m620",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm620")},
31126  {"marm7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
31127  {"m7",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7")},
31128  {"marm70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
31129  {"m70",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm70")},
31130  {"marm700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
31131  {"m700",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700")},
31132  {"marm700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
31133  {"m700i",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm700i")},
31134  {"marm710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
31135  {"m710",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710")},
31136  {"marm710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
31137  {"m710c",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm710c")},
31138  {"marm720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
31139  {"m720",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm720")},
31140  {"marm7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
31141  {"m7d",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7d")},
31142  {"marm7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
31143  {"m7di",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7di")},
31144  {"marm7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
31145  {"m7m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7m")},
31146  {"marm7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
31147  {"m7dm",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dm")},
31148  {"marm7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
31149  {"m7dmi",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -mcpu=arm7dmi")},
31150  {"marm7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
31151  {"m7100",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7100")},
31152  {"marm7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
31153  {"m7500",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500")},
31154  {"marm7500fe", &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
31155  {"m7500fe",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -mcpu=arm7500fe")},
31156  {"marm7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31157  {"m7t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31158  {"marm7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31159  {"m7tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm7tdmi")},
31160  {"marm710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
31161  {"m710t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm710t")},
31162  {"marm720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
31163  {"m720t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm720t")},
31164  {"marm740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
31165  {"m740t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm740t")},
31166  {"marm8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
31167  {"m8",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm8")},
31168  {"marm810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
31169  {"m810",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=arm810")},
31170  {"marm9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
31171  {"m9",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9")},
31172  {"marm9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
31173  {"m9tdmi",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm9tdmi")},
31174  {"marm920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
31175  {"m920",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm920")},
31176  {"marm940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
31177  {"m940",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -mcpu=arm940")},
31178  {"mstrongarm", &legacy_cpu, ARM_ARCH_V4,  N_("use -mcpu=strongarm")},
31179  {"mstrongarm110", &legacy_cpu, ARM_ARCH_V4,
31180   N_("use -mcpu=strongarm110")},
31181  {"mstrongarm1100", &legacy_cpu, ARM_ARCH_V4,
31182   N_("use -mcpu=strongarm1100")},
31183  {"mstrongarm1110", &legacy_cpu, ARM_ARCH_V4,
31184   N_("use -mcpu=strongarm1110")},
31185  {"mxscale",	 &legacy_cpu, ARM_ARCH_XSCALE, N_("use -mcpu=xscale")},
31186  {"miwmmxt",	 &legacy_cpu, ARM_ARCH_IWMMXT, N_("use -mcpu=iwmmxt")},
31187  {"mall",	 &legacy_cpu, ARM_ANY,	       N_("use -mcpu=all")},
31188
31189  /* Architecture variants -- don't add any more to this list either.  */
31190  {"mv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
31191  {"marmv2",	 &legacy_cpu, ARM_ARCH_V2,  N_("use -march=armv2")},
31192  {"mv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
31193  {"marmv2a",	 &legacy_cpu, ARM_ARCH_V2S, N_("use -march=armv2a")},
31194  {"mv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
31195  {"marmv3",	 &legacy_cpu, ARM_ARCH_V3,  N_("use -march=armv3")},
31196  {"mv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
31197  {"marmv3m",	 &legacy_cpu, ARM_ARCH_V3M, N_("use -march=armv3m")},
31198  {"mv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
31199  {"marmv4",	 &legacy_cpu, ARM_ARCH_V4,  N_("use -march=armv4")},
31200  {"mv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
31201  {"marmv4t",	 &legacy_cpu, ARM_ARCH_V4T, N_("use -march=armv4t")},
31202  {"mv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
31203  {"marmv5",	 &legacy_cpu, ARM_ARCH_V5,  N_("use -march=armv5")},
31204  {"mv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
31205  {"marmv5t",	 &legacy_cpu, ARM_ARCH_V5T, N_("use -march=armv5t")},
31206  {"mv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
31207  {"marmv5e",	 &legacy_cpu, ARM_ARCH_V5TE, N_("use -march=armv5te")},
31208
31209  /* Floating point variants -- don't add any more to this list either.	 */
31210  {"mfpe-old",   &legacy_fpu, FPU_ARCH_FPE, N_("use -mfpu=fpe")},
31211  {"mfpa10",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa10")},
31212  {"mfpa11",     &legacy_fpu, FPU_ARCH_FPA, N_("use -mfpu=fpa11")},
31213  {"mno-fpu",    &legacy_fpu, ARM_ARCH_NONE,
31214   N_("use either -mfpu=softfpa or -mfpu=softvfp")},
31215
31216  {NULL, NULL, ARM_ARCH_NONE, NULL}
31217};
31218
/* One entry in the -mcpu= option table below: the name accepted on the
   command line, the feature sets it selects, and the FPU assumed when
   the user gives no -mfpu= option.  */
31219struct arm_cpu_option_table
31220{
  /* The option name as matched on the command line (e.g. "cortex-a53").  */
31221  const char *           name;
  /* Length of NAME; precomputed by the ARM_CPU_OPT macro below.  */
31222  size_t                 name_len;
  /* The core architectural feature set selected by this CPU.  */
31223  const arm_feature_set	 value;
  /* Extension features implied by this CPU in addition to VALUE
     (ARM_ARCH_NONE when there are none).  */
31224  const arm_feature_set	 ext;
31225  /* For some CPUs we assume an FPU unless the user explicitly sets
31226     -mfpu=...	*/
31227  const arm_feature_set	 default_fpu;
31228  /* The canonical name of the CPU, or NULL to use NAME converted to upper
31229     case.  */
31230  const char *           canonical_name;
31231};
31232
31233/* This list should, at a minimum, contain all the cpu names
31234   recognized by GCC.  */
/* ARM_CPU_OPT (N, CN, V, E, DF): N is the option name, CN the canonical
   name (or NULL to upper-case N), V the core feature set, E any implied
   extensions, DF the default FPU.  NAME_LEN is filled in automatically.  */
31235#define ARM_CPU_OPT(N, CN, V, E, DF) { N, sizeof (N) - 1, V, E, DF, CN }
31236
31237static const struct arm_cpu_option_table arm_cpus[] =
31238{
31239  ARM_CPU_OPT ("all",		  NULL,		       ARM_ANY,
31240	       ARM_ARCH_NONE,
31241	       FPU_ARCH_FPA),
31242  ARM_CPU_OPT ("arm1",		  NULL,		       ARM_ARCH_V1,
31243	       ARM_ARCH_NONE,
31244	       FPU_ARCH_FPA),
31245  ARM_CPU_OPT ("arm2",		  NULL,		       ARM_ARCH_V2,
31246	       ARM_ARCH_NONE,
31247	       FPU_ARCH_FPA),
31248  ARM_CPU_OPT ("arm250",	  NULL,		       ARM_ARCH_V2S,
31249	       ARM_ARCH_NONE,
31250	       FPU_ARCH_FPA),
31251  ARM_CPU_OPT ("arm3",		  NULL,		       ARM_ARCH_V2S,
31252	       ARM_ARCH_NONE,
31253	       FPU_ARCH_FPA),
31254  ARM_CPU_OPT ("arm6",		  NULL,		       ARM_ARCH_V3,
31255	       ARM_ARCH_NONE,
31256	       FPU_ARCH_FPA),
31257  ARM_CPU_OPT ("arm60",		  NULL,		       ARM_ARCH_V3,
31258	       ARM_ARCH_NONE,
31259	       FPU_ARCH_FPA),
31260  ARM_CPU_OPT ("arm600",	  NULL,		       ARM_ARCH_V3,
31261	       ARM_ARCH_NONE,
31262	       FPU_ARCH_FPA),
31263  ARM_CPU_OPT ("arm610",	  NULL,		       ARM_ARCH_V3,
31264	       ARM_ARCH_NONE,
31265	       FPU_ARCH_FPA),
31266  ARM_CPU_OPT ("arm620",	  NULL,		       ARM_ARCH_V3,
31267	       ARM_ARCH_NONE,
31268	       FPU_ARCH_FPA),
31269  ARM_CPU_OPT ("arm7",		  NULL,		       ARM_ARCH_V3,
31270	       ARM_ARCH_NONE,
31271	       FPU_ARCH_FPA),
31272  ARM_CPU_OPT ("arm7m",		  NULL,		       ARM_ARCH_V3M,
31273	       ARM_ARCH_NONE,
31274	       FPU_ARCH_FPA),
31275  ARM_CPU_OPT ("arm7d",		  NULL,		       ARM_ARCH_V3,
31276	       ARM_ARCH_NONE,
31277	       FPU_ARCH_FPA),
31278  ARM_CPU_OPT ("arm7dm",	  NULL,		       ARM_ARCH_V3M,
31279	       ARM_ARCH_NONE,
31280	       FPU_ARCH_FPA),
31281  ARM_CPU_OPT ("arm7di",	  NULL,		       ARM_ARCH_V3,
31282	       ARM_ARCH_NONE,
31283	       FPU_ARCH_FPA),
31284  ARM_CPU_OPT ("arm7dmi",	  NULL,		       ARM_ARCH_V3M,
31285	       ARM_ARCH_NONE,
31286	       FPU_ARCH_FPA),
31287  ARM_CPU_OPT ("arm70",		  NULL,		       ARM_ARCH_V3,
31288	       ARM_ARCH_NONE,
31289	       FPU_ARCH_FPA),
31290  ARM_CPU_OPT ("arm700",	  NULL,		       ARM_ARCH_V3,
31291	       ARM_ARCH_NONE,
31292	       FPU_ARCH_FPA),
31293  ARM_CPU_OPT ("arm700i",	  NULL,		       ARM_ARCH_V3,
31294	       ARM_ARCH_NONE,
31295	       FPU_ARCH_FPA),
31296  ARM_CPU_OPT ("arm710",	  NULL,		       ARM_ARCH_V3,
31297	       ARM_ARCH_NONE,
31298	       FPU_ARCH_FPA),
31299  ARM_CPU_OPT ("arm710t",	  NULL,		       ARM_ARCH_V4T,
31300	       ARM_ARCH_NONE,
31301	       FPU_ARCH_FPA),
31302  ARM_CPU_OPT ("arm720",	  NULL,		       ARM_ARCH_V3,
31303	       ARM_ARCH_NONE,
31304	       FPU_ARCH_FPA),
31305  ARM_CPU_OPT ("arm720t",	  NULL,		       ARM_ARCH_V4T,
31306	       ARM_ARCH_NONE,
31307	       FPU_ARCH_FPA),
31308  ARM_CPU_OPT ("arm740t",	  NULL,		       ARM_ARCH_V4T,
31309	       ARM_ARCH_NONE,
31310	       FPU_ARCH_FPA),
31311  ARM_CPU_OPT ("arm710c",	  NULL,		       ARM_ARCH_V3,
31312	       ARM_ARCH_NONE,
31313	       FPU_ARCH_FPA),
31314  ARM_CPU_OPT ("arm7100",	  NULL,		       ARM_ARCH_V3,
31315	       ARM_ARCH_NONE,
31316	       FPU_ARCH_FPA),
31317  ARM_CPU_OPT ("arm7500",	  NULL,		       ARM_ARCH_V3,
31318	       ARM_ARCH_NONE,
31319	       FPU_ARCH_FPA),
31320  ARM_CPU_OPT ("arm7500fe",	  NULL,		       ARM_ARCH_V3,
31321	       ARM_ARCH_NONE,
31322	       FPU_ARCH_FPA),
31323  ARM_CPU_OPT ("arm7t",		  NULL,		       ARM_ARCH_V4T,
31324	       ARM_ARCH_NONE,
31325	       FPU_ARCH_FPA),
31326  ARM_CPU_OPT ("arm7tdmi",	  NULL,		       ARM_ARCH_V4T,
31327	       ARM_ARCH_NONE,
31328	       FPU_ARCH_FPA),
31329  ARM_CPU_OPT ("arm7tdmi-s",	  NULL,		       ARM_ARCH_V4T,
31330	       ARM_ARCH_NONE,
31331	       FPU_ARCH_FPA),
31332  ARM_CPU_OPT ("arm8",		  NULL,		       ARM_ARCH_V4,
31333	       ARM_ARCH_NONE,
31334	       FPU_ARCH_FPA),
31335  ARM_CPU_OPT ("arm810",	  NULL,		       ARM_ARCH_V4,
31336	       ARM_ARCH_NONE,
31337	       FPU_ARCH_FPA),
31338  ARM_CPU_OPT ("strongarm",	  NULL,		       ARM_ARCH_V4,
31339	       ARM_ARCH_NONE,
31340	       FPU_ARCH_FPA),
31341  ARM_CPU_OPT ("strongarm1",	  NULL,		       ARM_ARCH_V4,
31342	       ARM_ARCH_NONE,
31343	       FPU_ARCH_FPA),
31344  ARM_CPU_OPT ("strongarm110",	  NULL,		       ARM_ARCH_V4,
31345	       ARM_ARCH_NONE,
31346	       FPU_ARCH_FPA),
31347  ARM_CPU_OPT ("strongarm1100",	  NULL,		       ARM_ARCH_V4,
31348	       ARM_ARCH_NONE,
31349	       FPU_ARCH_FPA),
31350  ARM_CPU_OPT ("strongarm1110",	  NULL,		       ARM_ARCH_V4,
31351	       ARM_ARCH_NONE,
31352	       FPU_ARCH_FPA),
31353  ARM_CPU_OPT ("arm9",		  NULL,		       ARM_ARCH_V4T,
31354	       ARM_ARCH_NONE,
31355	       FPU_ARCH_FPA),
31356  ARM_CPU_OPT ("arm920",	  "ARM920T",	       ARM_ARCH_V4T,
31357	       ARM_ARCH_NONE,
31358	       FPU_ARCH_FPA),
31359  ARM_CPU_OPT ("arm920t",	  NULL,		       ARM_ARCH_V4T,
31360	       ARM_ARCH_NONE,
31361	       FPU_ARCH_FPA),
31362  ARM_CPU_OPT ("arm922t",	  NULL,		       ARM_ARCH_V4T,
31363	       ARM_ARCH_NONE,
31364	       FPU_ARCH_FPA),
31365  ARM_CPU_OPT ("arm940t",	  NULL,		       ARM_ARCH_V4T,
31366	       ARM_ARCH_NONE,
31367	       FPU_ARCH_FPA),
31368  ARM_CPU_OPT ("arm9tdmi",	  NULL,		       ARM_ARCH_V4T,
31369	       ARM_ARCH_NONE,
31370	       FPU_ARCH_FPA),
31371  ARM_CPU_OPT ("fa526",		  NULL,		       ARM_ARCH_V4,
31372	       ARM_ARCH_NONE,
31373	       FPU_ARCH_FPA),
31374  ARM_CPU_OPT ("fa626",		  NULL,		       ARM_ARCH_V4,
31375	       ARM_ARCH_NONE,
31376	       FPU_ARCH_FPA),
31377
31378  /* For V5 or later processors we default to using VFP; but the user
31379     should really set the FPU type explicitly.	 */
31380  ARM_CPU_OPT ("arm9e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31381	       ARM_ARCH_NONE,
31382	       FPU_ARCH_VFP_V2),
31383  ARM_CPU_OPT ("arm9e",		  NULL,		       ARM_ARCH_V5TE,
31384	       ARM_ARCH_NONE,
31385	       FPU_ARCH_VFP_V2),
31386  ARM_CPU_OPT ("arm926ej",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
31387	       ARM_ARCH_NONE,
31388	       FPU_ARCH_VFP_V2),
31389  ARM_CPU_OPT ("arm926ejs",	  "ARM926EJ-S",	       ARM_ARCH_V5TEJ,
31390	       ARM_ARCH_NONE,
31391	       FPU_ARCH_VFP_V2),
31392  ARM_CPU_OPT ("arm926ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
31393	       ARM_ARCH_NONE,
31394	       FPU_ARCH_VFP_V2),
31395  ARM_CPU_OPT ("arm946e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31396	       ARM_ARCH_NONE,
31397	       FPU_ARCH_VFP_V2),
31398  ARM_CPU_OPT ("arm946e",	  "ARM946E-S",	       ARM_ARCH_V5TE,
31399	       ARM_ARCH_NONE,
31400	       FPU_ARCH_VFP_V2),
31401  ARM_CPU_OPT ("arm946e-s",	  NULL,		       ARM_ARCH_V5TE,
31402	       ARM_ARCH_NONE,
31403	       FPU_ARCH_VFP_V2),
31404  ARM_CPU_OPT ("arm966e-r0",	  NULL,		       ARM_ARCH_V5TExP,
31405	       ARM_ARCH_NONE,
31406	       FPU_ARCH_VFP_V2),
31407  ARM_CPU_OPT ("arm966e",	  "ARM966E-S",	       ARM_ARCH_V5TE,
31408	       ARM_ARCH_NONE,
31409	       FPU_ARCH_VFP_V2),
31410  ARM_CPU_OPT ("arm966e-s",	  NULL,		       ARM_ARCH_V5TE,
31411	       ARM_ARCH_NONE,
31412	       FPU_ARCH_VFP_V2),
31413  ARM_CPU_OPT ("arm968e-s",	  NULL,		       ARM_ARCH_V5TE,
31414	       ARM_ARCH_NONE,
31415	       FPU_ARCH_VFP_V2),
31416  ARM_CPU_OPT ("arm10t",	  NULL,		       ARM_ARCH_V5T,
31417	       ARM_ARCH_NONE,
31418	       FPU_ARCH_VFP_V1),
31419  ARM_CPU_OPT ("arm10tdmi",	  NULL,		       ARM_ARCH_V5T,
31420	       ARM_ARCH_NONE,
31421	       FPU_ARCH_VFP_V1),
31422  ARM_CPU_OPT ("arm10e",	  NULL,		       ARM_ARCH_V5TE,
31423	       ARM_ARCH_NONE,
31424	       FPU_ARCH_VFP_V2),
31425  ARM_CPU_OPT ("arm1020",	  "ARM1020E",	       ARM_ARCH_V5TE,
31426	       ARM_ARCH_NONE,
31427	       FPU_ARCH_VFP_V2),
31428  ARM_CPU_OPT ("arm1020t",	  NULL,		       ARM_ARCH_V5T,
31429	       ARM_ARCH_NONE,
31430	       FPU_ARCH_VFP_V1),
31431  ARM_CPU_OPT ("arm1020e",	  NULL,		       ARM_ARCH_V5TE,
31432	       ARM_ARCH_NONE,
31433	       FPU_ARCH_VFP_V2),
31434  ARM_CPU_OPT ("arm1022e",	  NULL,		       ARM_ARCH_V5TE,
31435	       ARM_ARCH_NONE,
31436	       FPU_ARCH_VFP_V2),
31437  ARM_CPU_OPT ("arm1026ejs",	  "ARM1026EJ-S",       ARM_ARCH_V5TEJ,
31438	       ARM_ARCH_NONE,
31439	       FPU_ARCH_VFP_V2),
31440  ARM_CPU_OPT ("arm1026ej-s",	  NULL,		       ARM_ARCH_V5TEJ,
31441	       ARM_ARCH_NONE,
31442	       FPU_ARCH_VFP_V2),
31443  ARM_CPU_OPT ("fa606te",	  NULL,		       ARM_ARCH_V5TE,
31444	       ARM_ARCH_NONE,
31445	       FPU_ARCH_VFP_V2),
31446  ARM_CPU_OPT ("fa616te",	  NULL,		       ARM_ARCH_V5TE,
31447	       ARM_ARCH_NONE,
31448	       FPU_ARCH_VFP_V2),
31449  ARM_CPU_OPT ("fa626te",	  NULL,		       ARM_ARCH_V5TE,
31450	       ARM_ARCH_NONE,
31451	       FPU_ARCH_VFP_V2),
31452  ARM_CPU_OPT ("fmp626",	  NULL,		       ARM_ARCH_V5TE,
31453	       ARM_ARCH_NONE,
31454	       FPU_ARCH_VFP_V2),
31455  ARM_CPU_OPT ("fa726te",	  NULL,		       ARM_ARCH_V5TE,
31456	       ARM_ARCH_NONE,
31457	       FPU_ARCH_VFP_V2),
31458  ARM_CPU_OPT ("arm1136js",	  "ARM1136J-S",	       ARM_ARCH_V6,
31459	       ARM_ARCH_NONE,
31460	       FPU_NONE),
31461  ARM_CPU_OPT ("arm1136j-s",	  NULL,		       ARM_ARCH_V6,
31462	       ARM_ARCH_NONE,
31463	       FPU_NONE),
31464  ARM_CPU_OPT ("arm1136jfs",	  "ARM1136JF-S",       ARM_ARCH_V6,
31465	       ARM_ARCH_NONE,
31466	       FPU_ARCH_VFP_V2),
31467  ARM_CPU_OPT ("arm1136jf-s",	  NULL,		       ARM_ARCH_V6,
31468	       ARM_ARCH_NONE,
31469	       FPU_ARCH_VFP_V2),
31470  ARM_CPU_OPT ("mpcore",	  "MPCore",	       ARM_ARCH_V6K,
31471	       ARM_ARCH_NONE,
31472	       FPU_ARCH_VFP_V2),
31473  ARM_CPU_OPT ("mpcorenovfp",	  "MPCore",	       ARM_ARCH_V6K,
31474	       ARM_ARCH_NONE,
31475	       FPU_NONE),
31476  ARM_CPU_OPT ("arm1156t2-s",	  NULL,		       ARM_ARCH_V6T2,
31477	       ARM_ARCH_NONE,
31478	       FPU_NONE),
31479  ARM_CPU_OPT ("arm1156t2f-s",	  NULL,		       ARM_ARCH_V6T2,
31480	       ARM_ARCH_NONE,
31481	       FPU_ARCH_VFP_V2),
31482  ARM_CPU_OPT ("arm1176jz-s",	  NULL,		       ARM_ARCH_V6KZ,
31483	       ARM_ARCH_NONE,
31484	       FPU_NONE),
31485  ARM_CPU_OPT ("arm1176jzf-s",	  NULL,		       ARM_ARCH_V6KZ,
31486	       ARM_ARCH_NONE,
31487	       FPU_ARCH_VFP_V2),
31488  ARM_CPU_OPT ("cortex-a5",	  "Cortex-A5",	       ARM_ARCH_V7A,
31489	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31490	       FPU_NONE),
31491  ARM_CPU_OPT ("cortex-a7",	  "Cortex-A7",	       ARM_ARCH_V7VE,
31492	       ARM_ARCH_NONE,
31493	       FPU_ARCH_NEON_VFP_V4),
31494  ARM_CPU_OPT ("cortex-a8",	  "Cortex-A8",	       ARM_ARCH_V7A,
31495	       ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
31496	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
31497  ARM_CPU_OPT ("cortex-a9",	  "Cortex-A9",	       ARM_ARCH_V7A,
31498	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31499	       ARM_FEATURE_COPROC (FPU_VFP_V3 | FPU_NEON_EXT_V1)),
31500  ARM_CPU_OPT ("cortex-a12",	  "Cortex-A12",	       ARM_ARCH_V7VE,
31501	       ARM_ARCH_NONE,
31502	       FPU_ARCH_NEON_VFP_V4),
31503  ARM_CPU_OPT ("cortex-a15",	  "Cortex-A15",	       ARM_ARCH_V7VE,
31504	       ARM_ARCH_NONE,
31505	       FPU_ARCH_NEON_VFP_V4),
31506  ARM_CPU_OPT ("cortex-a17",	  "Cortex-A17",	       ARM_ARCH_V7VE,
31507	       ARM_ARCH_NONE,
31508	       FPU_ARCH_NEON_VFP_V4),
31509  ARM_CPU_OPT ("cortex-a32",	  "Cortex-A32",	       ARM_ARCH_V8A,
31510	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31511	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31512  ARM_CPU_OPT ("cortex-a35",	  "Cortex-A35",	       ARM_ARCH_V8A,
31513	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31514	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31515  ARM_CPU_OPT ("cortex-a53",	  "Cortex-A53",	       ARM_ARCH_V8A,
31516	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31517	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31518  ARM_CPU_OPT ("cortex-a55",    "Cortex-A55",	       ARM_ARCH_V8_2A,
31519	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31520	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31521  ARM_CPU_OPT ("cortex-a57",	  "Cortex-A57",	       ARM_ARCH_V8A,
31522	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31523	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31524  ARM_CPU_OPT ("cortex-a72",	  "Cortex-A72",	       ARM_ARCH_V8A,
31525	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31526	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31527  ARM_CPU_OPT ("cortex-a73",	  "Cortex-A73",	       ARM_ARCH_V8A,
31528	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31529	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31530  ARM_CPU_OPT ("cortex-a75",    "Cortex-A75",	       ARM_ARCH_V8_2A,
31531	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31532	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31533  ARM_CPU_OPT ("cortex-a76",    "Cortex-A76",	       ARM_ARCH_V8_2A,
31534	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31535	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31536  ARM_CPU_OPT ("cortex-a76ae",    "Cortex-A76AE",      ARM_ARCH_V8_2A,
31537	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31538	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31539  ARM_CPU_OPT ("cortex-a77",    "Cortex-A77",	       ARM_ARCH_V8_2A,
31540	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31541	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31542  ARM_CPU_OPT ("ares",    "Ares",	       ARM_ARCH_V8_2A,
31543	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31544	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31545  ARM_CPU_OPT ("cortex-r4",	  "Cortex-R4",	       ARM_ARCH_V7R,
31546	       ARM_ARCH_NONE,
31547	       FPU_NONE),
31548  ARM_CPU_OPT ("cortex-r4f",	  "Cortex-R4F",	       ARM_ARCH_V7R,
31549	       ARM_ARCH_NONE,
31550	       FPU_ARCH_VFP_V3D16),
31551  ARM_CPU_OPT ("cortex-r5",	  "Cortex-R5",	       ARM_ARCH_V7R,
31552	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31553	       FPU_NONE),
31554  ARM_CPU_OPT ("cortex-r7",	  "Cortex-R7",	       ARM_ARCH_V7R,
31555	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31556	       FPU_ARCH_VFP_V3D16),
31557  ARM_CPU_OPT ("cortex-r8",	  "Cortex-R8",	       ARM_ARCH_V7R,
31558	       ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV),
31559	       FPU_ARCH_VFP_V3D16),
31560  ARM_CPU_OPT ("cortex-r52",	  "Cortex-R52",	       ARM_ARCH_V8R,
31561	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31562	       FPU_ARCH_NEON_VFP_ARMV8),
31563  ARM_CPU_OPT ("cortex-m35p",	  "Cortex-M35P",       ARM_ARCH_V8M_MAIN,
31564	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31565	       FPU_NONE),
31566  ARM_CPU_OPT ("cortex-m33",	  "Cortex-M33",	       ARM_ARCH_V8M_MAIN,
31567	       ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
31568	       FPU_NONE),
31569  ARM_CPU_OPT ("cortex-m23",	  "Cortex-M23",	       ARM_ARCH_V8M_BASE,
31570	       ARM_ARCH_NONE,
31571	       FPU_NONE),
31572  ARM_CPU_OPT ("cortex-m7",	  "Cortex-M7",	       ARM_ARCH_V7EM,
31573	       ARM_ARCH_NONE,
31574	       FPU_NONE),
31575  ARM_CPU_OPT ("cortex-m4",	  "Cortex-M4",	       ARM_ARCH_V7EM,
31576	       ARM_ARCH_NONE,
31577	       FPU_NONE),
31578  ARM_CPU_OPT ("cortex-m3",	  "Cortex-M3",	       ARM_ARCH_V7M,
31579	       ARM_ARCH_NONE,
31580	       FPU_NONE),
31581  ARM_CPU_OPT ("cortex-m1",	  "Cortex-M1",	       ARM_ARCH_V6SM,
31582	       ARM_ARCH_NONE,
31583	       FPU_NONE),
31584  ARM_CPU_OPT ("cortex-m0",	  "Cortex-M0",	       ARM_ARCH_V6SM,
31585	       ARM_ARCH_NONE,
31586	       FPU_NONE),
31587  ARM_CPU_OPT ("cortex-m0plus",	  "Cortex-M0+",	       ARM_ARCH_V6SM,
31588	       ARM_ARCH_NONE,
31589	       FPU_NONE),
31590  ARM_CPU_OPT ("exynos-m1",	  "Samsung Exynos M1", ARM_ARCH_V8A,
31591	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31592	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31593  ARM_CPU_OPT ("neoverse-n1",    "Neoverse N1",	       ARM_ARCH_V8_2A,
31594	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
31595	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_DOTPROD),
31596  /* ??? XSCALE is really an architecture.  */
31597  ARM_CPU_OPT ("xscale",	  NULL,		       ARM_ARCH_XSCALE,
31598	       ARM_ARCH_NONE,
31599	       FPU_ARCH_VFP_V2),
31600
31601  /* ??? iwmmxt is not a processor.  */
31602  ARM_CPU_OPT ("iwmmxt",	  NULL,		       ARM_ARCH_IWMMXT,
31603	       ARM_ARCH_NONE,
31604	       FPU_ARCH_VFP_V2),
31605  ARM_CPU_OPT ("iwmmxt2",	  NULL,		       ARM_ARCH_IWMMXT2,
31606	       ARM_ARCH_NONE,
31607	       FPU_ARCH_VFP_V2),
31608  ARM_CPU_OPT ("i80200",	  NULL,		       ARM_ARCH_XSCALE,
31609	       ARM_ARCH_NONE,
31610	       FPU_ARCH_VFP_V2),
31611
31612  /* Maverick.  */
31613  ARM_CPU_OPT ("ep9312",	  "ARM920T",
31614	       ARM_FEATURE_LOW (ARM_AEXT_V4T, ARM_CEXT_MAVERICK),
31615	       ARM_ARCH_NONE, FPU_ARCH_MAVERICK),
31616
31617  /* Marvell processors.  */
31618  ARM_CPU_OPT ("marvell-pj4",	  NULL,		       ARM_ARCH_V7A,
31619	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31620	       FPU_ARCH_VFP_V3D16),
31621  ARM_CPU_OPT ("marvell-whitney", NULL,		       ARM_ARCH_V7A,
31622	       ARM_FEATURE_CORE_LOW (ARM_EXT_MP | ARM_EXT_SEC),
31623	       FPU_ARCH_NEON_VFP_V4),
31624
31625  /* APM X-Gene family.  */
31626  ARM_CPU_OPT ("xgene1",	  "APM X-Gene 1",      ARM_ARCH_V8A,
31627	       ARM_ARCH_NONE,
31628	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31629  ARM_CPU_OPT ("xgene2",	  "APM X-Gene 2",      ARM_ARCH_V8A,
31630	       ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC),
31631	       FPU_ARCH_CRYPTO_NEON_VFP_ARMV8),
31632
31633  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }
31634};
31635#undef ARM_CPU_OPT
31636
/* One architecture-extension entry: the ARM_EXT/ARM_ADD/ARM_REMOVE
   macros below populate MERGE with the features a +NAME option adds
   and CLEAR with the features a +noNAME option removes.  */
31637struct arm_ext_table
31638{
  /* Extension name, without the leading '+'.  */
31639  const char *		  name;
  /* Length of NAME; precomputed by the macros below.  */
31640  size_t		  name_len;
  /* Features enabled by +NAME (ARM_ARCH_NONE if +NAME is not valid).  */
31641  const arm_feature_set	  merge;
  /* Features disabled by +noNAME (ARM_ARCH_NONE if +noNAME is not
     valid).  */
31642  const arm_feature_set	  clear;
31643};
31644
/* One entry in the -march= option table: the architecture name, the
   feature set it selects, its default FPU, and the optional table of
   +extension modifiers it accepts.  */
31645struct arm_arch_option_table
31646{
  /* Architecture name as matched on the command line (e.g. "armv5te").  */
31647  const char *			name;
  /* Length of NAME; precomputed by ARM_ARCH_OPT/ARM_ARCH_OPT2.  */
31648  size_t			name_len;
  /* The feature set selected by this architecture.  */
31649  const arm_feature_set		value;
  /* FPU assumed when the user gives no -mfpu= option.  */
31650  const arm_feature_set		default_fpu;
  /* Table of +extension/+noextension options accepted by this
     architecture, or NULL if it accepts none.  */
31651  const struct arm_ext_table *	ext_table;
31652};
31653
31654/* Used to add support for +E and +noE extension.  */
31655#define ARM_EXT(E, M, C) { E, sizeof (E) - 1, M, C }
31656/* Used to add support for a +E extension.  */
31657#define ARM_ADD(E, M) { E, sizeof(E) - 1, M, ARM_ARCH_NONE }
31658/* Used to add support for a +noE extension.  */
31659#define ARM_REMOVE(E, C) { E, sizeof(E) -1, ARM_ARCH_NONE, C }
31660
/* Mask covering all floating-point feature bits (except the pure-endian
   bits), used as the CLEAR set so that +nofp removes every FP variant.  */
31661#define ALL_FP ARM_FEATURE (0, ARM_EXT2_FP16_INST | ARM_EXT2_FP16_FML, \
31662			    ~0 & ~FPU_ENDIAN_PURE)
31663
/* Extension options accepted by the armv5te family of architectures:
   +fp selects VFPv2, +nofp removes all FP.  */
31664static const struct arm_ext_table armv5te_ext_table[] =
31665{
31666  ARM_EXT ("fp", FPU_ARCH_VFP_V2, ALL_FP),
31667  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31668};
31669
/* Extension options accepted by armv7: +fp selects VFPv3-D16.  */
31670static const struct arm_ext_table armv7_ext_table[] =
31671{
31672  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
31673  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31674};
31675
/* Extension options accepted by armv7ve: various VFPv3/VFPv4 variants
   plus +simd (Neon with VFPv4) and its aliases.  */
31676static const struct arm_ext_table armv7ve_ext_table[] =
31677{
31678  ARM_EXT ("fp", FPU_ARCH_VFP_V4D16, ALL_FP),
31679  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16),
31680  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
31681  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
31682  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
31683  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),  /* Alias for +fp.  */
31684  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
31685
31686  ARM_EXT ("simd", FPU_ARCH_NEON_VFP_V4,
31687	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
31688
31689  /* Aliases for +simd.  */
31690  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
31691
31692  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
31693  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
31694  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
31695
31696  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31697};
31698
/* Extension options accepted by armv7-a: FP variants, +simd (Neon)
   and its aliases, and the +mp and +sec core extensions.  */
31699static const struct arm_ext_table armv7a_ext_table[] =
31700{
31701  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
31702  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
31703  ARM_ADD ("vfpv3", FPU_ARCH_VFP_V3),
31704  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
31705  ARM_ADD ("vfpv3-fp16", FPU_ARCH_VFP_V3_FP16),
31706  ARM_ADD ("vfpv4-d16", FPU_ARCH_VFP_V4D16),
31707  ARM_ADD ("vfpv4", FPU_ARCH_VFP_V4),
31708
31709  ARM_EXT ("simd", FPU_ARCH_VFP_V3_PLUS_NEON_V1,
31710	   ARM_FEATURE_COPROC (FPU_NEON_EXT_V1 | FPU_NEON_EXT_FMA)),
31711
31712  /* Aliases for +simd.  */
31713  ARM_ADD ("neon", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
31714  ARM_ADD ("neon-vfpv3", FPU_ARCH_VFP_V3_PLUS_NEON_V1),
31715
31716  ARM_ADD ("neon-fp16", FPU_ARCH_NEON_FP16),
31717  ARM_ADD ("neon-vfpv4", FPU_ARCH_NEON_VFP_V4),
31718
  /* Multiprocessing and Security core extensions.  */
31719  ARM_ADD ("mp", ARM_FEATURE_CORE_LOW (ARM_EXT_MP)),
31720  ARM_ADD ("sec", ARM_FEATURE_CORE_LOW (ARM_EXT_SEC)),
31721  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31722};
31723
/* Extension options accepted by armv7-r: single- and double-precision
   VFPv3 variants, plus +idiv/+noidiv for hardware divide.  */
31724static const struct arm_ext_table armv7r_ext_table[] =
31725{
31726  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V3xD),
31727  ARM_ADD ("vfpv3xd", FPU_ARCH_VFP_V3xD), /* Alias for +fp.sp.  */
31728  ARM_EXT ("fp", FPU_ARCH_VFP_V3D16, ALL_FP),
31729  ARM_ADD ("vfpv3-d16", FPU_ARCH_VFP_V3D16), /* Alias for +fp.  */
31730  ARM_ADD ("vfpv3xd-fp16", FPU_ARCH_VFP_V3xD_FP16),
31731  ARM_ADD ("vfpv3-d16-fp16", FPU_ARCH_VFP_V3D16_FP16),
  /* Hardware divide, in both ARM and Thumb state.  */
31732  ARM_EXT ("idiv", ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
31733	   ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV)),
31734  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31735};
31736
/* Extension options accepted by armv7e-m: single-precision FPv4/FPv5
   and double-precision FPv5 variants.  */
31737static const struct arm_ext_table armv7em_ext_table[] =
31738{
31739  ARM_EXT ("fp", FPU_ARCH_VFP_V4_SP_D16, ALL_FP),
31740  /* Alias for +fp, used to be known as fpv4-sp-d16.  */
31741  ARM_ADD ("vfpv4-sp-d16", FPU_ARCH_VFP_V4_SP_D16),
31742  ARM_ADD ("fpv5", FPU_ARCH_VFP_V5_SP_D16),
31743  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
31744  ARM_ADD ("fpv5-d16", FPU_ARCH_VFP_V5D16),
31745  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31746};
31747
/* Extension options accepted by armv8-a: +crc, +simd, +crypto,
   +nofp, +sb and +predres.  */
31748static const struct arm_ext_table armv8a_ext_table[] =
31749{
31750  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
31751  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
31752  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
31753	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
31754
31755  /* Armv8-a does not allow an FP implementation without SIMD, so the user
31756     should use the +simd option to turn on FP.  */
31757  ARM_REMOVE ("fp", ALL_FP),
31758  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
31759  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
31760  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31761};
31762
31763
/* Extension options accepted by armv8.1-a: +simd and +crypto use the
   v8.1 (RDMA-capable) Neon feature sets.  */
31764static const struct arm_ext_table armv81a_ext_table[] =
31765{
31766  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
31767  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
31768	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
31769
31770  /* Armv8-a does not allow an FP implementation without SIMD, so the user
31771     should use the +simd option to turn on FP.  */
31772  ARM_REMOVE ("fp", ALL_FP),
31773  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
31774  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
31775  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31776};
31777
/* Extension options accepted by armv8.2-a, adding +fp16, +fp16fml,
   +bf16, +i8mm and +dotprod on top of the v8.1 set.  */
31778static const struct arm_ext_table armv82a_ext_table[] =
31779{
31780  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8_1),
31781  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_2_FP16),
31782  ARM_ADD ("fp16fml", FPU_ARCH_NEON_VFP_ARMV8_2_FP16FML),
31783  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
31784  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
31785  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1,
31786	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
31787  ARM_ADD ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31788
31789  /* Armv8-a does not allow an FP implementation without SIMD, so the user
31790     should use the +simd option to turn on FP.  */
31791  ARM_REMOVE ("fp", ALL_FP),
31792  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
31793  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
31794  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31795};
31796
/* Extension options accepted by armv8.4-a; dot product is part of the
   base +simd set here, and crypto uses the v8.4 feature set.  */
31797static const struct arm_ext_table armv84a_ext_table[] =
31798{
31799  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31800  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
31801  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
31802  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
31803  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
31804	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
31805
31806  /* Armv8-a does not allow an FP implementation without SIMD, so the user
31807     should use the +simd option to turn on FP.  */
31808  ARM_REMOVE ("fp", ALL_FP),
31809  ARM_ADD ("sb", ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB)),
31810  ARM_ADD ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES)),
31811  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31812};
31813
/* Extension options accepted by armv8.5-a.  Unlike earlier v8 tables
   there are no +sb/+predres entries here: those features are mandatory
   at this architecture level.  */
31814static const struct arm_ext_table armv85a_ext_table[] =
31815{
31816  ARM_ADD ("simd", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8),
31817  ARM_ADD ("fp16", FPU_ARCH_NEON_VFP_ARMV8_4_FP16FML),
31818  ARM_ADD ("bf16", ARM_FEATURE_CORE_HIGH (ARM_EXT2_BF16)),
31819  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
31820  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_4,
31821	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
31822
31823  /* Armv8-a does not allow an FP implementation without SIMD, so the user
31824     should use the +simd option to turn on FP.  */
31825  ARM_REMOVE ("fp", ALL_FP),
31826  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31827};
31828
/* Extension options accepted by armv8.6-a (only +i8mm is optional
   here).  */
31829static const struct arm_ext_table armv86a_ext_table[] =
31830{
31831  ARM_ADD ("i8mm", ARM_FEATURE_CORE_HIGH (ARM_EXT2_I8MM)),
31832  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31833};
31834
/* Custom Datapath Extension options shared by the M-profile tables
   below: +cdecpN enables CDE on coprocessor N (0-7), each setting the
   common ARM_EXT2_CDE bit plus its per-coprocessor bit.  */
31835#define CDE_EXTENSIONS \
31836  ARM_ADD ("cdecp0", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE0)), \
31837  ARM_ADD ("cdecp1", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE1)), \
31838  ARM_ADD ("cdecp2", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE2)), \
31839  ARM_ADD ("cdecp3", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE3)), \
31840  ARM_ADD ("cdecp4", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE4)), \
31841  ARM_ADD ("cdecp5", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE5)), \
31842  ARM_ADD ("cdecp6", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE6)), \
31843  ARM_ADD ("cdecp7", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CDE | ARM_EXT2_CDE7))
31844
/* Extension options accepted by armv8-m.main: +dsp, single/double
   precision FPv5, and the CDE coprocessor options.  */
31845static const struct arm_ext_table armv8m_main_ext_table[] =
31846{
31847  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
31848		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
31849  ARM_EXT ("fp", FPU_ARCH_VFP_V5_SP_D16, ALL_FP),
31850  ARM_ADD ("fp.dp", FPU_ARCH_VFP_V5D16),
31851  CDE_EXTENSIONS,
31852  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31853};
31854
31855
/* Extension options accepted by armv8.1-m.main: +dsp, FP with FP16
   instructions, MVE (integer) and MVE with FP, and the CDE options.  */
31856static const struct arm_ext_table armv8_1m_main_ext_table[] =
31857{
31858  ARM_EXT ("dsp", ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP),
31859		  ARM_FEATURE_CORE_LOW (ARM_AEXT_V8M_MAIN_DSP)),
  /* +fp: single-precision FPv5 with FP16 and FMA support.  */
31860  ARM_EXT ("fp",
31861	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
31862			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA),
31863	   ALL_FP),
31864  ARM_ADD ("fp.dp",
31865	   ARM_FEATURE (0, ARM_EXT2_FP16_INST,
31866			FPU_VFP_V5D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
  /* +mve: integer MVE (implies the DSP extension); +nomve also clears
     the floating-point MVE bit.  */
31867  ARM_EXT ("mve", ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP, ARM_EXT2_MVE, 0),
31868	   ARM_FEATURE_CORE_HIGH (ARM_EXT2_MVE | ARM_EXT2_MVE_FP)),
31869  ARM_ADD ("mve.fp",
31870	   ARM_FEATURE (ARM_AEXT_V8M_MAIN_DSP,
31871			ARM_EXT2_FP16_INST | ARM_EXT2_MVE | ARM_EXT2_MVE_FP,
31872			FPU_VFP_V5_SP_D16 | FPU_VFP_EXT_FP16 | FPU_VFP_EXT_FMA)),
31873  CDE_EXTENSIONS,
31874  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }
31875};
31876
31877#undef CDE_EXTENSIONS
31878
/* Architectural extensions accepted after -march=armv8-r.  */
static const struct arm_ext_table armv8r_ext_table[] =
{
  ARM_ADD ("crc", ARM_FEATURE_CORE_HIGH (ARM_EXT2_CRC)),
  ARM_ADD ("simd", FPU_ARCH_NEON_VFP_ARMV8),
  ARM_EXT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
	   ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8)),
  ARM_REMOVE ("fp", ALL_FP),
  ARM_ADD ("fp.sp", FPU_ARCH_VFP_V5_SP_D16),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE }	/* Sentinel.  */
};
31889
/* This list should, at a minimum, contain all the architecture names
   recognized by GCC.  */
/* ARM_ARCH_OPT defines an architecture with no context-sensitive
   extension table; ARM_ARCH_OPT2 additionally links the <ext>_ext_table
   that arm_parse_extension consults for "+ext" suffixes.  */
#define ARM_ARCH_OPT(N, V, DF) { N, sizeof (N) - 1, V, DF, NULL }
#define ARM_ARCH_OPT2(N, V, DF, ext) \
  { N, sizeof (N) - 1, V, DF, ext##_ext_table }

static const struct arm_arch_option_table arm_archs[] =
{
  ARM_ARCH_OPT ("all",		  ARM_ANY,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv1",	  ARM_ARCH_V1,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2",	  ARM_ARCH_V2,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2a",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv2s",	  ARM_ARCH_V2S,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3",	  ARM_ARCH_V3,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv3m",	  ARM_ARCH_V3M,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4",	  ARM_ARCH_V4,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4xm",	  ARM_ARCH_V4xM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4t",	  ARM_ARCH_V4T,		FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv4txm",	  ARM_ARCH_V4TxM,	FPU_ARCH_FPA),
  ARM_ARCH_OPT ("armv5",	  ARM_ARCH_V5,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5t",	  ARM_ARCH_V5T,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv5txm",	  ARM_ARCH_V5TxM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv5te",	  ARM_ARCH_V5TE,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv5texp",	  ARM_ARCH_V5TExP,	FPU_ARCH_VFP, armv5te),
  ARM_ARCH_OPT2 ("armv5tej",	  ARM_ARCH_V5TEJ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6j",	  ARM_ARCH_V6,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6k",	  ARM_ARCH_V6K,		FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6z",	  ARM_ARCH_V6Z,		FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zk" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kz",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zk",	  ARM_ARCH_V6KZ,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6t2",	  ARM_ARCH_V6T2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6kt2",	  ARM_ARCH_V6KT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zt2",	  ARM_ARCH_V6ZT2,	FPU_ARCH_VFP,	armv5te),
  /* The official spelling of this variant is ARMv6KZ, the name "armv6zkt2" is
     kept to preserve existing behaviour.  */
  ARM_ARCH_OPT2 ("armv6kzt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT2 ("armv6zkt2",	  ARM_ARCH_V6KZT2,	FPU_ARCH_VFP,	armv5te),
  ARM_ARCH_OPT ("armv6-m",	  ARM_ARCH_V6M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT ("armv6s-m",	  ARM_ARCH_V6SM,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7",	  ARM_ARCH_V7,		FPU_ARCH_VFP, armv7),
  /* The official spelling of the ARMv7 profile variants is the dashed form.
     Accept the non-dashed form for compatibility with old toolchains.  */
  ARM_ARCH_OPT2 ("armv7a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7ve",	  ARM_ARCH_V7VE,	FPU_ARCH_VFP, armv7ve),
  ARM_ARCH_OPT2 ("armv7r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7-a",	  ARM_ARCH_V7A,		FPU_ARCH_VFP, armv7a),
  ARM_ARCH_OPT2 ("armv7-r",	  ARM_ARCH_V7R,		FPU_ARCH_VFP, armv7r),
  ARM_ARCH_OPT ("armv7-m",	  ARM_ARCH_V7M,		FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv7e-m",	  ARM_ARCH_V7EM,	FPU_ARCH_VFP, armv7em),
  ARM_ARCH_OPT ("armv8-m.base",	  ARM_ARCH_V8M_BASE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT2 ("armv8-m.main",  ARM_ARCH_V8M_MAIN,	FPU_ARCH_VFP,
		 armv8m_main),
  ARM_ARCH_OPT2 ("armv8.1-m.main", ARM_ARCH_V8_1M_MAIN,	FPU_ARCH_VFP,
		 armv8_1m_main),
  ARM_ARCH_OPT2 ("armv8-a",	  ARM_ARCH_V8A,		FPU_ARCH_VFP, armv8a),
  ARM_ARCH_OPT2 ("armv8.1-a",	  ARM_ARCH_V8_1A,	FPU_ARCH_VFP, armv81a),
  ARM_ARCH_OPT2 ("armv8.2-a",	  ARM_ARCH_V8_2A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8.3-a",	  ARM_ARCH_V8_3A,	FPU_ARCH_VFP, armv82a),
  ARM_ARCH_OPT2 ("armv8-r",	  ARM_ARCH_V8R,		FPU_ARCH_VFP, armv8r),
  ARM_ARCH_OPT2 ("armv8.4-a",	  ARM_ARCH_V8_4A,	FPU_ARCH_VFP, armv84a),
  ARM_ARCH_OPT2 ("armv8.5-a",	  ARM_ARCH_V8_5A,	FPU_ARCH_VFP, armv85a),
  ARM_ARCH_OPT2 ("armv8.6-a",	  ARM_ARCH_V8_6A,	FPU_ARCH_VFP, armv86a),
  ARM_ARCH_OPT ("xscale",	  ARM_ARCH_XSCALE,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt",	  ARM_ARCH_IWMMXT,	FPU_ARCH_VFP),
  ARM_ARCH_OPT ("iwmmxt2",	  ARM_ARCH_IWMMXT2,	FPU_ARCH_VFP),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, NULL }	/* Sentinel.  */
};
#undef ARM_ARCH_OPT
31962
/* ISA extensions in the co-processor and main instruction set space.  */

struct arm_option_extension_value_table
{
  const char *           name;		/* Extension name, e.g. "crc".  */
  size_t                 name_len;	/* strlen (name), precomputed.  */
  /* Features turned on by "+<name>".  */
  const arm_feature_set  merge_value;
  /* Features turned off by "+no<name>".  */
  const arm_feature_set  clear_value;
  /* List of architectures for which an extension is available.  ARM_ARCH_NONE
     indicates that an extension is available for all architectures while
     ARM_ANY marks an empty entry.  */
  const arm_feature_set  allowed_archs[2];
};
31976
/* The following table must be in alphabetical order with a NULL last entry.
   arm_parse_extension relies on this ordering when walking the table.  */

#define ARM_EXT_OPT(N, M, C, AA) { N, sizeof (N) - 1, M, C, { AA, ARM_ANY } }
#define ARM_EXT_OPT2(N, M, C, AA1, AA2) { N, sizeof (N) - 1, M, C, {AA1, AA2} }

/* DEPRECATED: Refrain from using this table to add any new extensions, instead
   use the context sensitive approach using arm_ext_table's.  */
static const struct arm_option_extension_value_table arm_extensions[] =
{
  ARM_EXT_OPT ("crc",	 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_HIGH(ARM_EXT2_CRC),
			 ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("crypto", FPU_ARCH_CRYPTO_NEON_VFP_ARMV8,
			 ARM_FEATURE_COPROC (FPU_CRYPTO_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("dotprod", FPU_ARCH_DOTPROD_NEON_VFP_ARMV8,
			  ARM_FEATURE_COPROC (FPU_NEON_EXT_DOTPROD),
			  ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("dsp",	ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V5ExP | ARM_EXT_V6_DSP),
			ARM_FEATURE_CORE (ARM_EXT_V7M, ARM_EXT2_V8M)),
  ARM_EXT_OPT ("fp",     FPU_ARCH_VFP_ARMV8, ARM_FEATURE_COPROC (FPU_VFP_ARMV8),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("fp16",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST),
			ARM_ARCH_V8_2A),
  ARM_EXT_OPT ("fp16fml",  ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_FEATURE_CORE_HIGH (ARM_EXT2_FP16_INST
						  | ARM_EXT2_FP16_FML),
			   ARM_ARCH_V8_2A),
  ARM_EXT_OPT2 ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_ADIV | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  /* Duplicate entry for the purpose of allowing ARMv7 to match in presence of
     Thumb divide instruction.  Due to this having the same name as the
     previous entry, this will be ignored when doing command-line parsing and
     only considered by build attribute selection code.  */
  ARM_EXT_OPT ("idiv",	ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7)),
  ARM_EXT_OPT ("iwmmxt",ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT), ARM_ARCH_NONE),
  ARM_EXT_OPT ("iwmmxt2", ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2),
			ARM_FEATURE_COPROC (ARM_CEXT_IWMMXT2), ARM_ARCH_NONE),
  ARM_EXT_OPT ("maverick", ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK),
			ARM_FEATURE_COPROC (ARM_CEXT_MAVERICK), ARM_ARCH_NONE),
  ARM_EXT_OPT2 ("mp",	ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_MP),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7R)),
  ARM_EXT_OPT ("os",	ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
			ARM_FEATURE_CORE_LOW (ARM_EXT_OS),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V6M)),
  ARM_EXT_OPT ("pan",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_PAN),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_PAN, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("predres", ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_PREDRES),
			ARM_ARCH_V8A),
  ARM_EXT_OPT ("ras",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_RAS),
			ARM_FEATURE (ARM_EXT_V8, ARM_EXT2_RAS, 0),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("rdma",  FPU_ARCH_NEON_VFP_ARMV8_1,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8 | FPU_NEON_EXT_RDMA),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_V8A)),
  ARM_EXT_OPT ("sb",	ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_FEATURE_CORE_HIGH (ARM_EXT2_SB),
			ARM_ARCH_V8A),
  ARM_EXT_OPT2 ("sec",	ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_SEC),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V6K),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("simd",  FPU_ARCH_NEON_VFP_ARMV8,
			ARM_FEATURE_COPROC (FPU_NEON_ARMV8),
			ARM_FEATURE_CORE_LOW (ARM_EXT_V8)),
  ARM_EXT_OPT ("virt",	ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT | ARM_EXT_ADIV
				     | ARM_EXT_DIV),
			ARM_FEATURE_CORE_LOW (ARM_EXT_VIRT),
				   ARM_FEATURE_CORE_LOW (ARM_EXT_V7A)),
  ARM_EXT_OPT ("xscale",ARM_FEATURE_COPROC (ARM_CEXT_XSCALE),
			ARM_FEATURE_COPROC (ARM_CEXT_XSCALE), ARM_ARCH_NONE),
  { NULL, 0, ARM_ARCH_NONE, ARM_ARCH_NONE, { ARM_ARCH_NONE, ARM_ARCH_NONE } }
};
#undef ARM_EXT_OPT
32063
/* ISA floating-point and Advanced SIMD extensions.  */
struct arm_option_fpu_value_table
{
  const char *           name;	/* FPU name as given to -mfpu=.  */
  const arm_feature_set  value;	/* Feature set the name enables.  */
};
32070
/* This list should, at a minimum, contain all the fpu names
   recognized by GCC.  Matched by exact string comparison (streq)
   in arm_parse_fpu, so no name_len field is needed here.  */
static const struct arm_option_fpu_value_table arm_fpus[] =
{
  {"softfpa",		FPU_NONE},
  {"fpe",		FPU_ARCH_FPE},
  {"fpe2",		FPU_ARCH_FPE},
  {"fpe3",		FPU_ARCH_FPA},	/* Third release supports LFM/SFM.  */
  {"fpa",		FPU_ARCH_FPA},
  {"fpa10",		FPU_ARCH_FPA},
  {"fpa11",		FPU_ARCH_FPA},
  {"arm7500fe",		FPU_ARCH_FPA},
  {"softvfp",		FPU_ARCH_VFP},
  {"softvfp+vfp",	FPU_ARCH_VFP_V2},
  {"vfp",		FPU_ARCH_VFP_V2},
  {"vfp9",		FPU_ARCH_VFP_V2},
  {"vfp3",		FPU_ARCH_VFP_V3}, /* Undocumented, use vfpv3.  */
  {"vfp10",		FPU_ARCH_VFP_V2},
  {"vfp10-r0",		FPU_ARCH_VFP_V1},
  {"vfpxd",		FPU_ARCH_VFP_V1xD},
  {"vfpv2",		FPU_ARCH_VFP_V2},
  {"vfpv3",		FPU_ARCH_VFP_V3},
  {"vfpv3-fp16",	FPU_ARCH_VFP_V3_FP16},
  {"vfpv3-d16",		FPU_ARCH_VFP_V3D16},
  {"vfpv3-d16-fp16",	FPU_ARCH_VFP_V3D16_FP16},
  {"vfpv3xd",		FPU_ARCH_VFP_V3xD},
  {"vfpv3xd-fp16",	FPU_ARCH_VFP_V3xD_FP16},
  {"arm1020t",		FPU_ARCH_VFP_V1},
  {"arm1020e",		FPU_ARCH_VFP_V2},
  {"arm1136jfs",	FPU_ARCH_VFP_V2}, /* Undocumented, use arm1136jf-s.  */
  {"arm1136jf-s",	FPU_ARCH_VFP_V2},
  {"maverick",		FPU_ARCH_MAVERICK},
  {"neon",		FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-vfpv3",	FPU_ARCH_VFP_V3_PLUS_NEON_V1},
  {"neon-fp16",		FPU_ARCH_NEON_FP16},
  {"vfpv4",		FPU_ARCH_VFP_V4},
  {"vfpv4-d16",		FPU_ARCH_VFP_V4D16},
  {"fpv4-sp-d16",	FPU_ARCH_VFP_V4_SP_D16},
  {"fpv5-d16",		FPU_ARCH_VFP_V5D16},
  {"fpv5-sp-d16",	FPU_ARCH_VFP_V5_SP_D16},
  {"neon-vfpv4",	FPU_ARCH_NEON_VFP_V4},
  {"fp-armv8",		FPU_ARCH_VFP_ARMV8},
  {"neon-fp-armv8",	FPU_ARCH_NEON_VFP_ARMV8},
  {"crypto-neon-fp-armv8",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8},
  {"neon-fp-armv8.1",	FPU_ARCH_NEON_VFP_ARMV8_1},
  {"crypto-neon-fp-armv8.1",
			FPU_ARCH_CRYPTO_NEON_VFP_ARMV8_1},
  {NULL,		ARM_ARCH_NONE}	/* Sentinel.  */
};
32121
/* Generic name -> integer value mapping, used for the float ABI and
   EABI version option tables below.  */
struct arm_option_value_table
{
  const char *name;	/* Option argument text.  */
  long value;		/* Value to record when NAME matches.  */
};
32127
/* Arguments accepted by -mfloat-abi=.  */
static const struct arm_option_value_table arm_float_abis[] =
{
  {"hard",	ARM_FLOAT_ABI_HARD},
  {"softfp",	ARM_FLOAT_ABI_SOFTFP},
  {"soft",	ARM_FLOAT_ABI_SOFT},
  {NULL,	0}	/* Sentinel.  */
};
32135
#ifdef OBJ_ELF
/* We only know how to output GNU and ver 4/5 (AAELF) formats.  */
static const struct arm_option_value_table arm_eabis[] =
{
  {"gnu",	EF_ARM_EABI_UNKNOWN},
  {"4",		EF_ARM_EABI_VER4},
  {"5",		EF_ARM_EABI_VER5},
  {NULL,	0}	/* Sentinel.  */
};
#endif
32146
/* Describes a multi-character option such as -mcpu=<arg>; matched by
   prefix in md_parse_option and dispatched to FUNC with the text that
   follows the option name.  */
struct arm_long_option_table
{
  const char * option;			/* Substring to match.	*/
  const char * help;			/* Help information.  */
  int (* func) (const char * subopt);	/* Function to decode sub-option.  */
  const char * deprecated;		/* If non-null, print this message.  */
};
32154
/* Parse STR, a list of "+<ext>" / "+no<ext>" architectural extension
   suffixes (the text following an -mcpu=/-march= value), updating
   *EXT_SET accordingly.  *OPT_SET is the feature set of the selected
   base CPU/architecture and is only consulted to check that an
   extension applies to it.  EXT_TABLE, if non-NULL, is the selected
   architecture's context-sensitive extension table and is searched
   before the legacy global arm_extensions table.  Returns TRUE on
   success, FALSE after diagnosing an error with as_bad.  */

static bfd_boolean
arm_parse_extension (const char *str, const arm_feature_set *opt_set,
		     arm_feature_set *ext_set,
		     const struct arm_ext_table *ext_table)
{
  /* We insist on extensions being specified in alphabetical order, and with
     extensions being added before being removed.  We achieve this by having
     the global ARM_EXTENSIONS table in alphabetical order, and using the
     ADDING_VALUE variable to indicate whether we are adding an extension (1)
     or removing it (0) and only allowing it to change in the order
     -1 -> 1 -> 0.  */
  const struct arm_option_extension_value_table * opt = NULL;
  const arm_feature_set arm_any = ARM_ANY;
  int adding_value = -1;

  while (str != NULL && *str != 0)
    {
      const char *ext;
      size_t len;

      if (*str != '+')
	{
	  as_bad (_("invalid architectural extension"));
	  return FALSE;
	}

      str++;
      ext = strchr (str, '+');

      /* LEN covers just this extension name; EXT points at the next
	 "+..." suffix, if any.  */
      if (ext != NULL)
	len = ext - str;
      else
	len = strlen (str);

      /* A "no" prefix means the extension is being removed.  */
      if (len >= 2 && strncmp (str, "no", 2) == 0)
	{
	  if (adding_value != 0)
	    {
	      adding_value = 0;
	      /* Restart the alphabetical scan for removals.  */
	      opt = arm_extensions;
	    }

	  len -= 2;
	  str += 2;
	}
      else if (len > 0)
	{
	  if (adding_value == -1)
	    {
	      adding_value = 1;
	      opt = arm_extensions;
	    }
	  else if (adding_value != 1)
	    {
	      as_bad (_("must specify extensions to add before specifying "
			"those to remove"));
	      return FALSE;
	    }
	}

      if (len == 0)
	{
	  as_bad (_("missing architectural extension"));
	  return FALSE;
	}

      gas_assert (adding_value != -1);
      gas_assert (opt != NULL);

      /* Try the architecture's own extension table first.  */
      if (ext_table != NULL)
	{
	  const struct arm_ext_table * ext_opt = ext_table;
	  bfd_boolean found = FALSE;
	  for (; ext_opt->name != NULL; ext_opt++)
	    if (ext_opt->name_len == len
		&& strncmp (ext_opt->name, str, len) == 0)
	      {
		if (adding_value)
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->merge))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;

		    ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, ext_opt->merge);
		  }
		else
		  {
		    if (ARM_FEATURE_ZERO (ext_opt->clear))
			/* TODO: Option not supported.  When we remove the
			   legacy table this case should error out.  */
			continue;
		    ARM_CLEAR_FEATURE (*ext_set, *ext_set, ext_opt->clear);
		  }
		found = TRUE;
		break;
	      }
	  if (found)
	    {
	      str = ext;
	      continue;
	    }
	}

      /* Scan over the options table trying to find an exact match. */
      for (; opt->name != NULL; opt++)
	if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	  {
	    int i, nb_allowed_archs =
	      sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
	    /* Check we can apply the extension to this architecture.  */
	    for (i = 0; i < nb_allowed_archs; i++)
	      {
		/* Empty entry.  */
		if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_any))
		  continue;
		if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *opt_set))
		  break;
	      }
	    if (i == nb_allowed_archs)
	      {
		as_bad (_("extension does not apply to the base architecture"));
		return FALSE;
	      }

	    /* Add or remove the extension.  */
	    if (adding_value)
	      ARM_MERGE_FEATURE_SETS (*ext_set, *ext_set, opt->merge_value);
	    else
	      ARM_CLEAR_FEATURE (*ext_set, *ext_set, opt->clear_value);

	    /* Allowing Thumb division instructions for ARMv7 in autodetection
	       rely on this break so that duplicate extensions (extensions
	       with the same name as a previous extension in the list) are not
	       considered for command-line parsing.  */
	    break;
	  }

      if (opt->name == NULL)
	{
	  /* Did we fail to find an extension because it wasn't specified in
	     alphabetical order, or because it does not exist?  */

	  for (opt = arm_extensions; opt->name != NULL; opt++)
	    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
	      break;

	  if (opt->name == NULL)
	    as_bad (_("unknown architectural extension `%s'"), str);
	  else
	    as_bad (_("architectural extensions must be specified in "
		      "alphabetical order"));

	  return FALSE;
	}
      else
	{
	  /* We should skip the extension we've just matched the next time
	     round.  */
	  opt++;
	}

      str = ext;
    };

  return TRUE;
}
32322
32323static bfd_boolean
32324arm_parse_fp16_opt (const char *str)
32325{
32326  if (strcasecmp (str, "ieee") == 0)
32327    fp16_format = ARM_FP16_FORMAT_IEEE;
32328  else if (strcasecmp (str, "alternative") == 0)
32329    fp16_format = ARM_FP16_FORMAT_ALTERNATIVE;
32330  else
32331    {
32332      as_bad (_("unrecognised float16 format \"%s\""), str);
32333      return FALSE;
32334    }
32335
32336  return TRUE;
32337}
32338
/* Parse the argument to -mcpu=: a CPU name optionally followed by
   "+ext" suffixes.  On success records the selection in mcpu_cpu_opt,
   mcpu_ext_opt and mcpu_fpu_opt, and copies a display name into
   selected_cpu_name.  Returns FALSE after diagnosing any error.  */

static bfd_boolean
arm_parse_cpu (const char *str)
{
  const struct arm_cpu_option_table *opt;
  const char *ext = strchr (str, '+');
  size_t len;

  /* LEN covers just the CPU name; extensions are handed off to
     arm_parse_extension below.  */
  if (ext != NULL)
    len = ext - str;
  else
    len = strlen (str);

  if (len == 0)
    {
      as_bad (_("missing cpu name `%s'"), str);
      return FALSE;
    }

  for (opt = arm_cpus; opt->name != NULL; opt++)
    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
      {
	mcpu_cpu_opt = &opt->value;
	/* Allocated lazily; reused if -mcpu is given more than once.  */
	if (mcpu_ext_opt == NULL)
	  mcpu_ext_opt = XNEW (arm_feature_set);
	*mcpu_ext_opt = opt->ext;
	mcpu_fpu_opt = &opt->default_fpu;
	if (opt->canonical_name)
	  {
	    gas_assert (sizeof selected_cpu_name > strlen (opt->canonical_name));
	    strcpy (selected_cpu_name, opt->canonical_name);
	  }
	else
	  {
	    size_t i;

	    /* No canonical name: use an upper-cased, truncated copy of
	       the name the user gave.  */
	    if (len >= sizeof selected_cpu_name)
	      len = (sizeof selected_cpu_name) - 1;

	    for (i = 0; i < len; i++)
	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
	    selected_cpu_name[i] = 0;
	  }

	if (ext != NULL)
	  return arm_parse_extension (ext, mcpu_cpu_opt, mcpu_ext_opt, NULL);

	return TRUE;
      }

  as_bad (_("unknown cpu `%s'"), str);
  return FALSE;
}
32391
32392static bfd_boolean
32393arm_parse_arch (const char *str)
32394{
32395  const struct arm_arch_option_table *opt;
32396  const char *ext = strchr (str, '+');
32397  size_t len;
32398
32399  if (ext != NULL)
32400    len = ext - str;
32401  else
32402    len = strlen (str);
32403
32404  if (len == 0)
32405    {
32406      as_bad (_("missing architecture name `%s'"), str);
32407      return FALSE;
32408    }
32409
32410  for (opt = arm_archs; opt->name != NULL; opt++)
32411    if (opt->name_len == len && strncmp (opt->name, str, len) == 0)
32412      {
32413	march_cpu_opt = &opt->value;
32414	if (march_ext_opt == NULL)
32415	  march_ext_opt = XNEW (arm_feature_set);
32416	*march_ext_opt = arm_arch_none;
32417	march_fpu_opt = &opt->default_fpu;
32418	selected_ctx_ext_table = opt->ext_table;
32419	strcpy (selected_cpu_name, opt->name);
32420
32421	if (ext != NULL)
32422	  return arm_parse_extension (ext, march_cpu_opt, march_ext_opt,
32423				      opt->ext_table);
32424
32425	return TRUE;
32426      }
32427
32428  as_bad (_("unknown architecture `%s'\n"), str);
32429  return FALSE;
32430}
32431
32432static bfd_boolean
32433arm_parse_fpu (const char * str)
32434{
32435  const struct arm_option_fpu_value_table * opt;
32436
32437  for (opt = arm_fpus; opt->name != NULL; opt++)
32438    if (streq (opt->name, str))
32439      {
32440	mfpu_opt = &opt->value;
32441	return TRUE;
32442      }
32443
32444  as_bad (_("unknown floating point format `%s'\n"), str);
32445  return FALSE;
32446}
32447
32448static bfd_boolean
32449arm_parse_float_abi (const char * str)
32450{
32451  const struct arm_option_value_table * opt;
32452
32453  for (opt = arm_float_abis; opt->name != NULL; opt++)
32454    if (streq (opt->name, str))
32455      {
32456	mfloat_abi_opt = opt->value;
32457	return TRUE;
32458      }
32459
32460  as_bad (_("unknown floating point abi `%s'\n"), str);
32461  return FALSE;
32462}
32463
#ifdef OBJ_ELF
/* Parse the argument to -meabi=.  STR must name an entry of arm_eabis;
   on success the EABI version flags are recorded in meabi_flags.
   Returns FALSE after diagnosing an unknown version.  */

static bfd_boolean
arm_parse_eabi (const char * str)
{
  const struct arm_option_value_table *eabi;

  for (eabi = arm_eabis; eabi->name != NULL; eabi++)
    {
      if (streq (eabi->name, str))
	{
	  meabi_flags = eabi->value;
	  return TRUE;
	}
    }

  as_bad (_("unknown EABI `%s'\n"), str);
  return FALSE;
}
#endif
32480
32481static bfd_boolean
32482arm_parse_it_mode (const char * str)
32483{
32484  bfd_boolean ret = TRUE;
32485
32486  if (streq ("arm", str))
32487    implicit_it_mode = IMPLICIT_IT_MODE_ARM;
32488  else if (streq ("thumb", str))
32489    implicit_it_mode = IMPLICIT_IT_MODE_THUMB;
32490  else if (streq ("always", str))
32491    implicit_it_mode = IMPLICIT_IT_MODE_ALWAYS;
32492  else if (streq ("never", str))
32493    implicit_it_mode = IMPLICIT_IT_MODE_NEVER;
32494  else
32495    {
32496      as_bad (_("unknown implicit IT mode `%s', should be "\
32497		"arm, thumb, always, or never."), str);
32498      ret = FALSE;
32499    }
32500
32501  return ret;
32502}
32503
32504static bfd_boolean
32505arm_ccs_mode (const char * unused ATTRIBUTE_UNUSED)
32506{
32507  codecomposer_syntax = TRUE;
32508  arm_comment_chars[0] = ';';
32509  arm_line_separator_chars[0] = 0;
32510  return TRUE;
32511}
32512
32513struct arm_long_option_table arm_long_opts[] =
32514{
32515  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
32516   arm_parse_cpu, NULL},
32517  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
32518   arm_parse_arch, NULL},
32519  {"mfpu=", N_("<fpu name>\t  assemble for FPU architecture <fpu name>"),
32520   arm_parse_fpu, NULL},
32521  {"mfloat-abi=", N_("<abi>\t  assemble for floating point ABI <abi>"),
32522   arm_parse_float_abi, NULL},
32523#ifdef OBJ_ELF
32524  {"meabi=", N_("<ver>\t\t  assemble for eabi version <ver>"),
32525   arm_parse_eabi, NULL},
32526#endif
32527  {"mimplicit-it=", N_("<mode>\t  controls implicit insertion of IT instructions"),
32528   arm_parse_it_mode, NULL},
32529  {"mccs", N_("\t\t\t  TI CodeComposer Studio syntax compatibility mode"),
32530   arm_ccs_mode, NULL},
32531  {"mfp16-format=",
32532   N_("[ieee|alternative]\n\
32533                          set the encoding for half precision floating point "
32534			  "numbers to IEEE\n\
32535                          or Arm alternative format."),
32536   arm_parse_fp16_opt, NULL },
32537  {NULL, NULL, 0, NULL}
32538};
32539
/* Handle a machine-dependent command-line option.  C is the option
   character (or long-option code) and ARG its argument text, if any.
   Returns 1 if the option was consumed, 0 if it was not recognized.  */

int
md_parse_option (int c, const char * arg)
{
  struct arm_option_table *opt;
  const struct arm_legacy_option_table *fopt;
  struct arm_long_option_table *lopt;

  switch (c)
    {
#ifdef OPTION_EB
    case OPTION_EB:
      target_big_endian = 1;
      break;
#endif

#ifdef OPTION_EL
    case OPTION_EL:
      target_big_endian = 0;
      break;
#endif

    case OPTION_FIX_V4BX:
      fix_v4bx = TRUE;
      break;

#ifdef OBJ_ELF
    case OPTION_FDPIC:
      arm_fdpic = TRUE;
      break;
#endif /* OBJ_ELF */

    case 'a':
      /* Listing option.  Just ignore these, we don't support additional
	 ones.	*/
      return 0;

    default:
      /* Simple flag options: set *var to a fixed value on match.  */
      for (opt = arm_opts; opt->option != NULL; opt++)
	{
	  if (c == opt->option[0]
	      && ((arg == NULL && opt->option[1] == 0)
		  || streq (arg, opt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && opt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(opt->deprecated));

	      if (opt->var != NULL)
		*opt->var = opt->value;

	      return 1;
	    }
	}

      /* Legacy options: like the above, but *var points at a feature
	 set stored in the table entry itself.  */
      for (fopt = arm_legacy_opts; fopt->option != NULL; fopt++)
	{
	  if (c == fopt->option[0]
	      && ((arg == NULL && fopt->option[1] == 0)
		  || streq (arg, fopt->option + 1)))
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && fopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
			   arg ? arg : "", _(fopt->deprecated));

	      if (fopt->var != NULL)
		*fopt->var = &fopt->value;

	      return 1;
	    }
	}

      for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
	{
	  /* These options are expected to have an argument.  */
	  if (c == lopt->option[0]
	      && arg != NULL
	      && strncmp (arg, lopt->option + 1,
			  strlen (lopt->option + 1)) == 0)
	    {
	      /* If the option is deprecated, tell the user.  */
	      if (warn_on_deprecated && lopt->deprecated != NULL)
		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
			   _(lopt->deprecated));

	      /* Call the sub-option parser.  ARG starts after the option
		 character C, so skipping strlen (option) - 1 more bytes
		 leaves it pointing at the sub-option text.  */
	      return lopt->func (arg + strlen (lopt->option) - 1);
	    }
	}

      return 0;
    }

  return 1;
}
32636
32637void
32638md_show_usage (FILE * fp)
32639{
32640  struct arm_option_table *opt;
32641  struct arm_long_option_table *lopt;
32642
32643  fprintf (fp, _(" ARM-specific assembler options:\n"));
32644
32645  for (opt = arm_opts; opt->option != NULL; opt++)
32646    if (opt->help != NULL)
32647      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
32648
32649  for (lopt = arm_long_opts; lopt->option != NULL; lopt++)
32650    if (lopt->help != NULL)
32651      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
32652
32653#ifdef OPTION_EB
32654  fprintf (fp, _("\
32655  -EB                     assemble code for a big-endian cpu\n"));
32656#endif
32657
32658#ifdef OPTION_EL
32659  fprintf (fp, _("\
32660  -EL                     assemble code for a little-endian cpu\n"));
32661#endif
32662
32663  fprintf (fp, _("\
32664  --fix-v4bx              Allow BX in ARMv4 code\n"));
32665
32666#ifdef OBJ_ELF
32667  fprintf (fp, _("\
32668  --fdpic                 generate an FDPIC object file\n"));
32669#endif /* OBJ_ELF */
32670}
32671
#ifdef OBJ_ELF

/* Maps a set of CPU features to the EABI Tag_CPU_arch build attribute
   value that describes them.  */
typedef struct
{
  int val;			/* TAG_CPU_ARCH_* value.  */
  arm_feature_set flags;	/* Features of the architecture.  */
} cpu_arch_ver_table;
32679
/* Mapping from CPU features to EABI CPU arch values.  Table must be sorted
   chronologically for architectures, with an exception for ARMv6-M and
   ARMv6S-M due to legacy reasons.  No new architecture should have a
   special case.  This allows for build attribute selection results to be
   stable when new architectures are added.  */
static const cpu_arch_ver_table cpu_arch_ver[] =
{
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V1},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V2S},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3},
    {TAG_CPU_ARCH_PRE_V4,     ARM_ARCH_V3M},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4xM},
    {TAG_CPU_ARCH_V4,	      ARM_ARCH_V4},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4TxM},
    {TAG_CPU_ARCH_V4T,	      ARM_ARCH_V4T},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5xM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5TxM},
    {TAG_CPU_ARCH_V5T,	      ARM_ARCH_V5T},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TExP},
    {TAG_CPU_ARCH_V5TE,	      ARM_ARCH_V5TE},
    {TAG_CPU_ARCH_V5TEJ,      ARM_ARCH_V5TEJ},
    {TAG_CPU_ARCH_V6,	      ARM_ARCH_V6},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6Z},
    {TAG_CPU_ARCH_V6KZ,	      ARM_ARCH_V6KZ},
    {TAG_CPU_ARCH_V6K,	      ARM_ARCH_V6K},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6T2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6ZT2},
    {TAG_CPU_ARCH_V6T2,	      ARM_ARCH_V6KZT2},

    /* When assembling a file with only ARMv6-M or ARMv6S-M instruction, GNU as
       always selected build attributes to match those of ARMv6-M
       (resp. ARMv6S-M).  However, due to these architectures being a strict
       subset of ARMv7-M in terms of instructions available, ARMv7-M attributes
       would be selected when fully respecting chronology of architectures.
       It is thus necessary to make a special case of ARMv6-M and ARMv6S-M and
       move them before ARMv7 architectures.  */
    {TAG_CPU_ARCH_V6_M,	      ARM_ARCH_V6M},
    {TAG_CPU_ARCH_V6S_M,      ARM_ARCH_V6SM},

    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7A},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7R},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7M},
    {TAG_CPU_ARCH_V7,	      ARM_ARCH_V7VE},
    {TAG_CPU_ARCH_V7E_M,      ARM_ARCH_V7EM},
    /* All ARMv8-A point releases share the single TAG_CPU_ARCH_V8
       attribute value.  */
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_1A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_2A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_3A},
    {TAG_CPU_ARCH_V8M_BASE,   ARM_ARCH_V8M_BASE},
    {TAG_CPU_ARCH_V8M_MAIN,   ARM_ARCH_V8M_MAIN},
    {TAG_CPU_ARCH_V8R,	      ARM_ARCH_V8R},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_4A},
    {TAG_CPU_ARCH_V8,	      ARM_ARCH_V8_5A},
    {TAG_CPU_ARCH_V8_1M_MAIN, ARM_ARCH_V8_1M_MAIN},
    {TAG_CPU_ARCH_V8,	    ARM_ARCH_V8_6A},
    {-1,		    ARM_ARCH_NONE}	/* Sentinel.  */
};
32741
32742/* Set an attribute if it has not already been set by the user.  */
32743
32744static void
32745aeabi_set_attribute_int (int tag, int value)
32746{
32747  if (tag < 1
32748      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32749      || !attributes_set_explicitly[tag])
32750    bfd_elf_add_proc_attr_int (stdoutput, tag, value);
32751}
32752
32753static void
32754aeabi_set_attribute_string (int tag, const char *value)
32755{
32756  if (tag < 1
32757      || tag >= NUM_KNOWN_OBJ_ATTRIBUTES
32758      || !attributes_set_explicitly[tag])
32759    bfd_elf_add_proc_attr_string (stdoutput, tag, value);
32760}
32761
32762/* Return whether features in the *NEEDED feature set are available via
32763   extensions for the architecture whose feature set is *ARCH_FSET.  */
32764
32765static bfd_boolean
32766have_ext_for_needed_feat_p (const arm_feature_set *arch_fset,
32767			    const arm_feature_set *needed)
32768{
32769  int i, nb_allowed_archs;
32770  arm_feature_set ext_fset;
32771  const struct arm_option_extension_value_table *opt;
32772
32773  ext_fset = arm_arch_none;
32774  for (opt = arm_extensions; opt->name != NULL; opt++)
32775    {
32776      /* Extension does not provide any feature we need.  */
32777      if (!ARM_CPU_HAS_FEATURE (*needed, opt->merge_value))
32778	continue;
32779
32780      nb_allowed_archs =
32781	sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[0]);
32782      for (i = 0; i < nb_allowed_archs; i++)
32783	{
32784	  /* Empty entry.  */
32785	  if (ARM_FEATURE_EQUAL (opt->allowed_archs[i], arm_arch_any))
32786	    break;
32787
32788	  /* Extension is available, add it.  */
32789	  if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], *arch_fset))
32790	    ARM_MERGE_FEATURE_SETS (ext_fset, ext_fset, opt->merge_value);
32791	}
32792    }
32793
32794  /* Can we enable all features in *needed?  */
32795  return ARM_FSET_CPU_SUBSET (*needed, ext_fset);
32796}
32797
/* Select the value for the Tag_CPU_arch and Tag_CPU_arch_profile build
   attributes for a given architecture feature set *ARCH_EXT_FSET including
   extension feature set *EXT_FSET.  The selection logic used depends on
   EXACT_MATCH:
   - if true, check for an exact match of the architecture modulo extensions;
   - otherwise, select the build attribute value of the first superset
     architecture released so that results remain stable when new
     architectures are added.
   For -march/-mcpu=all the build attribute value of the most featureful
   architecture is returned.  The Tag_CPU_arch_profile result is returned in
   PROFILE.  */
32808
static int
get_aeabi_cpu_arch_from_fset (const arm_feature_set *arch_ext_fset,
			      const arm_feature_set *ext_fset,
			      char *profile, int exact_match)
{
  arm_feature_set arch_fset;
  const cpu_arch_ver_table *p_ver, *p_ver_ret = NULL;

  /* Select most featureful architecture with all its extensions if building
     for -march=all as the feature sets used to set build attributes.  */
  if (ARM_FEATURE_EQUAL (*arch_ext_fset, arm_arch_any))
    {
      /* Force revisiting of decision for each new architecture.  */
      gas_assert (MAX_TAG_CPU_ARCH <= TAG_CPU_ARCH_V8_1M_MAIN);
      *profile = 'A';
      return TAG_CPU_ARCH_V8;
    }

  /* Feature set of the architecture alone, with extension bits removed.  */
  ARM_CLEAR_FEATURE (arch_fset, *arch_ext_fset, *ext_fset);

  /* Walk the architecture table in release order; the first acceptable
     entry wins so that results stay stable as new entries are appended.  */
  for (p_ver = cpu_arch_ver; p_ver->val != -1; p_ver++)
    {
      arm_feature_set known_arch_fset;

      /* Candidate architecture's features, ignoring any FPU bits.  */
      ARM_CLEAR_FEATURE (known_arch_fset, p_ver->flags, fpu_any);
      if (exact_match)
	{
	  /* Base architecture match user-specified architecture and
	     extensions, eg. ARMv6S-M matching -march=armv6-m+os.  */
	  if (ARM_FEATURE_EQUAL (*arch_ext_fset, known_arch_fset))
	    {
	      p_ver_ret = p_ver;
	      goto found;
	    }
	  /* Base architecture match user-specified architecture only
	     (eg. ARMv6-M in the same case as above).  Record it in case we
	     find a match with above condition.  */
	  else if (p_ver_ret == NULL
		   && ARM_FEATURE_EQUAL (arch_fset, known_arch_fset))
	    p_ver_ret = p_ver;
	}
      else
	{

	  /* Architecture has all features wanted.  */
	  if (ARM_FSET_CPU_SUBSET (arch_fset, known_arch_fset))
	    {
	      arm_feature_set added_fset;

	      /* Compute features added by this architecture over the one
		 recorded in p_ver_ret.  */
	      if (p_ver_ret != NULL)
		ARM_CLEAR_FEATURE (added_fset, known_arch_fset,
				   p_ver_ret->flags);
	      /* First architecture that match incl. with extensions, or the
		 only difference in features over the recorded match is
		 features that were optional and are now mandatory.  */
	      if (p_ver_ret == NULL
		  || ARM_FSET_CPU_SUBSET (added_fset, arch_fset))
		{
		  p_ver_ret = p_ver;
		  goto found;
		}
	    }
	  else if (p_ver_ret == NULL)
	    {
	      arm_feature_set needed_ext_fset;

	      /* Features wanted that this architecture does not provide.  */
	      ARM_CLEAR_FEATURE (needed_ext_fset, arch_fset, known_arch_fset);

	      /* Architecture has all features needed when using some
		 extensions.  Record it and continue searching in case there
		 exist an architecture providing all needed features without
		 the need for extensions (eg. ARMv6S-M Vs ARMv6-M with
		 OS extension).  */
	      if (have_ext_for_needed_feat_p (&known_arch_fset,
					      &needed_ext_fset))
		p_ver_ret = p_ver;
	    }
	}
    }

  if (p_ver_ret == NULL)
    return -1;

 found:
  /* Tag_CPU_arch_profile.  Derived from the matched entry's feature bits,
     not from the user's feature set.  */
  if (!ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r)
      && (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7a)
          || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8)
          || (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_atomics)
              && !ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8m_m_only))))
    *profile = 'A';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v7r)
      || ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_v8r))
    *profile = 'R';
  else if (ARM_CPU_HAS_FEATURE (p_ver_ret->flags, arm_ext_m))
    *profile = 'M';
  else
    *profile = '\0';
  return p_ver_ret->val;
}
32911
32912/* Set the public EABI object attributes.  */
32913
static void
aeabi_set_public_attributes (void)
{
  char profile = '\0';
  int arch = -1;
  int virt_sec = 0;
  /* Non-zero when half-precision support is optional for the selected
     FP/SIMD architecture; gates emission of Tag_VFP_HP_extension below.  */
  int fp16_optional = 0;
  int skip_exact_match = 0;
  arm_feature_set flags, flags_arch, flags_ext;

  /* Autodetection mode, choose the architecture based the instructions
     actually used.  */
  if (no_cpu_selected ())
    {
      ARM_MERGE_FEATURE_SETS (flags, arm_arch_used, thumb_arch_used);

      if (ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v1);

      if (ARM_CPU_HAS_FEATURE (thumb_arch_used, arm_arch_any))
	ARM_MERGE_FEATURE_SETS (flags, flags, arm_ext_v4t);

      /* Code run during relaxation relies on selected_cpu being set.  */
      ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
      flags_ext = arm_arch_none;
      ARM_CLEAR_FEATURE (selected_arch, flags_arch, flags_ext);
      selected_ext = flags_ext;
      selected_cpu = flags;
    }
  /* Otherwise, choose the architecture based on the capabilities of the
     requested cpu.  */
  else
    {
      ARM_MERGE_FEATURE_SETS (flags_arch, selected_arch, selected_ext);
      ARM_CLEAR_FEATURE (flags_arch, flags_arch, fpu_any);
      flags_ext = selected_ext;
      flags = selected_cpu;
    }
  /* Fold in the FPU feature bits so the FP-related tags below see them.  */
  ARM_MERGE_FEATURE_SETS (flags, flags, selected_fpu);

  /* Allow the user to override the reported architecture.  */
  if (!ARM_FEATURE_ZERO (selected_object_arch))
    {
      ARM_CLEAR_FEATURE (flags_arch, selected_object_arch, fpu_any);
      flags_ext = arm_arch_none;
    }
  else
    skip_exact_match = ARM_FEATURE_EQUAL (selected_cpu, arm_arch_any);

  /* When this function is run again after relaxation has happened there is no
     way to determine whether an architecture or CPU was specified by the user:
     - selected_cpu is set above for relaxation to work;
     - march_cpu_opt is not set if only -mcpu or .cpu is used;
     - mcpu_cpu_opt is set to arm_arch_any for autodetection.
     Therefore, if not in -march=all case we first try an exact match and fall
     back to autodetection.  */
  if (!skip_exact_match)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 1);
  if (arch == -1)
    arch = get_aeabi_cpu_arch_from_fset (&flags_arch, &flags_ext, &profile, 0);
  if (arch == -1)
    as_bad (_("no architecture contains all the instructions used\n"));

  /* Tag_CPU_name.  */
  if (selected_cpu_name[0])
    {
      char *q;

      q = selected_cpu_name;
      /* For "armv*" names, drop the "armv" prefix and uppercase the
	 remainder in place, eg. "armv7-a" is reported as "7-A".  */
      if (strncmp (q, "armv", 4) == 0)
	{
	  int i;

	  q += 4;
	  for (i = 0; q[i]; i++)
	    q[i] = TOUPPER (q[i]);
	}
      aeabi_set_attribute_string (Tag_CPU_name, q);
    }

  /* Tag_CPU_arch.  */
  aeabi_set_attribute_int (Tag_CPU_arch, arch);

  /* Tag_CPU_arch_profile.  */
  if (profile != '\0')
    aeabi_set_attribute_int (Tag_CPU_arch_profile, profile);

  /* Tag_DSP_extension.  */
  if (ARM_CPU_HAS_FEATURE (selected_ext, arm_ext_dsp))
    aeabi_set_attribute_int (Tag_DSP_extension, 1);

  ARM_CLEAR_FEATURE (flags_arch, flags, fpu_any);
  /* Tag_ARM_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v1)
      || ARM_FEATURE_ZERO (flags_arch))
    aeabi_set_attribute_int (Tag_ARM_ISA_use, 1);

  /* Tag_THUMB_ISA_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v4t)
      || ARM_FEATURE_ZERO (flags_arch))
    {
      int thumb_isa_use;

      if (!ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
	  && ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m_m_only))
	thumb_isa_use = 3;
      else if (ARM_CPU_HAS_FEATURE (flags, arm_arch_t2))
	thumb_isa_use = 2;
      else
	thumb_isa_use = 1;
      aeabi_set_attribute_int (Tag_THUMB_ISA_use, thumb_isa_use);
    }

  /* Tag_VFP_arch.  Checked from newest FP architecture to oldest.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_armv8xd))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 7 : 8);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_fma))
    aeabi_set_attribute_int (Tag_VFP_arch,
			     ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32)
			     ? 5 : 6);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_d32))
    {
      fp16_optional = 1;
      aeabi_set_attribute_int (Tag_VFP_arch, 3);
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v3xd))
    {
      aeabi_set_attribute_int (Tag_VFP_arch, 4);
      fp16_optional = 1;
    }
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v2))
    aeabi_set_attribute_int (Tag_VFP_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1)
	   || ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd))
    aeabi_set_attribute_int (Tag_VFP_arch, 1);

  /* Tag_ABI_HardFP_use.  v1xd without full v1 indicates single-precision
     only, hence value 1.  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1xd)
      && !ARM_CPU_HAS_FEATURE (flags, fpu_vfp_ext_v1))
    aeabi_set_attribute_int (Tag_ABI_HardFP_use, 1);

  /* Tag_WMMX_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt2))
    aeabi_set_attribute_int (Tag_WMMX_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_cext_iwmmxt))
    aeabi_set_attribute_int (Tag_WMMX_arch, 1);

  /* Tag_Advanced_SIMD_arch (formerly Tag_NEON_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v8_1))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 4);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_armv8))
    aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 3);
  else if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_v1))
    {
      if (ARM_CPU_HAS_FEATURE (flags, fpu_neon_ext_fma))
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 2);
	}
      else
	{
	  aeabi_set_attribute_int (Tag_Advanced_SIMD_arch, 1);
	  fp16_optional = 1;
	}
    }

  /* Tag_MVE_arch.  */
  if (ARM_CPU_HAS_FEATURE (flags, mve_fp_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 2);
  else if (ARM_CPU_HAS_FEATURE (flags, mve_ext))
    aeabi_set_attribute_int (Tag_MVE_arch, 1);

  /* Tag_VFP_HP_extension (formerly Tag_NEON_FP16_arch).  */
  if (ARM_CPU_HAS_FEATURE (flags, fpu_vfp_fp16) && fp16_optional)
    aeabi_set_attribute_int (Tag_VFP_HP_extension, 1);

  /* Tag_DIV_use.

     We set Tag_DIV_use to two when integer divide instructions have been used
     in ARM state, or when Thumb integer divide instructions have been used,
     but we have no architecture profile set, nor have we any ARM instructions.

     For ARMv8-A and ARMv8-M we set the tag to 0 as integer divide is implied
     by the base architecture.

     For new architectures we will have to check these tests.  */
  gas_assert (arch <= TAG_CPU_ARCH_V8_1M_MAIN);
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_v8)
      || ARM_CPU_HAS_FEATURE (flags, arm_ext_v8m))
    aeabi_set_attribute_int (Tag_DIV_use, 0);
  else if (ARM_CPU_HAS_FEATURE (flags, arm_ext_adiv)
	   || (profile == '\0'
	       && ARM_CPU_HAS_FEATURE (flags, arm_ext_div)
	       && !ARM_CPU_HAS_FEATURE (arm_arch_used, arm_arch_any)))
    aeabi_set_attribute_int (Tag_DIV_use, 2);

  /* Tag_MP_extension_use.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_mp))
    aeabi_set_attribute_int (Tag_MPextension_use, 1);

  /* Tag Virtualization_use.  Bit 0: security extensions; bit 1:
     virtualization extensions.  */
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_sec))
    virt_sec |= 1;
  if (ARM_CPU_HAS_FEATURE (flags, arm_ext_virt))
    virt_sec |= 2;
  if (virt_sec != 0)
    aeabi_set_attribute_int (Tag_Virtualization_use, virt_sec);

  /* Tag_ABI_FP_16bit_format, only when a format was chosen explicitly.  */
  if (fp16_format != ARM_FP16_FORMAT_DEFAULT)
    aeabi_set_attribute_int (Tag_ABI_FP_16bit_format, fp16_format);
}
33125
33126/* Post relaxation hook.  Recompute ARM attributes now that relaxation is
33127   finished and free extension feature bits which will not be used anymore.  */
33128
33129void
33130arm_md_post_relax (void)
33131{
33132  aeabi_set_public_attributes ();
33133  XDELETE (mcpu_ext_opt);
33134  mcpu_ext_opt = NULL;
33135  XDELETE (march_ext_opt);
33136  march_ext_opt = NULL;
33137}
33138
33139/* Add the default contents for the .ARM.attributes section.  */
33140
33141void
33142arm_md_end (void)
33143{
33144  if (EF_ARM_EABI_VERSION (meabi_flags) < EF_ARM_EABI_VER4)
33145    return;
33146
33147  aeabi_set_public_attributes ();
33148}
33149#endif /* OBJ_ELF */
33150
33151/* Parse a .cpu directive.  */
33152
33153static void
33154s_arm_cpu (int ignored ATTRIBUTE_UNUSED)
33155{
33156  const struct arm_cpu_option_table *opt;
33157  char *name;
33158  char saved_char;
33159
33160  name = input_line_pointer;
33161  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33162    input_line_pointer++;
33163  saved_char = *input_line_pointer;
33164  *input_line_pointer = 0;
33165
33166  /* Skip the first "all" entry.  */
33167  for (opt = arm_cpus + 1; opt->name != NULL; opt++)
33168    if (streq (opt->name, name))
33169      {
33170	selected_arch = opt->value;
33171	selected_ext = opt->ext;
33172	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33173	if (opt->canonical_name)
33174	  strcpy (selected_cpu_name, opt->canonical_name);
33175	else
33176	  {
33177	    int i;
33178	    for (i = 0; opt->name[i]; i++)
33179	      selected_cpu_name[i] = TOUPPER (opt->name[i]);
33180
33181	    selected_cpu_name[i] = 0;
33182	  }
33183	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33184
33185	*input_line_pointer = saved_char;
33186	demand_empty_rest_of_line ();
33187	return;
33188      }
33189  as_bad (_("unknown cpu `%s'"), name);
33190  *input_line_pointer = saved_char;
33191  ignore_rest_of_line ();
33192}
33193
33194/* Parse a .arch directive.  */
33195
33196static void
33197s_arm_arch (int ignored ATTRIBUTE_UNUSED)
33198{
33199  const struct arm_arch_option_table *opt;
33200  char saved_char;
33201  char *name;
33202
33203  name = input_line_pointer;
33204  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33205    input_line_pointer++;
33206  saved_char = *input_line_pointer;
33207  *input_line_pointer = 0;
33208
33209  /* Skip the first "all" entry.  */
33210  for (opt = arm_archs + 1; opt->name != NULL; opt++)
33211    if (streq (opt->name, name))
33212      {
33213	selected_arch = opt->value;
33214	selected_ctx_ext_table = opt->ext_table;
33215	selected_ext = arm_arch_none;
33216	selected_cpu = selected_arch;
33217	strcpy (selected_cpu_name, opt->name);
33218	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33219	*input_line_pointer = saved_char;
33220	demand_empty_rest_of_line ();
33221	return;
33222      }
33223
33224  as_bad (_("unknown architecture `%s'\n"), name);
33225  *input_line_pointer = saved_char;
33226  ignore_rest_of_line ();
33227}
33228
33229/* Parse a .object_arch directive.  */
33230
33231static void
33232s_arm_object_arch (int ignored ATTRIBUTE_UNUSED)
33233{
33234  const struct arm_arch_option_table *opt;
33235  char saved_char;
33236  char *name;
33237
33238  name = input_line_pointer;
33239  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33240    input_line_pointer++;
33241  saved_char = *input_line_pointer;
33242  *input_line_pointer = 0;
33243
33244  /* Skip the first "all" entry.  */
33245  for (opt = arm_archs + 1; opt->name != NULL; opt++)
33246    if (streq (opt->name, name))
33247      {
33248	selected_object_arch = opt->value;
33249	*input_line_pointer = saved_char;
33250	demand_empty_rest_of_line ();
33251	return;
33252      }
33253
33254  as_bad (_("unknown architecture `%s'\n"), name);
33255  *input_line_pointer = saved_char;
33256  ignore_rest_of_line ();
33257}
33258
33259/* Parse a .arch_extension directive.  */
33260
33261static void
33262s_arm_arch_extension (int ignored ATTRIBUTE_UNUSED)
33263{
33264  const struct arm_option_extension_value_table *opt;
33265  char saved_char;
33266  char *name;
33267  int adding_value = 1;
33268
33269  name = input_line_pointer;
33270  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
33271    input_line_pointer++;
33272  saved_char = *input_line_pointer;
33273  *input_line_pointer = 0;
33274
33275  if (strlen (name) >= 2
33276      && strncmp (name, "no", 2) == 0)
33277    {
33278      adding_value = 0;
33279      name += 2;
33280    }
33281
33282  /* Check the context specific extension table */
33283  if (selected_ctx_ext_table)
33284    {
33285      const struct arm_ext_table * ext_opt;
33286      for (ext_opt = selected_ctx_ext_table; ext_opt->name != NULL; ext_opt++)
33287        {
33288          if (streq (ext_opt->name, name))
33289	    {
33290	      if (adding_value)
33291		{
33292		  if (ARM_FEATURE_ZERO (ext_opt->merge))
33293		    /* TODO: Option not supported.  When we remove the
33294		    legacy table this case should error out.  */
33295		    continue;
33296		  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33297					  ext_opt->merge);
33298		}
33299	      else
33300		ARM_CLEAR_FEATURE (selected_ext, selected_ext, ext_opt->clear);
33301
33302	      ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33303	      ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33304	      *input_line_pointer = saved_char;
33305	      demand_empty_rest_of_line ();
33306	      return;
33307	    }
33308	}
33309    }
33310
33311  for (opt = arm_extensions; opt->name != NULL; opt++)
33312    if (streq (opt->name, name))
33313      {
33314	int i, nb_allowed_archs =
33315	  sizeof (opt->allowed_archs) / sizeof (opt->allowed_archs[i]);
33316	for (i = 0; i < nb_allowed_archs; i++)
33317	  {
33318	    /* Empty entry.  */
33319	    if (ARM_CPU_IS_ANY (opt->allowed_archs[i]))
33320	      continue;
33321	    if (ARM_FSET_CPU_SUBSET (opt->allowed_archs[i], selected_arch))
33322	      break;
33323	  }
33324
33325	if (i == nb_allowed_archs)
33326	  {
33327	    as_bad (_("architectural extension `%s' is not allowed for the "
33328		      "current base architecture"), name);
33329	    break;
33330	  }
33331
33332	if (adding_value)
33333	  ARM_MERGE_FEATURE_SETS (selected_ext, selected_ext,
33334				  opt->merge_value);
33335	else
33336	  ARM_CLEAR_FEATURE (selected_ext, selected_ext, opt->clear_value);
33337
33338	ARM_MERGE_FEATURE_SETS (selected_cpu, selected_arch, selected_ext);
33339	ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
33340	*input_line_pointer = saved_char;
33341	demand_empty_rest_of_line ();
33342	/* Allowing Thumb division instructions for ARMv7 in autodetection rely
33343	   on this return so that duplicate extensions (extensions with the
33344	   same name as a previous extension in the list) are not considered
33345	   for command-line parsing.  */
33346	return;
33347      }
33348
33349  if (opt->name == NULL)
33350    as_bad (_("unknown architecture extension `%s'\n"), name);
33351
33352  *input_line_pointer = saved_char;
33353  ignore_rest_of_line ();
33354}
33355
33356/* Parse a .fpu directive.  */
33357
static void
s_arm_fpu (int ignored ATTRIBUTE_UNUSED)
{
  const struct arm_option_fpu_value_table *opt;
  char saved_char;
  char *name;

  /* NUL-terminate the FPU name in place; restored before returning.  */
  name = input_line_pointer;
  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
    input_line_pointer++;
  saved_char = *input_line_pointer;
  *input_line_pointer = 0;

  for (opt = arm_fpus; opt->name != NULL; opt++)
    if (streq (opt->name, name))
      {
	selected_fpu = opt->value;
	/* Drop any FPU bits inherited from the CPU selection; the .fpu
	   directive now determines them.  */
	ARM_CLEAR_FEATURE (selected_cpu, selected_cpu, fpu_any);
#ifndef CPU_DEFAULT
	/* No default CPU and none selected yet: allow any core so the
	   new FPU's instructions are accepted.  */
	if (no_cpu_selected ())
	  ARM_MERGE_FEATURE_SETS (cpu_variant, arm_arch_any, selected_fpu);
	else
#endif
	  ARM_MERGE_FEATURE_SETS (cpu_variant, selected_cpu, selected_fpu);
	*input_line_pointer = saved_char;
	demand_empty_rest_of_line ();
	return;
      }

  as_bad (_("unknown floating point format `%s'\n"), name);
  *input_line_pointer = saved_char;
  ignore_rest_of_line ();
}
33391
33392/* Copy symbol information.  */
33393
void
arm_copy_symbol_attributes (symbolS *dest, symbolS *src)
{
  /* Propagate the ARM-specific per-symbol flag bits from SRC to DEST.  */
  ARM_GET_FLAG (dest) = ARM_GET_FLAG (src);
}
33399
33400#ifdef OBJ_ELF
33401/* Given a symbolic attribute NAME, return the proper integer value.
33402   Returns -1 if the attribute is not known.  */
33403
33404int
33405arm_convert_symbolic_attribute (const char *name)
33406{
33407  static const struct
33408  {
33409    const char * name;
33410    const int    tag;
33411  }
33412  attribute_table[] =
33413    {
33414      /* When you modify this table you should
33415	 also modify the list in doc/c-arm.texi.  */
33416#define T(tag) {#tag, tag}
33417      T (Tag_CPU_raw_name),
33418      T (Tag_CPU_name),
33419      T (Tag_CPU_arch),
33420      T (Tag_CPU_arch_profile),
33421      T (Tag_ARM_ISA_use),
33422      T (Tag_THUMB_ISA_use),
33423      T (Tag_FP_arch),
33424      T (Tag_VFP_arch),
33425      T (Tag_WMMX_arch),
33426      T (Tag_Advanced_SIMD_arch),
33427      T (Tag_PCS_config),
33428      T (Tag_ABI_PCS_R9_use),
33429      T (Tag_ABI_PCS_RW_data),
33430      T (Tag_ABI_PCS_RO_data),
33431      T (Tag_ABI_PCS_GOT_use),
33432      T (Tag_ABI_PCS_wchar_t),
33433      T (Tag_ABI_FP_rounding),
33434      T (Tag_ABI_FP_denormal),
33435      T (Tag_ABI_FP_exceptions),
33436      T (Tag_ABI_FP_user_exceptions),
33437      T (Tag_ABI_FP_number_model),
33438      T (Tag_ABI_align_needed),
33439      T (Tag_ABI_align8_needed),
33440      T (Tag_ABI_align_preserved),
33441      T (Tag_ABI_align8_preserved),
33442      T (Tag_ABI_enum_size),
33443      T (Tag_ABI_HardFP_use),
33444      T (Tag_ABI_VFP_args),
33445      T (Tag_ABI_WMMX_args),
33446      T (Tag_ABI_optimization_goals),
33447      T (Tag_ABI_FP_optimization_goals),
33448      T (Tag_compatibility),
33449      T (Tag_CPU_unaligned_access),
33450      T (Tag_FP_HP_extension),
33451      T (Tag_VFP_HP_extension),
33452      T (Tag_ABI_FP_16bit_format),
33453      T (Tag_MPextension_use),
33454      T (Tag_DIV_use),
33455      T (Tag_nodefaults),
33456      T (Tag_also_compatible_with),
33457      T (Tag_conformance),
33458      T (Tag_T2EE_use),
33459      T (Tag_Virtualization_use),
33460      T (Tag_DSP_extension),
33461      T (Tag_MVE_arch),
33462      /* We deliberately do not include Tag_MPextension_use_legacy.  */
33463#undef T
33464    };
33465  unsigned int i;
33466
33467  if (name == NULL)
33468    return -1;
33469
33470  for (i = 0; i < ARRAY_SIZE (attribute_table); i++)
33471    if (streq (name, attribute_table[i].name))
33472      return attribute_table[i].tag;
33473
33474  return -1;
33475}
33476
33477/* Apply sym value for relocations only in the case that they are for
33478   local symbols in the same segment as the fixup and you have the
33479   respective architectural feature for blx and simple switches.  */
33480
33481int
33482arm_apply_sym_value (struct fix * fixP, segT this_seg)
33483{
33484  if (fixP->fx_addsy
33485      && ARM_CPU_HAS_FEATURE (selected_cpu, arm_ext_v5t)
33486      /* PR 17444: If the local symbol is in a different section then a reloc
33487	 will always be generated for it, so applying the symbol value now
33488	 will result in a double offset being stored in the relocation.  */
33489      && (S_GET_SEGMENT (fixP->fx_addsy) == this_seg)
33490      && !S_FORCE_RELOC (fixP->fx_addsy, TRUE))
33491    {
33492      switch (fixP->fx_r_type)
33493	{
33494	case BFD_RELOC_ARM_PCREL_BLX:
33495	case BFD_RELOC_THUMB_PCREL_BRANCH23:
33496	  if (ARM_IS_FUNC (fixP->fx_addsy))
33497	    return 1;
33498	  break;
33499
33500	case BFD_RELOC_ARM_PCREL_CALL:
33501	case BFD_RELOC_THUMB_PCREL_BLX:
33502	  if (THUMB_IS_FUNC (fixP->fx_addsy))
33503	    return 1;
33504	  break;
33505
33506	default:
33507	  break;
33508	}
33509
33510    }
33511  return 0;
33512}
33513#endif /* OBJ_ELF */
33514