1/* tc-aarch64.c -- Assemble for the AArch64 ISA
2
3   Copyright (C) 2009-2017 Free Software Foundation, Inc.
4   Contributed by ARM Ltd.
5
6   This file is part of GAS.
7
8   GAS is free software; you can redistribute it and/or modify
9   it under the terms of the GNU General Public License as published by
10   the Free Software Foundation; either version 3 of the License, or
11   (at your option) any later version.
12
13   GAS is distributed in the hope that it will be useful,
14   but WITHOUT ANY WARRANTY; without even the implied warranty of
15   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
16   GNU General Public License for more details.
17
18   You should have received a copy of the GNU General Public License
19   along with this program; see the file COPYING3. If not,
20   see <http://www.gnu.org/licenses/>.  */
21
22#include "as.h"
23#include <limits.h>
24#include <stdarg.h>
25#include "bfd_stdint.h"
26#define	 NO_RELOC 0
27#include "safe-ctype.h"
28#include "subsegs.h"
29#include "obstack.h"
30
31#ifdef OBJ_ELF
32#include "elf/aarch64.h"
33#include "dw2gencfi.h"
34#endif
35
36#include "dwarf2dbg.h"
37
38/* Types of processor to assemble for.  */
39#ifndef CPU_DEFAULT
40#define CPU_DEFAULT AARCH64_ARCH_V8
41#endif
42
43#define streq(a, b)	      (strcmp (a, b) == 0)
44
45#define END_OF_INSN '\0'
46
47static aarch64_feature_set cpu_variant;
48
49/* Variables that we set while parsing command-line options.  Once all
50   options have been read we re-process these values to set the real
51   assembly flags.  */
52static const aarch64_feature_set *mcpu_cpu_opt = NULL;
53static const aarch64_feature_set *march_cpu_opt = NULL;
54
55/* Constants for known architecture features.  */
56static const aarch64_feature_set cpu_default = CPU_DEFAULT;
57
58#ifdef OBJ_ELF
59/* Pre-defined "_GLOBAL_OFFSET_TABLE_"	*/
60static symbolS *GOT_symbol;
61
62/* Which ABI to use.  */
63enum aarch64_abi_type
64{
65  AARCH64_ABI_LP64 = 0,
66  AARCH64_ABI_ILP32 = 1
67};
68
69/* AArch64 ABI for the output file.  */
70static enum aarch64_abi_type aarch64_abi = AARCH64_ABI_LP64;
71
72/* When non-zero, program to a 32-bit model, in which the C data types
73   int, long and all pointer types are 32-bit objects (ILP32); or to a
74   64-bit model, in which the C int type is 32-bits but the C long type
75   and all pointer types are 64-bit objects (LP64).  */
76#define ilp32_p		(aarch64_abi == AARCH64_ABI_ILP32)
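
/* For reference, the ABI is normally selected on the command line, e.g.
   "as -mabi=ilp32 foo.s" for the 32-bit model and "as -mabi=lp64 foo.s"
   (the default) for the 64-bit model.  */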
77#endif
78
79enum vector_el_type
80{
81  NT_invtype = -1,
82  NT_b,
83  NT_h,
84  NT_s,
85  NT_d,
86  NT_q,
87  NT_zero,
88  NT_merge
89};
90
91/* Bits for DEFINED field in vector_type_el.  */
92#define NTA_HASTYPE     1
93#define NTA_HASINDEX    2
94#define NTA_HASVARWIDTH 4
95
96struct vector_type_el
97{
98  enum vector_el_type type;
99  unsigned char defined;
100  unsigned width;
101  int64_t index;
102};
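
/* For example, parsing the ".4s" suffix of "v0.4s" is expected to yield
   type == NT_s, width == 4 and defined == NTA_HASTYPE, while an indexed
   element such as "v1.s[2]" ends up with NTA_HASINDEX set and index == 2
   (see parse_typed_reg below).  */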
103
104#define FIXUP_F_HAS_EXPLICIT_SHIFT	0x00000001
105
106struct reloc
107{
108  bfd_reloc_code_real_type type;
109  expressionS exp;
110  int pc_rel;
111  enum aarch64_opnd opnd;
112  uint32_t flags;
113  unsigned need_libopcodes_p : 1;
114};
115
116struct aarch64_instruction
117{
118  /* libopcodes structure for instruction intermediate representation.  */
119  aarch64_inst base;
120  /* Record assembly errors found during the parsing.  */
121  struct
122    {
123      enum aarch64_operand_error_kind kind;
124      const char *error;
125    } parsing_error;
126  /* The condition that appears in the assembly line.  */
127  int cond;
128  /* Relocation information (including the GAS internal fixup).  */
129  struct reloc reloc;
130  /* Need to generate an immediate in the literal pool.  */
131  unsigned gen_lit_pool : 1;
132};
133
134typedef struct aarch64_instruction aarch64_instruction;
135
136static aarch64_instruction inst;
137
138static bfd_boolean parse_operands (char *, const aarch64_opcode *);
139static bfd_boolean programmer_friendly_fixup (aarch64_instruction *);
140
141/* Diagnostics inline function utilities.
142
143   These are lightweight utilities which should only be called by parse_operands
144   and other parsers.  GAS processes each assembly line by parsing it against
145   instruction template(s); in the case of multiple templates (for the same
146   mnemonic name), those templates are tried one by one until one succeeds or
147   all fail.  An assembly line may fail a few templates before being
148   successfully parsed; an error saved here in most cases is not a user error
149   but an error indicating that the current template is not the right one.
150   Therefore it is very important that errors can be saved at a low cost during
151   the parsing; we don't want to slow down the whole parse by recording
152   non-user errors in detail.
153
154   Remember that the objective is to help GAS pick the most appropriate
155   error message in the case of multiple templates, e.g. FMOV, which has 8
156   templates.  */
157
158static inline void
159clear_error (void)
160{
161  inst.parsing_error.kind = AARCH64_OPDE_NIL;
162  inst.parsing_error.error = NULL;
163}
164
165static inline bfd_boolean
166error_p (void)
167{
168  return inst.parsing_error.kind != AARCH64_OPDE_NIL;
169}
170
171static inline const char *
172get_error_message (void)
173{
174  return inst.parsing_error.error;
175}
176
177static inline enum aarch64_operand_error_kind
178get_error_kind (void)
179{
180  return inst.parsing_error.kind;
181}
182
183static inline void
184set_error (enum aarch64_operand_error_kind kind, const char *error)
185{
186  inst.parsing_error.kind = kind;
187  inst.parsing_error.error = error;
188}
189
190static inline void
191set_recoverable_error (const char *error)
192{
193  set_error (AARCH64_OPDE_RECOVERABLE, error);
194}
195
196/* Use the DESC field of the corresponding aarch64_operand entry to compose
197   the error message.  */
198static inline void
199set_default_error (void)
200{
201  set_error (AARCH64_OPDE_SYNTAX_ERROR, NULL);
202}
203
204static inline void
205set_syntax_error (const char *error)
206{
207  set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
208}
209
210static inline void
211set_first_syntax_error (const char *error)
212{
213  if (! error_p ())
214    set_error (AARCH64_OPDE_SYNTAX_ERROR, error);
215}
216
217static inline void
218set_fatal_syntax_error (const char *error)
219{
220  set_error (AARCH64_OPDE_FATAL_SYNTAX_ERROR, error);
221}
222
223/* Number of littlenums required to hold an extended precision number.  */
224#define MAX_LITTLENUMS 6
225
226/* Value returned by certain parsers when parsing fails; on success those
227   parsers instead return information about the parsed result, e.g. the
228   register number.  */
229#define PARSE_FAIL -1
230
231/* This is an invalid condition code that means no conditional field is
232   present. */
233#define COND_ALWAYS 0x10
234
235typedef struct
236{
237  const char *template;
238  unsigned long value;
239} asm_barrier_opt;
240
241typedef struct
242{
243  const char *template;
244  uint32_t value;
245} asm_nzcv;
246
247struct reloc_entry
248{
249  char *name;
250  bfd_reloc_code_real_type reloc;
251};
252
253/* Macros to define the register types and masks for the purpose
254   of parsing.  */
255
256#undef AARCH64_REG_TYPES
257#define AARCH64_REG_TYPES	\
258  BASIC_REG_TYPE(R_32)	/* w[0-30] */	\
259  BASIC_REG_TYPE(R_64)	/* x[0-30] */	\
260  BASIC_REG_TYPE(SP_32)	/* wsp     */	\
261  BASIC_REG_TYPE(SP_64)	/* sp      */	\
262  BASIC_REG_TYPE(Z_32)	/* wzr     */	\
263  BASIC_REG_TYPE(Z_64)	/* xzr     */	\
264  BASIC_REG_TYPE(FP_B)	/* b[0-31] *//* NOTE: keep FP_[BHSDQ] consecutive! */\
265  BASIC_REG_TYPE(FP_H)	/* h[0-31] */	\
266  BASIC_REG_TYPE(FP_S)	/* s[0-31] */	\
267  BASIC_REG_TYPE(FP_D)	/* d[0-31] */	\
268  BASIC_REG_TYPE(FP_Q)	/* q[0-31] */	\
269  BASIC_REG_TYPE(VN)	/* v[0-31] */	\
270  BASIC_REG_TYPE(ZN)	/* z[0-31] */	\
271  BASIC_REG_TYPE(PN)	/* p[0-15] */	\
272  /* Typecheck: any 64-bit int reg         (inc SP exc XZR).  */	\
273  MULTI_REG_TYPE(R64_SP, REG_TYPE(R_64) | REG_TYPE(SP_64))		\
274  /* Typecheck: same, plus SVE registers.  */				\
275  MULTI_REG_TYPE(SVE_BASE, REG_TYPE(R_64) | REG_TYPE(SP_64)		\
276		 | REG_TYPE(ZN))					\
277  /* Typecheck: x[0-30], w[0-30] or [xw]zr.  */				\
278  MULTI_REG_TYPE(R_Z, REG_TYPE(R_32) | REG_TYPE(R_64)			\
279		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
280  /* Typecheck: same, plus SVE registers.  */				\
281  MULTI_REG_TYPE(SVE_OFFSET, REG_TYPE(R_32) | REG_TYPE(R_64)		\
282		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)			\
283		 | REG_TYPE(ZN))					\
284  /* Typecheck: x[0-30], w[0-30] or {w}sp.  */				\
285  MULTI_REG_TYPE(R_SP, REG_TYPE(R_32) | REG_TYPE(R_64)			\
286		 | REG_TYPE(SP_32) | REG_TYPE(SP_64))			\
287  /* Typecheck: any int                    (inc {W}SP inc [WX]ZR).  */	\
288  MULTI_REG_TYPE(R_Z_SP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
289		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
290		 | REG_TYPE(Z_32) | REG_TYPE(Z_64)) 			\
291  /* Typecheck: any [BHSDQ]P FP.  */					\
292  MULTI_REG_TYPE(BHSDQ, REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
293		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
294  /* Typecheck: any int or [BHSDQ]P FP or V reg (exc SP inc [WX]ZR).  */ \
295  MULTI_REG_TYPE(R_Z_BHSDQ_V, REG_TYPE(R_32) | REG_TYPE(R_64)		\
296		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
297		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
298		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q))	\
299  /* Typecheck: as above, but also Zn and Pn.  This should only be	\
300     used for SVE instructions, since Zn and Pn are valid symbols	\
301     in other contexts.  */						\
302  MULTI_REG_TYPE(R_Z_BHSDQ_VZP, REG_TYPE(R_32) | REG_TYPE(R_64)		\
303		 | REG_TYPE(Z_32) | REG_TYPE(Z_64) | REG_TYPE(VN)	\
304		 | REG_TYPE(FP_B) | REG_TYPE(FP_H)			\
305		 | REG_TYPE(FP_S) | REG_TYPE(FP_D) | REG_TYPE(FP_Q)	\
306		 | REG_TYPE(ZN) | REG_TYPE(PN))				\
307  /* Any integer register; used for error messages only.  */		\
308  MULTI_REG_TYPE(R_N, REG_TYPE(R_32) | REG_TYPE(R_64)			\
309		 | REG_TYPE(SP_32) | REG_TYPE(SP_64)			\
310		 | REG_TYPE(Z_32) | REG_TYPE(Z_64))			\
311  /* Pseudo type to mark the end of the enumerator sequence.  */	\
312  BASIC_REG_TYPE(MAX)
313
314#undef BASIC_REG_TYPE
315#define BASIC_REG_TYPE(T)	REG_TYPE_##T,
316#undef MULTI_REG_TYPE
317#define MULTI_REG_TYPE(T,V)	BASIC_REG_TYPE(T)
318
319/* Register type enumerators.  */
320typedef enum aarch64_reg_type_
321{
322  /* A list of REG_TYPE_*.  */
323  AARCH64_REG_TYPES
324} aarch64_reg_type;
325
326#undef BASIC_REG_TYPE
327#define BASIC_REG_TYPE(T)	1 << REG_TYPE_##T,
328#undef REG_TYPE
329#define REG_TYPE(T)		(1 << REG_TYPE_##T)
330#undef MULTI_REG_TYPE
331#define MULTI_REG_TYPE(T,V)	V,
332
333/* Structure for a hash table entry for a register.  */
334typedef struct
335{
336  const char *name;
337  unsigned char number;
338  ENUM_BITFIELD (aarch64_reg_type_) type : 8;
339  unsigned char builtin;
340} reg_entry;
341
342/* Values indexed by aarch64_reg_type to assist the type checking.  */
343static const unsigned reg_type_masks[] =
344{
345  AARCH64_REG_TYPES
346};
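
/* To illustrate the macro expansion above: REG_TYPE_R_SP becomes an
   enumerator via BASIC_REG_TYPE, and its reg_type_masks[] entry is
     REG_TYPE(R_32) | REG_TYPE(R_64) | REG_TYPE(SP_32) | REG_TYPE(SP_64)
   i.e. a mask with one bit set for each acceptable basic register type.  */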
347
348#undef BASIC_REG_TYPE
349#undef REG_TYPE
350#undef MULTI_REG_TYPE
351#undef AARCH64_REG_TYPES
352
353/* Diagnostics used when we don't get a register of the expected type.
354   Note:  this has to be synchronized with the aarch64_reg_type definitions
355   above.  */
356static const char *
357get_reg_expected_msg (aarch64_reg_type reg_type)
358{
359  const char *msg;
360
361  switch (reg_type)
362    {
363    case REG_TYPE_R_32:
364      msg = N_("integer 32-bit register expected");
365      break;
366    case REG_TYPE_R_64:
367      msg = N_("integer 64-bit register expected");
368      break;
369    case REG_TYPE_R_N:
370      msg = N_("integer register expected");
371      break;
372    case REG_TYPE_R64_SP:
373      msg = N_("64-bit integer or SP register expected");
374      break;
375    case REG_TYPE_SVE_BASE:
376      msg = N_("base register expected");
377      break;
378    case REG_TYPE_R_Z:
379      msg = N_("integer or zero register expected");
380      break;
381    case REG_TYPE_SVE_OFFSET:
382      msg = N_("offset register expected");
383      break;
384    case REG_TYPE_R_SP:
385      msg = N_("integer or SP register expected");
386      break;
387    case REG_TYPE_R_Z_SP:
388      msg = N_("integer, zero or SP register expected");
389      break;
390    case REG_TYPE_FP_B:
391      msg = N_("8-bit SIMD scalar register expected");
392      break;
393    case REG_TYPE_FP_H:
394      msg = N_("16-bit SIMD scalar or floating-point half precision "
395	       "register expected");
396      break;
397    case REG_TYPE_FP_S:
398      msg = N_("32-bit SIMD scalar or floating-point single precision "
399	       "register expected");
400      break;
401    case REG_TYPE_FP_D:
402      msg = N_("64-bit SIMD scalar or floating-point double precision "
403	       "register expected");
404      break;
405    case REG_TYPE_FP_Q:
406      msg = N_("128-bit SIMD scalar or floating-point quad precision "
407	       "register expected");
408      break;
409    case REG_TYPE_R_Z_BHSDQ_V:
410    case REG_TYPE_R_Z_BHSDQ_VZP:
411      msg = N_("register expected");
412      break;
413    case REG_TYPE_BHSDQ:	/* any [BHSDQ]P FP  */
414      msg = N_("SIMD scalar or floating-point register expected");
415      break;
416    case REG_TYPE_VN:		/* any V reg  */
417      msg = N_("vector register expected");
418      break;
419    case REG_TYPE_ZN:
420      msg = N_("SVE vector register expected");
421      break;
422    case REG_TYPE_PN:
423      msg = N_("SVE predicate register expected");
424      break;
425    default:
426      as_fatal (_("invalid register type %d"), reg_type);
427    }
428  return msg;
429}
430
431/* Some well known registers that we refer to directly elsewhere.  */
432#define REG_SP	31
433
434/* Instructions take 4 bytes in the object file.  */
435#define INSN_SIZE	4
436
437static struct hash_control *aarch64_ops_hsh;
438static struct hash_control *aarch64_cond_hsh;
439static struct hash_control *aarch64_shift_hsh;
440static struct hash_control *aarch64_sys_regs_hsh;
441static struct hash_control *aarch64_pstatefield_hsh;
442static struct hash_control *aarch64_sys_regs_ic_hsh;
443static struct hash_control *aarch64_sys_regs_dc_hsh;
444static struct hash_control *aarch64_sys_regs_at_hsh;
445static struct hash_control *aarch64_sys_regs_tlbi_hsh;
446static struct hash_control *aarch64_reg_hsh;
447static struct hash_control *aarch64_barrier_opt_hsh;
448static struct hash_control *aarch64_nzcv_hsh;
449static struct hash_control *aarch64_pldop_hsh;
450static struct hash_control *aarch64_hint_opt_hsh;
451
452/* Stuff needed to resolve the label ambiguity
453   As:
454     ...
455     label:   <insn>
456   may differ from:
457     ...
458     label:
459	      <insn>  */
460
461static symbolS *last_label_seen;
462
463/* Literal pool structure.  Held on a per-section
464   and per-sub-section basis.  */
465
466#define MAX_LITERAL_POOL_SIZE 1024
467typedef struct literal_expression
468{
469  expressionS exp;
470  /* If exp.X_op == O_big then this bignum holds a copy of the global bignum value.  */
471  LITTLENUM_TYPE * bignum;
472} literal_expression;
473
474typedef struct literal_pool
475{
476  literal_expression literals[MAX_LITERAL_POOL_SIZE];
477  unsigned int next_free_entry;
478  unsigned int id;
479  symbolS *symbol;
480  segT section;
481  subsegT sub_section;
482  int size;
483  struct literal_pool *next;
484} literal_pool;
485
486/* Pointer to a linked list of literal pools.  */
487static literal_pool *list_of_pools = NULL;
488
489/* Pure syntax.	 */
490
491/* This array holds the chars that always start a comment.  If the
492   pre-processor is disabled, these aren't very useful.	 */
493const char comment_chars[] = "";
494
495/* This array holds the chars that only start a comment at the beginning of
496   a line.  If the line seems to have the form '# 123 filename'
497   .line and .file directives will appear in the pre-processed output.	*/
498/* Note that input_file.c hand checks for '#' at the beginning of the
499   first line of the input file.  This is because the compiler outputs
500   #NO_APP at the beginning of its output.  */
501/* Also note that comments like this one will always work.  */
502const char line_comment_chars[] = "#";
503
504const char line_separator_chars[] = ";";
505
506/* Chars that can be used to separate the mantissa
507   from the exponent in floating point numbers.  */
508const char EXP_CHARS[] = "eE";
509
510/* Chars that mean this number is a floating point constant.  */
511/* As in 0f12.456  */
512/* or	 0d1.2345e12  */
513
514const char FLT_CHARS[] = "rRsSfFdDxXeEpP";
515
516/* Prefix character that indicates the start of an immediate value.  */
517#define is_immediate_prefix(C) ((C) == '#')
518
519/* Separator character handling.  */
520
521#define skip_whitespace(str)  do { if (*(str) == ' ') ++(str); } while (0)
522
523static inline bfd_boolean
524skip_past_char (char **str, char c)
525{
526  if (**str == c)
527    {
528      (*str)++;
529      return TRUE;
530    }
531  else
532    return FALSE;
533}
534
535#define skip_past_comma(str) skip_past_char (str, ',')
536
537/* Arithmetic expressions (possibly involving symbols).	 */
538
539static bfd_boolean in_my_get_expression_p = FALSE;
540
541/* Third argument to my_get_expression.	 */
542#define GE_NO_PREFIX 0
543#define GE_OPT_PREFIX 1
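
/* For example, with GE_OPT_PREFIX both "#4" and "4" are accepted and parse
   to the same constant expression, whereas GE_NO_PREFIX does not skip a
   leading '#'.  */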
544
545/* Return TRUE if the string pointed to by *STR is successfully parsed
546   as a valid expression; *EP will be filled with the information of
547   such an expression.  Otherwise return FALSE.  */
548
549static bfd_boolean
550my_get_expression (expressionS * ep, char **str, int prefix_mode,
551		   int reject_absent)
552{
553  char *save_in;
554  segT seg;
555  int prefix_present_p = 0;
556
557  switch (prefix_mode)
558    {
559    case GE_NO_PREFIX:
560      break;
561    case GE_OPT_PREFIX:
562      if (is_immediate_prefix (**str))
563	{
564	  (*str)++;
565	  prefix_present_p = 1;
566	}
567      break;
568    default:
569      abort ();
570    }
571
572  memset (ep, 0, sizeof (expressionS));
573
574  save_in = input_line_pointer;
575  input_line_pointer = *str;
576  in_my_get_expression_p = TRUE;
577  seg = expression (ep);
578  in_my_get_expression_p = FALSE;
579
580  if (ep->X_op == O_illegal || (reject_absent && ep->X_op == O_absent))
581    {
582      /* We found a bad expression in md_operand().  */
583      *str = input_line_pointer;
584      input_line_pointer = save_in;
585      if (prefix_present_p && ! error_p ())
586	set_fatal_syntax_error (_("bad expression"));
587      else
588	set_first_syntax_error (_("bad expression"));
589      return FALSE;
590    }
591
592#ifdef OBJ_AOUT
593  if (seg != absolute_section
594      && seg != text_section
595      && seg != data_section
596      && seg != bss_section && seg != undefined_section)
597    {
598      set_syntax_error (_("bad segment"));
599      *str = input_line_pointer;
600      input_line_pointer = save_in;
601      return FALSE;
602    }
603#else
604  (void) seg;
605#endif
606
607  *str = input_line_pointer;
608  input_line_pointer = save_in;
609  return TRUE;
610}
611
612/* Turn a string in input_line_pointer into a floating point constant
613   of type TYPE, and store the appropriate bytes in *LITP.  The number
614   of LITTLENUMS emitted is stored in *SIZEP.  An error message is
615   returned, or NULL on OK.  */
616
617const char *
618md_atof (int type, char *litP, int *sizeP)
619{
620  return ieee_md_atof (type, litP, sizeP, target_big_endian);
621}
622
623/* We handle all bad expressions here, so that we can report the faulty
624   instruction in the error message.  */
625void
626md_operand (expressionS * exp)
627{
628  if (in_my_get_expression_p)
629    exp->X_op = O_illegal;
630}
631
632/* Immediate values.  */
633
634/* Errors may be set multiple times during parsing or bit encoding
635   (particularly in the Neon bits), but usually the earliest error which is set
636   will be the most meaningful. Avoid overwriting it with later (cascading)
637   errors by calling this function.  */
638
639static void
640first_error (const char *error)
641{
642  if (! error_p ())
643    set_syntax_error (error);
644}
645
646/* Similar to first_error, but this function accepts a formatted error
647   message.  */
648static void
649first_error_fmt (const char *format, ...)
650{
651  va_list args;
652  enum
653  { size = 100 };
654  /* N.B. this single buffer will not cause error messages for different
655     instructions to pollute each other; this is because at the end of
656     processing each assembly line, the error message, if any, will be
657     collected by as_bad.  */
658  static char buffer[size];
659
660  if (! error_p ())
661    {
662      int ret ATTRIBUTE_UNUSED;
663      va_start (args, format);
664      ret = vsnprintf (buffer, size, format, args);
665      know (ret <= size - 1 && ret >= 0);
666      va_end (args);
667      set_syntax_error (buffer);
668    }
669}
670
671/* Register parsing.  */
672
673/* Generic register parser which is called by other specialized
674   register parsers.
675   CCP points to what should be the beginning of a register name.
676   If it is indeed a valid register name, advance CCP over it and
677   return the reg_entry structure; otherwise return NULL.
678   It does not issue diagnostics.  */
679
680static reg_entry *
681parse_reg (char **ccp)
682{
683  char *start = *ccp;
684  char *p;
685  reg_entry *reg;
686
687#ifdef REGISTER_PREFIX
688  if (*start != REGISTER_PREFIX)
689    return NULL;
690  start++;
691#endif
692
693  p = start;
694  if (!ISALPHA (*p) || !is_name_beginner (*p))
695    return NULL;
696
697  do
698    p++;
699  while (ISALPHA (*p) || ISDIGIT (*p) || *p == '_');
700
701  reg = (reg_entry *) hash_find_n (aarch64_reg_hsh, start, p - start);
702
703  if (!reg)
704    return NULL;
705
706  *ccp = p;
707  return reg;
708}
709
710/* Return TRUE if REG->TYPE is an acceptable register type for TYPE;
711   otherwise return FALSE.  */
712static bfd_boolean
713aarch64_check_reg_type (const reg_entry *reg, aarch64_reg_type type)
714{
715  return (reg_type_masks[type] & (1 << reg->type)) != 0;
716}
717
718/* Try to parse a base or offset register.  Allow SVE base and offset
719   registers if REG_TYPE includes SVE registers.  Return the register
720   entry on success, setting *QUALIFIER to the register qualifier.
721   Return null otherwise.
722
723   Note that this function does not issue any diagnostics.  */
724
725static const reg_entry *
726aarch64_addr_reg_parse (char **ccp, aarch64_reg_type reg_type,
727			aarch64_opnd_qualifier_t *qualifier)
728{
729  char *str = *ccp;
730  const reg_entry *reg = parse_reg (&str);
731
732  if (reg == NULL)
733    return NULL;
734
735  switch (reg->type)
736    {
737    case REG_TYPE_R_32:
738    case REG_TYPE_SP_32:
739    case REG_TYPE_Z_32:
740      *qualifier = AARCH64_OPND_QLF_W;
741      break;
742
743    case REG_TYPE_R_64:
744    case REG_TYPE_SP_64:
745    case REG_TYPE_Z_64:
746      *qualifier = AARCH64_OPND_QLF_X;
747      break;
748
749    case REG_TYPE_ZN:
750      if ((reg_type_masks[reg_type] & (1 << REG_TYPE_ZN)) == 0
751	  || str[0] != '.')
752	return NULL;
753      switch (TOLOWER (str[1]))
754	{
755	case 's':
756	  *qualifier = AARCH64_OPND_QLF_S_S;
757	  break;
758	case 'd':
759	  *qualifier = AARCH64_OPND_QLF_S_D;
760	  break;
761	default:
762	  return NULL;
763	}
764      str += 2;
765      break;
766
767    default:
768      return NULL;
769    }
770
771  *ccp = str;
772
773  return reg;
774}
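
/* For example, given the input "x3, ..." the function above is expected to
   return the entry for x3 and set *QUALIFIER to AARCH64_OPND_QLF_X; for an
   SVE base register such as "z5.d" (when REG_TYPE accepts Z registers) it
   would set AARCH64_OPND_QLF_S_D instead.  */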
775
776/* Try to parse a base or offset register.  Return the register entry
777   on success, setting *QUALIFIER to the register qualifier.  Return null
778   otherwise.
779
780   Note that this function does not issue any diagnostics.  */
781
782static const reg_entry *
783aarch64_reg_parse_32_64 (char **ccp, aarch64_opnd_qualifier_t *qualifier)
784{
785  return aarch64_addr_reg_parse (ccp, REG_TYPE_R_Z_SP, qualifier);
786}
787
788/* Parse the qualifier of a vector register or vector element of type
789   REG_TYPE.  Fill in *PARSED_TYPE and return TRUE if the parsing
790   succeeds; otherwise return FALSE.
791
792   Accept only one occurrence of:
793   8b 16b 2h 4h 8h 2s 4s 1d 2d
794   b h s d q  */
795static bfd_boolean
796parse_vector_type_for_operand (aarch64_reg_type reg_type,
797			       struct vector_type_el *parsed_type, char **str)
798{
799  char *ptr = *str;
800  unsigned width;
801  unsigned element_size;
802  enum vector_el_type type;
803
804  /* skip '.' */
805  gas_assert (*ptr == '.');
806  ptr++;
807
808  if (reg_type == REG_TYPE_ZN || reg_type == REG_TYPE_PN || !ISDIGIT (*ptr))
809    {
810      width = 0;
811      goto elt_size;
812    }
813  width = strtoul (ptr, &ptr, 10);
814  if (width != 1 && width != 2 && width != 4 && width != 8 && width != 16)
815    {
816      first_error_fmt (_("bad size %d in vector width specifier"), width);
817      return FALSE;
818    }
819
820elt_size:
821  switch (TOLOWER (*ptr))
822    {
823    case 'b':
824      type = NT_b;
825      element_size = 8;
826      break;
827    case 'h':
828      type = NT_h;
829      element_size = 16;
830      break;
831    case 's':
832      type = NT_s;
833      element_size = 32;
834      break;
835    case 'd':
836      type = NT_d;
837      element_size = 64;
838      break;
839    case 'q':
840      if (reg_type == REG_TYPE_ZN || width == 1)
841	{
842	  type = NT_q;
843	  element_size = 128;
844	  break;
845	}
846      /* fall through.  */
847    default:
848      if (*ptr != '\0')
849	first_error_fmt (_("unexpected character `%c' in element size"), *ptr);
850      else
851	first_error (_("missing element size"));
852      return FALSE;
853    }
854  if (width != 0 && width * element_size != 64 && width * element_size != 128
855      && !(width == 2 && element_size == 16))
856    {
857      first_error_fmt (_
858		       ("invalid element size %d and vector size combination %c"),
859		       width, *ptr);
860      return FALSE;
861    }
862  ptr++;
863
864  parsed_type->type = type;
865  parsed_type->width = width;
866
867  *str = ptr;
868
869  return TRUE;
870}
871
872/* *STR contains an SVE zero/merge predication suffix.  Parse it into
873   *PARSED_TYPE and point *STR at the end of the suffix.  */
874
875static bfd_boolean
876parse_predication_for_operand (struct vector_type_el *parsed_type, char **str)
877{
878  char *ptr = *str;
879
880  /* Skip '/'.  */
881  gas_assert (*ptr == '/');
882  ptr++;
883  switch (TOLOWER (*ptr))
884    {
885    case 'z':
886      parsed_type->type = NT_zero;
887      break;
888    case 'm':
889      parsed_type->type = NT_merge;
890      break;
891    default:
892      if (*ptr != '\0' && *ptr != ',')
893	first_error_fmt (_("unexpected character `%c' in predication type"),
894			 *ptr);
895      else
896	first_error (_("missing predication type"));
897      return FALSE;
898    }
899  parsed_type->width = 0;
900  *str = ptr + 1;
901  return TRUE;
902}
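
/* For example, "p0/z" selects zeroing predication (NT_zero) and "p0/m"
   selects merging predication (NT_merge), as in "add z0.s, p0/m, z0.s, z1.s".  */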
903
904/* Parse a register of the type TYPE.
905
906   Return PARSE_FAIL if the string pointed by *CCP is not a valid register
907   name or the parsed register is not of TYPE.
908
909   Otherwise return the register number, and optionally fill in the actual
910   type of the register in *RTYPE when multiple alternatives were given, and
911   return the register shape and element index information in *TYPEINFO.
912
913   IN_REG_LIST should be set to TRUE if the caller is parsing a register
914   list.  */
915
916static int
917parse_typed_reg (char **ccp, aarch64_reg_type type, aarch64_reg_type *rtype,
918		 struct vector_type_el *typeinfo, bfd_boolean in_reg_list)
919{
920  char *str = *ccp;
921  const reg_entry *reg = parse_reg (&str);
922  struct vector_type_el atype;
923  struct vector_type_el parsetype;
924  bfd_boolean is_typed_vecreg = FALSE;
925
926  atype.defined = 0;
927  atype.type = NT_invtype;
928  atype.width = -1;
929  atype.index = 0;
930
931  if (reg == NULL)
932    {
933      if (typeinfo)
934	*typeinfo = atype;
935      set_default_error ();
936      return PARSE_FAIL;
937    }
938
939  if (! aarch64_check_reg_type (reg, type))
940    {
941      DEBUG_TRACE ("reg type check failed");
942      set_default_error ();
943      return PARSE_FAIL;
944    }
945  type = reg->type;
946
947  if ((type == REG_TYPE_VN || type == REG_TYPE_ZN || type == REG_TYPE_PN)
948      && (*str == '.' || (type == REG_TYPE_PN && *str == '/')))
949    {
950      if (*str == '.')
951	{
952	  if (!parse_vector_type_for_operand (type, &parsetype, &str))
953	    return PARSE_FAIL;
954	}
955      else
956	{
957	  if (!parse_predication_for_operand (&parsetype, &str))
958	    return PARSE_FAIL;
959	}
960
961      /* Register is of the form Vn.[bhsdq].  */
962      is_typed_vecreg = TRUE;
963
964      if (type == REG_TYPE_ZN || type == REG_TYPE_PN)
965	{
966	  /* The width is always variable; we don't allow an integer width
967	     to be specified.  */
968	  gas_assert (parsetype.width == 0);
969	  atype.defined |= NTA_HASVARWIDTH | NTA_HASTYPE;
970	}
971      else if (parsetype.width == 0)
972	/* Expect an index.  In the new scheme we cannot have
973	   Vn.[bhsdq] represent a scalar.  Therefore any
974	   Vn.[bhsdq] should have an index following it.
975	   Except in register lists, of course.  */
976	atype.defined |= NTA_HASINDEX;
977      else
978	atype.defined |= NTA_HASTYPE;
979
980      atype.type = parsetype.type;
981      atype.width = parsetype.width;
982    }
983
984  if (skip_past_char (&str, '['))
985    {
986      expressionS exp;
987
988      /* Reject Sn[index] syntax.  */
989      if (!is_typed_vecreg)
990	{
991	  first_error (_("this type of register can't be indexed"));
992	  return PARSE_FAIL;
993	}
994
995      if (in_reg_list == TRUE)
996	{
997	  first_error (_("index not allowed inside register list"));
998	  return PARSE_FAIL;
999	}
1000
1001      atype.defined |= NTA_HASINDEX;
1002
1003      my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1004
1005      if (exp.X_op != O_constant)
1006	{
1007	  first_error (_("constant expression required"));
1008	  return PARSE_FAIL;
1009	}
1010
1011      if (! skip_past_char (&str, ']'))
1012	return PARSE_FAIL;
1013
1014      atype.index = exp.X_add_number;
1015    }
1016  else if (!in_reg_list && (atype.defined & NTA_HASINDEX) != 0)
1017    {
1018      /* Indexed vector register expected.  */
1019      first_error (_("indexed vector register expected"));
1020      return PARSE_FAIL;
1021    }
1022
1023  /* A vector reg Vn should be typed or indexed.  */
1024  if (type == REG_TYPE_VN && atype.defined == 0)
1025    {
1026      first_error (_("invalid use of vector register"));
1027    }
1028
1029  if (typeinfo)
1030    *typeinfo = atype;
1031
1032  if (rtype)
1033    *rtype = type;
1034
1035  *ccp = str;
1036
1037  return reg->number;
1038}
1039
1040/* Parse register.
1041
1042   Return the register number on success; return PARSE_FAIL otherwise.
1043
1044   If RTYPE is not NULL, return in *RTYPE the (possibly restricted) type of
1045   the register (e.g. NEON double or quad reg when either has been requested).
1046
1047   If this is a NEON vector register with additional type information, fill
1048   in the struct pointed to by VECTYPE (if non-NULL).
1049
1050   This parser does not handle register lists.  */
1051
1052static int
1053aarch64_reg_parse (char **ccp, aarch64_reg_type type,
1054		   aarch64_reg_type *rtype, struct vector_type_el *vectype)
1055{
1056  struct vector_type_el atype;
1057  char *str = *ccp;
1058  int reg = parse_typed_reg (&str, type, rtype, &atype,
1059			     /*in_reg_list= */ FALSE);
1060
1061  if (reg == PARSE_FAIL)
1062    return PARSE_FAIL;
1063
1064  if (vectype)
1065    *vectype = atype;
1066
1067  *ccp = str;
1068
1069  return reg;
1070}
1071
1072static inline bfd_boolean
1073eq_vector_type_el (struct vector_type_el e1, struct vector_type_el e2)
1074{
1075  return
1076    e1.type == e2.type
1077    && e1.defined == e2.defined
1078    && e1.width == e2.width && e1.index == e2.index;
1079}
1080
1081/* This function parses a list of vector registers of type TYPE.
1082   On success, it returns the parsed register list information in the
1083   following encoded format:
1084
1085   bit   18-22   |   13-17   |   7-11    |    2-6    |   0-1
1086       4th regno | 3rd regno | 2nd regno | 1st regno | num_of_reg
1087
1088   The information of the register shape and/or index is returned in
1089   *VECTYPE.
1090
1091   It returns PARSE_FAIL if the register list is invalid.
1092
1093   The list contains one to four registers.
1094   Each register can be one of:
1095   <Vt>.<T>[<index>]
1096   <Vt>.<T>
1097   All <T> should be identical.
1098   All <index> should be identical.
1099   There are restrictions on <Vt> numbers which are checked later
1100   (by reg_list_valid_p).  */
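
/* For example, the list "{v2.8b-v4.8b}" is expected to parse as three
   registers: the low two bits of the return value hold num_of_reg - 1 (2),
   the register numbers 2, 3 and 4 are packed into the successive 5-bit
   fields above them, and *VECTYPE records the common ".8b" shape.  */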
1101
1102static int
1103parse_vector_reg_list (char **ccp, aarch64_reg_type type,
1104		       struct vector_type_el *vectype)
1105{
1106  char *str = *ccp;
1107  int nb_regs;
1108  struct vector_type_el typeinfo, typeinfo_first;
1109  int val, val_range;
1110  int in_range;
1111  int ret_val;
1112  int i;
1113  bfd_boolean error = FALSE;
1114  bfd_boolean expect_index = FALSE;
1115
1116  if (*str != '{')
1117    {
1118      set_syntax_error (_("expecting {"));
1119      return PARSE_FAIL;
1120    }
1121  str++;
1122
1123  nb_regs = 0;
1124  typeinfo_first.defined = 0;
1125  typeinfo_first.type = NT_invtype;
1126  typeinfo_first.width = -1;
1127  typeinfo_first.index = 0;
1128  ret_val = 0;
1129  val = -1;
1130  val_range = -1;
1131  in_range = 0;
1132  do
1133    {
1134      if (in_range)
1135	{
1136	  str++;		/* skip over '-' */
1137	  val_range = val;
1138	}
1139      val = parse_typed_reg (&str, type, NULL, &typeinfo,
1140			     /*in_reg_list= */ TRUE);
1141      if (val == PARSE_FAIL)
1142	{
1143	  set_first_syntax_error (_("invalid vector register in list"));
1144	  error = TRUE;
1145	  continue;
1146	}
1147      /* reject [bhsd]n */
1148      if (type == REG_TYPE_VN && typeinfo.defined == 0)
1149	{
1150	  set_first_syntax_error (_("invalid scalar register in list"));
1151	  error = TRUE;
1152	  continue;
1153	}
1154
1155      if (typeinfo.defined & NTA_HASINDEX)
1156	expect_index = TRUE;
1157
1158      if (in_range)
1159	{
1160	  if (val < val_range)
1161	    {
1162	      set_first_syntax_error
1163		(_("invalid range in vector register list"));
1164	      error = TRUE;
1165	    }
1166	  val_range++;
1167	}
1168      else
1169	{
1170	  val_range = val;
1171	  if (nb_regs == 0)
1172	    typeinfo_first = typeinfo;
1173	  else if (! eq_vector_type_el (typeinfo_first, typeinfo))
1174	    {
1175	      set_first_syntax_error
1176		(_("type mismatch in vector register list"));
1177	      error = TRUE;
1178	    }
1179	}
1180      if (! error)
1181	for (i = val_range; i <= val; i++)
1182	  {
1183	    ret_val |= i << (5 * nb_regs);
1184	    nb_regs++;
1185	  }
1186      in_range = 0;
1187    }
1188  while (skip_past_comma (&str) || (in_range = 1, *str == '-'));
1189
1190  skip_whitespace (str);
1191  if (*str != '}')
1192    {
1193      set_first_syntax_error (_("end of vector register list not found"));
1194      error = TRUE;
1195    }
1196  str++;
1197
1198  skip_whitespace (str);
1199
1200  if (expect_index)
1201    {
1202      if (skip_past_char (&str, '['))
1203	{
1204	  expressionS exp;
1205
1206	  my_get_expression (&exp, &str, GE_NO_PREFIX, 1);
1207	  if (exp.X_op != O_constant)
1208	    {
1209	      set_first_syntax_error (_("constant expression required."));
1210	      error = TRUE;
1211	    }
1212	  if (! skip_past_char (&str, ']'))
1213	    error = TRUE;
1214	  else
1215	    typeinfo_first.index = exp.X_add_number;
1216	}
1217      else
1218	{
1219	  set_first_syntax_error (_("expected index"));
1220	  error = TRUE;
1221	}
1222    }
1223
1224  if (nb_regs > 4)
1225    {
1226      set_first_syntax_error (_("too many registers in vector register list"));
1227      error = TRUE;
1228    }
1229  else if (nb_regs == 0)
1230    {
1231      set_first_syntax_error (_("empty vector register list"));
1232      error = TRUE;
1233    }
1234
1235  *ccp = str;
1236  if (! error)
1237    *vectype = typeinfo_first;
1238
1239  return error ? PARSE_FAIL : (ret_val << 2) | (nb_regs - 1);
1240}
1241
1242/* Directives: register aliases.  */
1243
1244static reg_entry *
1245insert_reg_alias (char *str, int number, aarch64_reg_type type)
1246{
1247  reg_entry *new;
1248  const char *name;
1249
1250  if ((new = hash_find (aarch64_reg_hsh, str)) != 0)
1251    {
1252      if (new->builtin)
1253	as_warn (_("ignoring attempt to redefine built-in register '%s'"),
1254		 str);
1255
1256      /* Only warn about a redefinition if it's not defined as the
1257         same register.  */
1258      else if (new->number != number || new->type != type)
1259	as_warn (_("ignoring redefinition of register alias '%s'"), str);
1260
1261      return NULL;
1262    }
1263
1264  name = xstrdup (str);
1265  new = XNEW (reg_entry);
1266
1267  new->name = name;
1268  new->number = number;
1269  new->type = type;
1270  new->builtin = FALSE;
1271
1272  if (hash_insert (aarch64_reg_hsh, name, (void *) new))
1273    abort ();
1274
1275  return new;
1276}
1277
1278/* Look for the .req directive.	 This is of the form:
1279
1280	new_register_name .req existing_register_name
1281
1282   If we find one, or if it looks sufficiently like one that we want to
1283   handle any error here, return TRUE.  Otherwise return FALSE.  */
1284
1285static bfd_boolean
1286create_register_alias (char *newname, char *p)
1287{
1288  const reg_entry *old;
1289  char *oldname, *nbuf;
1290  size_t nlen;
1291
1292  /* The input scrubber ensures that whitespace after the mnemonic is
1293     collapsed to single spaces.  */
1294  oldname = p;
1295  if (strncmp (oldname, " .req ", 6) != 0)
1296    return FALSE;
1297
1298  oldname += 6;
1299  if (*oldname == '\0')
1300    return FALSE;
1301
1302  old = hash_find (aarch64_reg_hsh, oldname);
1303  if (!old)
1304    {
1305      as_warn (_("unknown register '%s' -- .req ignored"), oldname);
1306      return TRUE;
1307    }
1308
1309  /* If TC_CASE_SENSITIVE is defined, then newname already points to
1310     the desired alias name, and p points to its end.  If not, then
1311     the desired alias name is in the global original_case_string.  */
1312#ifdef TC_CASE_SENSITIVE
1313  nlen = p - newname;
1314#else
1315  newname = original_case_string;
1316  nlen = strlen (newname);
1317#endif
1318
1319  nbuf = xmemdup0 (newname, nlen);
1320
1321  /* Create aliases under the new name as stated; an all-lowercase
1322     version of the new name; and an all-uppercase version of the new
1323     name.  */
1324  if (insert_reg_alias (nbuf, old->number, old->type) != NULL)
1325    {
1326      for (p = nbuf; *p; p++)
1327	*p = TOUPPER (*p);
1328
1329      if (strncmp (nbuf, newname, nlen))
1330	{
1331	  /* If this attempt to create an additional alias fails, do not bother
1332	     trying to create the all-lower case alias.  We will fail and issue
1333	     a second, duplicate error message.  This situation arises when the
1334	     programmer does something like:
1335	     foo .req r0
1336	     Foo .req r1
1337	     The second .req creates the "Foo" alias but then fails to create
1338	     the artificial FOO alias because it has already been created by the
1339	     first .req.  */
1340	  if (insert_reg_alias (nbuf, old->number, old->type) == NULL)
1341	    {
1342	      free (nbuf);
1343	      return TRUE;
1344	    }
1345	}
1346
1347      for (p = nbuf; *p; p++)
1348	*p = TOLOWER (*p);
1349
1350      if (strncmp (nbuf, newname, nlen))
1351	insert_reg_alias (nbuf, old->number, old->type);
1352    }
1353
1354  free (nbuf);
1355  return TRUE;
1356}
1357
1358/* Should never be called, as .req goes between the alias and the
1359   register name, not at the beginning of the line.  */
1360static void
1361s_req (int a ATTRIBUTE_UNUSED)
1362{
1363  as_bad (_("invalid syntax for .req directive"));
1364}
1365
1366/* The .unreq directive deletes an alias which was previously defined
1367   by .req.  For example:
1368
1369       my_alias .req r11
1370       .unreq my_alias	  */
1371
1372static void
1373s_unreq (int a ATTRIBUTE_UNUSED)
1374{
1375  char *name;
1376  char saved_char;
1377
1378  name = input_line_pointer;
1379
1380  while (*input_line_pointer != 0
1381	 && *input_line_pointer != ' ' && *input_line_pointer != '\n')
1382    ++input_line_pointer;
1383
1384  saved_char = *input_line_pointer;
1385  *input_line_pointer = 0;
1386
1387  if (!*name)
1388    as_bad (_("invalid syntax for .unreq directive"));
1389  else
1390    {
1391      reg_entry *reg = hash_find (aarch64_reg_hsh, name);
1392
1393      if (!reg)
1394	as_bad (_("unknown register alias '%s'"), name);
1395      else if (reg->builtin)
1396	as_warn (_("ignoring attempt to undefine built-in register '%s'"),
1397		 name);
1398      else
1399	{
1400	  char *p;
1401	  char *nbuf;
1402
1403	  hash_delete (aarch64_reg_hsh, name, FALSE);
1404	  free ((char *) reg->name);
1405	  free (reg);
1406
1407	  /* Also locate the all upper case and all lower case versions.
1408	     Do not complain if we cannot find one or the other as it
1409	     was probably deleted above.  */
1410
1411	  nbuf = strdup (name);
1412	  for (p = nbuf; *p; p++)
1413	    *p = TOUPPER (*p);
1414	  reg = hash_find (aarch64_reg_hsh, nbuf);
1415	  if (reg)
1416	    {
1417	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1418	      free ((char *) reg->name);
1419	      free (reg);
1420	    }
1421
1422	  for (p = nbuf; *p; p++)
1423	    *p = TOLOWER (*p);
1424	  reg = hash_find (aarch64_reg_hsh, nbuf);
1425	  if (reg)
1426	    {
1427	      hash_delete (aarch64_reg_hsh, nbuf, FALSE);
1428	      free ((char *) reg->name);
1429	      free (reg);
1430	    }
1431
1432	  free (nbuf);
1433	}
1434    }
1435
1436  *input_line_pointer = saved_char;
1437  demand_empty_rest_of_line ();
1438}
1439
1440/* Directives: Instruction set selection.  */
1441
1442#ifdef OBJ_ELF
1443/* This code is to handle mapping symbols as defined in the ARM AArch64 ELF
1444   spec.  (See "Mapping symbols", section 4.5.4, ARM AAELF64 version 0.05).
1445   Note that previously, $a and $t had type STT_FUNC (BSF_OBJECT flag),
1446   and $d had type STT_OBJECT (BSF_OBJECT flag).  Now all three are untyped.  */
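
/* For instance, a section that mixes code and literal data would typically
   get a "$x" mapping symbol at the start of its instructions and a "$d"
   mapping symbol at the start of data emitted by directives such as .word,
   so that disassemblers and linkers can tell the two apart.  */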
1447
1448/* Create a new mapping symbol for the transition to STATE.  */
1449
1450static void
1451make_mapping_symbol (enum mstate state, valueT value, fragS * frag)
1452{
1453  symbolS *symbolP;
1454  const char *symname;
1455  int type;
1456
1457  switch (state)
1458    {
1459    case MAP_DATA:
1460      symname = "$d";
1461      type = BSF_NO_FLAGS;
1462      break;
1463    case MAP_INSN:
1464      symname = "$x";
1465      type = BSF_NO_FLAGS;
1466      break;
1467    default:
1468      abort ();
1469    }
1470
1471  symbolP = symbol_new (symname, now_seg, value, frag);
1472  symbol_get_bfdsym (symbolP)->flags |= type | BSF_LOCAL;
1473
1474  /* Save the mapping symbols for future reference.  Also check that
1475     we do not place two mapping symbols at the same offset within a
1476     frag.  We'll handle overlap between frags in
1477     check_mapping_symbols.
1478
1479     If .fill or other data filling directive generates zero sized data,
1480     the mapping symbol for the following code will have the same value
1481     as the one generated for the data filling directive.  In this case,
1482     we replace the old symbol with the new one at the same address.  */
1483  if (value == 0)
1484    {
1485      if (frag->tc_frag_data.first_map != NULL)
1486	{
1487	  know (S_GET_VALUE (frag->tc_frag_data.first_map) == 0);
1488	  symbol_remove (frag->tc_frag_data.first_map, &symbol_rootP,
1489			 &symbol_lastP);
1490	}
1491      frag->tc_frag_data.first_map = symbolP;
1492    }
1493  if (frag->tc_frag_data.last_map != NULL)
1494    {
1495      know (S_GET_VALUE (frag->tc_frag_data.last_map) <=
1496	    S_GET_VALUE (symbolP));
1497      if (S_GET_VALUE (frag->tc_frag_data.last_map) == S_GET_VALUE (symbolP))
1498	symbol_remove (frag->tc_frag_data.last_map, &symbol_rootP,
1499		       &symbol_lastP);
1500    }
1501  frag->tc_frag_data.last_map = symbolP;
1502}
1503
1504/* We must sometimes convert a region marked as code to data during
1505   code alignment, if an odd number of bytes have to be padded.  The
1506   code mapping symbol is pushed to an aligned address.  */
1507
1508static void
1509insert_data_mapping_symbol (enum mstate state,
1510			    valueT value, fragS * frag, offsetT bytes)
1511{
1512  /* If there was already a mapping symbol, remove it.  */
1513  if (frag->tc_frag_data.last_map != NULL
1514      && S_GET_VALUE (frag->tc_frag_data.last_map) ==
1515      frag->fr_address + value)
1516    {
1517      symbolS *symp = frag->tc_frag_data.last_map;
1518
1519      if (value == 0)
1520	{
1521	  know (frag->tc_frag_data.first_map == symp);
1522	  frag->tc_frag_data.first_map = NULL;
1523	}
1524      frag->tc_frag_data.last_map = NULL;
1525      symbol_remove (symp, &symbol_rootP, &symbol_lastP);
1526    }
1527
1528  make_mapping_symbol (MAP_DATA, value, frag);
1529  make_mapping_symbol (state, value + bytes, frag);
1530}
1531
1532static void mapping_state_2 (enum mstate state, int max_chars);
1533
1534/* Set the mapping state to STATE.  Only call this when about to
1535   emit some STATE bytes to the file.  */
1536
1537void
1538mapping_state (enum mstate state)
1539{
1540  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1541
1542  if (state == MAP_INSN)
1543    /* AArch64 instructions require 4-byte alignment.  When emitting
1544       instructions into any section, record the appropriate section
1545       alignment.  */
1546    record_alignment (now_seg, 2);
1547
1548  if (mapstate == state)
1549    /* The mapping symbol has already been emitted.
1550       There is nothing else to do.  */
1551    return;
1552
1553#define TRANSITION(from, to) (mapstate == (from) && state == (to))
1554  if (TRANSITION (MAP_UNDEFINED, MAP_DATA) && !subseg_text_p (now_seg))
1555    /* Emit MAP_DATA within executable section in order.  Otherwise, it will be
1556       evaluated later in the next else.  */
1557    return;
1558  else if (TRANSITION (MAP_UNDEFINED, MAP_INSN))
1559    {
1560      /* Only add the symbol if the offset is > 0:
1561	 if we're at the first frag, check it's size > 0;
1562	 if we're not at the first frag, then for sure
1563	 the offset is > 0.  */
1564      struct frag *const frag_first = seg_info (now_seg)->frchainP->frch_root;
1565      const int add_symbol = (frag_now != frag_first)
1566	|| (frag_now_fix () > 0);
1567
1568      if (add_symbol)
1569	make_mapping_symbol (MAP_DATA, (valueT) 0, frag_first);
1570    }
1571#undef TRANSITION
1572
1573  mapping_state_2 (state, 0);
1574}
1575
1576/* Same as mapping_state, but MAX_CHARS bytes have already been
1577   allocated.  Put the mapping symbol that far back.  */
1578
1579static void
1580mapping_state_2 (enum mstate state, int max_chars)
1581{
1582  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1583
1584  if (!SEG_NORMAL (now_seg))
1585    return;
1586
1587  if (mapstate == state)
1588    /* The mapping symbol has already been emitted.
1589       There is nothing else to do.  */
1590    return;
1591
1592  seg_info (now_seg)->tc_segment_info_data.mapstate = state;
1593  make_mapping_symbol (state, (valueT) frag_now_fix () - max_chars, frag_now);
1594}
1595#else
1596#define mapping_state(x)	/* nothing */
1597#define mapping_state_2(x, y)	/* nothing */
1598#endif
1599
1600/* Directives: sectioning and alignment.  */
1601
1602static void
1603s_bss (int ignore ATTRIBUTE_UNUSED)
1604{
1605  /* We don't support putting frags in the BSS segment; we fake it by
1606     marking in_bss, then looking at s_skip for clues.  */
1607  subseg_set (bss_section, 0);
1608  demand_empty_rest_of_line ();
1609  mapping_state (MAP_DATA);
1610}
1611
1612static void
1613s_even (int ignore ATTRIBUTE_UNUSED)
1614{
1615  /* Never make frag if expect extra pass.  */
1616  if (!need_pass_2)
1617    frag_align (1, 0, 0);
1618
1619  record_alignment (now_seg, 1);
1620
1621  demand_empty_rest_of_line ();
1622}
1623
1624/* Directives: Literal pools.  */
1625
1626static literal_pool *
1627find_literal_pool (int size)
1628{
1629  literal_pool *pool;
1630
1631  for (pool = list_of_pools; pool != NULL; pool = pool->next)
1632    {
1633      if (pool->section == now_seg
1634	  && pool->sub_section == now_subseg && pool->size == size)
1635	break;
1636    }
1637
1638  return pool;
1639}
1640
1641static literal_pool *
1642find_or_make_literal_pool (int size)
1643{
1644  /* Next literal pool ID number.  */
1645  static unsigned int latest_pool_num = 1;
1646  literal_pool *pool;
1647
1648  pool = find_literal_pool (size);
1649
1650  if (pool == NULL)
1651    {
1652      /* Create a new pool.  */
1653      pool = XNEW (literal_pool);
1654      if (!pool)
1655	return NULL;
1656
1657      /* Currently we always put the literal pool in the current text
1658         section.  If we were generating "small" model code where we
1659         knew that all code and initialised data was within 1MB then
1660         we could output literals to mergeable, read-only data
1661         sections. */
1662
1663      pool->next_free_entry = 0;
1664      pool->section = now_seg;
1665      pool->sub_section = now_subseg;
1666      pool->size = size;
1667      pool->next = list_of_pools;
1668      pool->symbol = NULL;
1669
1670      /* Add it to the list.  */
1671      list_of_pools = pool;
1672    }
1673
1674  /* New pools, and emptied pools, will have a NULL symbol.  */
1675  if (pool->symbol == NULL)
1676    {
1677      pool->symbol = symbol_create (FAKE_LABEL_NAME, undefined_section,
1678				    (valueT) 0, &zero_address_frag);
1679      pool->id = latest_pool_num++;
1680    }
1681
1682  /* Done.  */
1683  return pool;
1684}
1685
1686/* Add the literal of size SIZE in *EXP to the relevant literal pool.
1687   Return TRUE on success, otherwise return FALSE.  */
1688static bfd_boolean
1689add_to_lit_pool (expressionS *exp, int size)
1690{
1691  literal_pool *pool;
1692  unsigned int entry;
1693
1694  pool = find_or_make_literal_pool (size);
1695
1696  /* Check if this literal value is already in the pool.  */
1697  for (entry = 0; entry < pool->next_free_entry; entry++)
1698    {
1699      expressionS * litexp = & pool->literals[entry].exp;
1700
1701      if ((litexp->X_op == exp->X_op)
1702	  && (exp->X_op == O_constant)
1703	  && (litexp->X_add_number == exp->X_add_number)
1704	  && (litexp->X_unsigned == exp->X_unsigned))
1705	break;
1706
1707      if ((litexp->X_op == exp->X_op)
1708	  && (exp->X_op == O_symbol)
1709	  && (litexp->X_add_number == exp->X_add_number)
1710	  && (litexp->X_add_symbol == exp->X_add_symbol)
1711	  && (litexp->X_op_symbol == exp->X_op_symbol))
1712	break;
1713    }
1714
1715  /* Do we need to create a new entry?  */
1716  if (entry == pool->next_free_entry)
1717    {
1718      if (entry >= MAX_LITERAL_POOL_SIZE)
1719	{
1720	  set_syntax_error (_("literal pool overflow"));
1721	  return FALSE;
1722	}
1723
1724      pool->literals[entry].exp = *exp;
1725      pool->next_free_entry += 1;
1726      if (exp->X_op == O_big)
1727	{
1728	  /* PR 16688: Bignums are held in a single global array.  We must
1729	     copy and preserve that value now, before it is overwritten.  */
1730	  pool->literals[entry].bignum = XNEWVEC (LITTLENUM_TYPE,
1731						  exp->X_add_number);
1732	  memcpy (pool->literals[entry].bignum, generic_bignum,
1733		  CHARS_PER_LITTLENUM * exp->X_add_number);
1734	}
1735      else
1736	pool->literals[entry].bignum = NULL;
1737    }
1738
1739  exp->X_op = O_symbol;
1740  exp->X_add_number = ((int) entry) * size;
1741  exp->X_add_symbol = pool->symbol;
1742
1743  return TRUE;
1744}
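
/* A minimal example of how the literal pool is used (assuming the usual
   "ldr Xt, =imm" pseudo-instruction handling elsewhere in this file):

     ldr     x0, =0x1122334455667788   // too wide for a single move; pooled
     ...
     .ltorg                            // pool emitted here as .xword data  */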
1745
1746/* Can't use symbol_new here, so have to create a symbol and then at
1747   a later date assign it a value.  That's what these functions do.  */
1748
1749static void
1750symbol_locate (symbolS * symbolP,
1751	       const char *name,/* It is copied, the caller can modify.  */
1752	       segT segment,	/* Segment identifier (SEG_<something>).  */
1753	       valueT valu,	/* Symbol value.  */
1754	       fragS * frag)	/* Associated fragment.  */
1755{
1756  size_t name_length;
1757  char *preserved_copy_of_name;
1758
1759  name_length = strlen (name) + 1;	/* +1 for \0.  */
1760  obstack_grow (&notes, name, name_length);
1761  preserved_copy_of_name = obstack_finish (&notes);
1762
1763#ifdef tc_canonicalize_symbol_name
1764  preserved_copy_of_name =
1765    tc_canonicalize_symbol_name (preserved_copy_of_name);
1766#endif
1767
1768  S_SET_NAME (symbolP, preserved_copy_of_name);
1769
1770  S_SET_SEGMENT (symbolP, segment);
1771  S_SET_VALUE (symbolP, valu);
1772  symbol_clear_list_pointers (symbolP);
1773
1774  symbol_set_frag (symbolP, frag);
1775
1776  /* Link to end of symbol chain.  */
1777  {
1778    extern int symbol_table_frozen;
1779
1780    if (symbol_table_frozen)
1781      abort ();
1782  }
1783
1784  symbol_append (symbolP, symbol_lastP, &symbol_rootP, &symbol_lastP);
1785
1786  obj_symbol_new_hook (symbolP);
1787
1788#ifdef tc_symbol_new_hook
1789  tc_symbol_new_hook (symbolP);
1790#endif
1791
1792#ifdef DEBUG_SYMS
1793  verify_symbol_chain (symbol_rootP, symbol_lastP);
1794#endif /* DEBUG_SYMS  */
1795}
1796
1797
1798static void
1799s_ltorg (int ignored ATTRIBUTE_UNUSED)
1800{
1801  unsigned int entry;
1802  literal_pool *pool;
1803  char sym_name[20];
1804  int align;
1805
1806  for (align = 2; align <= 4; align++)
1807    {
1808      int size = 1 << align;
1809
1810      pool = find_literal_pool (size);
1811      if (pool == NULL || pool->symbol == NULL || pool->next_free_entry == 0)
1812	continue;
1813
1814      /* Align the pool to the size of its entries.
1815         Only make a frag if we have to.  */
1816      if (!need_pass_2)
1817	frag_align (align, 0, 0);
1818
1819      mapping_state (MAP_DATA);
1820
1821      record_alignment (now_seg, align);
1822
1823      sprintf (sym_name, "$$lit_\002%x", pool->id);
1824
1825      symbol_locate (pool->symbol, sym_name, now_seg,
1826		     (valueT) frag_now_fix (), frag_now);
1827      symbol_table_insert (pool->symbol);
1828
1829      for (entry = 0; entry < pool->next_free_entry; entry++)
1830	{
1831	  expressionS * exp = & pool->literals[entry].exp;
1832
1833	  if (exp->X_op == O_big)
1834	    {
1835	      /* PR 16688: Restore the global bignum value.  */
1836	      gas_assert (pool->literals[entry].bignum != NULL);
1837	      memcpy (generic_bignum, pool->literals[entry].bignum,
1838		      CHARS_PER_LITTLENUM * exp->X_add_number);
1839	    }
1840
1841	  /* First output the expression in the instruction to the pool.  */
1842	  emit_expr (exp, size);	/* .word|.xword  */
1843
1844	  if (exp->X_op == O_big)
1845	    {
1846	      free (pool->literals[entry].bignum);
1847	      pool->literals[entry].bignum = NULL;
1848	    }
1849	}
1850
1851      /* Mark the pool as empty.  */
1852      pool->next_free_entry = 0;
1853      pool->symbol = NULL;
1854    }
1855}
1856
1857#ifdef OBJ_ELF
1858/* Forward declarations for functions below, in the MD interface
1859   section.  */
1860static fixS *fix_new_aarch64 (fragS *, int, short, expressionS *, int, int);
1861static struct reloc_table_entry * find_reloc_table_entry (char **);
1862
1863/* Directives: Data.  */
1864/* N.B. the support for relocation suffix in this directive needs to be
1865   implemented properly.  */
1866
1867static void
1868s_aarch64_elf_cons (int nbytes)
1869{
1870  expressionS exp;
1871
1872#ifdef md_flush_pending_output
1873  md_flush_pending_output ();
1874#endif
1875
1876  if (is_it_end_of_statement ())
1877    {
1878      demand_empty_rest_of_line ();
1879      return;
1880    }
1881
1882#ifdef md_cons_align
1883  md_cons_align (nbytes);
1884#endif
1885
1886  mapping_state (MAP_DATA);
1887  do
1888    {
1889      struct reloc_table_entry *reloc;
1890
1891      expression (&exp);
1892
1893      if (exp.X_op != O_symbol)
1894	emit_expr (&exp, (unsigned int) nbytes);
1895      else
1896	{
1897	  skip_past_char (&input_line_pointer, '#');
1898	  if (skip_past_char (&input_line_pointer, ':'))
1899	    {
1900	      reloc = find_reloc_table_entry (&input_line_pointer);
1901	      if (reloc == NULL)
1902		as_bad (_("unrecognized relocation suffix"));
1903	      else
1904		as_bad (_("unimplemented relocation suffix"));
1905	      ignore_rest_of_line ();
1906	      return;
1907	    }
1908	  else
1909	    emit_expr (&exp, (unsigned int) nbytes);
1910	}
1911    }
1912  while (*input_line_pointer++ == ',');
1913
1914  /* Put terminator back into stream.  */
1915  input_line_pointer--;
1916  demand_empty_rest_of_line ();
1917}
1918
1919#endif /* OBJ_ELF */
1920
1921/* Output a 32-bit word, but mark as an instruction.  */
1922
1923static void
1924s_aarch64_inst (int ignored ATTRIBUTE_UNUSED)
1925{
1926  expressionS exp;
1927
1928#ifdef md_flush_pending_output
1929  md_flush_pending_output ();
1930#endif
1931
1932  if (is_it_end_of_statement ())
1933    {
1934      demand_empty_rest_of_line ();
1935      return;
1936    }
1937
1938  /* Sections are assumed to start aligned.  In an executable section there
1939     is no MAP_DATA symbol pending, so we only align the address during the
1940     MAP_DATA --> MAP_INSN transition.
1941     For other sections this is not guaranteed.  */
1942  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
1943  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
1944    frag_align_code (2, 0);
1945
1946#ifdef OBJ_ELF
1947  mapping_state (MAP_INSN);
1948#endif
1949
1950  do
1951    {
1952      expression (&exp);
1953      if (exp.X_op != O_constant)
1954	{
1955	  as_bad (_("constant expression required"));
1956	  ignore_rest_of_line ();
1957	  return;
1958	}
1959
1960      if (target_big_endian)
1961	{
1962	  unsigned int val = exp.X_add_number;
1963	  exp.X_add_number = SWAP_32 (val);
1964	}
1965      emit_expr (&exp, 4);
1966    }
1967  while (*input_line_pointer++ == ',');
1968
1969  /* Put terminator back into stream.  */
1970  input_line_pointer--;
1971  demand_empty_rest_of_line ();
1972}
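
/* For example, ".inst 0xd503201f, 0xd65f03c0" emits the NOP and RET
   encodings as 32-bit words; unlike ".word", the output is marked as code
   via the mapping state and is byte-swapped on big-endian targets so the
   bytes stay in instruction order.  */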
1973
1974#ifdef OBJ_ELF
1975/* Emit BFD_RELOC_AARCH64_TLSDESC_ADD on the next ADD instruction.  */
1976
1977static void
1978s_tlsdescadd (int ignored ATTRIBUTE_UNUSED)
1979{
1980  expressionS exp;
1981
1982  expression (&exp);
1983  frag_grow (4);
1984  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
1985		   BFD_RELOC_AARCH64_TLSDESC_ADD);
1986
1987  demand_empty_rest_of_line ();
1988}
1989
1990/* Emit BFD_RELOC_AARCH64_TLSDESC_CALL on the next BLR instruction.  */
1991
1992static void
1993s_tlsdesccall (int ignored ATTRIBUTE_UNUSED)
1994{
1995  expressionS exp;
1996
1997  /* Since we're just labelling the code, there's no need to define a
1998     mapping symbol.  */
1999  expression (&exp);
2000  /* Make sure there is enough room in this frag for the following
2001     blr.  This trick only works if the blr follows immediately after
2002     the .tlsdesccall directive.  */
2003  frag_grow (4);
2004  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2005		   BFD_RELOC_AARCH64_TLSDESC_CALL);
2006
2007  demand_empty_rest_of_line ();
2008}
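
/* A typical TLS-descriptor call sequence using this directive looks
   something like the following ("var" is just a placeholder symbol):

	adrp	x0, :tlsdesc:var
	ldr	x1, [x0, #:tlsdesc_lo12:var]
	add	x0, x0, #:tlsdesc_lo12:var
	.tlsdesccall var
	blr	x1

   The .tlsdesccall line attaches BFD_RELOC_AARCH64_TLSDESC_CALL to the
   immediately following BLR so that the linker can identify (and possibly
   relax) the descriptor call.  */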
2009
2010/* Emit BFD_RELOC_AARCH64_TLSDESC_LDR on the next LDR instruction.  */
2011
2012static void
2013s_tlsdescldr (int ignored ATTRIBUTE_UNUSED)
2014{
2015  expressionS exp;
2016
2017  expression (&exp);
2018  frag_grow (4);
2019  fix_new_aarch64 (frag_now, frag_more (0) - frag_now->fr_literal, 4, &exp, 0,
2020		   BFD_RELOC_AARCH64_TLSDESC_LDR);
2021
2022  demand_empty_rest_of_line ();
2023}
2024#endif	/* OBJ_ELF */
2025
2026static void s_aarch64_arch (int);
2027static void s_aarch64_cpu (int);
2028static void s_aarch64_arch_extension (int);
2029
2030/* This table describes all the machine specific pseudo-ops the assembler
2031   has to support.  The fields are:
2032     pseudo-op name without dot
2033     function to call to execute this pseudo-op
2034     Integer arg to pass to the function.  */
2035
2036const pseudo_typeS md_pseudo_table[] = {
2037  /* Never called because '.req' does not start a line.  */
2038  {"req", s_req, 0},
2039  {"unreq", s_unreq, 0},
2040  {"bss", s_bss, 0},
2041  {"even", s_even, 0},
2042  {"ltorg", s_ltorg, 0},
2043  {"pool", s_ltorg, 0},
2044  {"cpu", s_aarch64_cpu, 0},
2045  {"arch", s_aarch64_arch, 0},
2046  {"arch_extension", s_aarch64_arch_extension, 0},
2047  {"inst", s_aarch64_inst, 0},
2048#ifdef OBJ_ELF
2049  {"tlsdescadd", s_tlsdescadd, 0},
2050  {"tlsdesccall", s_tlsdesccall, 0},
2051  {"tlsdescldr", s_tlsdescldr, 0},
2052  {"word", s_aarch64_elf_cons, 4},
2053  {"long", s_aarch64_elf_cons, 4},
2054  {"xword", s_aarch64_elf_cons, 8},
2055  {"dword", s_aarch64_elf_cons, 8},
2056#endif
2057  {0, 0, 0}
2058};
2059
2060
2061/* Check whether STR points to a register name followed by a comma or the
2062   end of line; REG_TYPE indicates which register types are checked
2063   against.  Return TRUE if STR is such a register name; otherwise return
2064   FALSE.  The function does not intend to produce any diagnostics, but since
2065   the register parser aarch64_reg_parse, which is called by this function,
2066   does produce diagnostics, we call clear_error to clear any diagnostics
2067   that may be generated by aarch64_reg_parse.
2068   Also, the function returns FALSE directly if there is any user error
2069   present at the function entry.  This prevents the existing diagnostics
2070   state from being spoiled.
2071   The function currently serves parse_constant_immediate and
2072   parse_big_immediate only.  */
2073static bfd_boolean
2074reg_name_p (char *str, aarch64_reg_type reg_type)
2075{
2076  int reg;
2077
2078  /* Prevent the diagnostics state from being spoiled.  */
2079  if (error_p ())
2080    return FALSE;
2081
2082  reg = aarch64_reg_parse (&str, reg_type, NULL, NULL);
2083
2084  /* Clear the parsing error that may be set by the reg parser.  */
2085  clear_error ();
2086
2087  if (reg == PARSE_FAIL)
2088    return FALSE;
2089
2090  skip_whitespace (str);
2091  if (*str == ',' || is_end_of_line[(unsigned int) *str])
2092    return TRUE;
2093
2094  return FALSE;
2095}
2096
2097/* Parser functions used exclusively in instruction operands.  */
2098
2099/* Parse an immediate expression which may not be constant.
2100
2101   To prevent the expression parser from pushing a register name
2102   into the symbol table as an undefined symbol, a check is first
2103   made to find out whether STR is a register of type REG_TYPE followed
2104   by a comma or the end of line.  Return FALSE if STR is such a register.  */
2105
2106static bfd_boolean
2107parse_immediate_expression (char **str, expressionS *exp,
2108			    aarch64_reg_type reg_type)
2109{
2110  if (reg_name_p (*str, reg_type))
2111    {
2112      set_recoverable_error (_("immediate operand required"));
2113      return FALSE;
2114    }
2115
2116  my_get_expression (exp, str, GE_OPT_PREFIX, 1);
2117
2118  if (exp->X_op == O_absent)
2119    {
2120      set_fatal_syntax_error (_("missing immediate expression"));
2121      return FALSE;
2122    }
2123
2124  return TRUE;
2125}
2126
2127/* Constant immediate-value read function for use in insn parsing.
2128   STR points to the beginning of the immediate (with the optional
2129   leading #); *VAL receives the value.  REG_TYPE says which register
2130   names should be treated as registers rather than as symbolic immediates.
2131
2132   Return TRUE on success; otherwise return FALSE.  */
2133
2134static bfd_boolean
2135parse_constant_immediate (char **str, int64_t *val, aarch64_reg_type reg_type)
2136{
2137  expressionS exp;
2138
2139  if (! parse_immediate_expression (str, &exp, reg_type))
2140    return FALSE;
2141
2142  if (exp.X_op != O_constant)
2143    {
2144      set_syntax_error (_("constant expression required"));
2145      return FALSE;
2146    }
2147
2148  *val = exp.X_add_number;
2149  return TRUE;
2150}
2151
2152static uint32_t
2153encode_imm_float_bits (uint32_t imm)
2154{
2155  return ((imm >> 19) & 0x7f)	/* b[25:19] -> b[6:0] */
2156    | ((imm >> (31 - 7)) & 0x80);	/* b[31]    -> b[7]   */
2157}
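
/* Two worked examples of the mapping above (IEEE754 single-precision
   words): 2.0f is 0x40000000, giving imm8 == 0x00; -1.0f is 0xbf800000,
   giving imm8 == 0xf0.  */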
2158
2159/* Return TRUE if the single-precision floating-point value encoded in IMM
2160   can be expressed in the AArch64 8-bit signed floating-point format with
2161   3-bit exponent and normalized 4 bits of precision; in other words, the
2162   floating-point value must be expressible as
2163     (+/-) n / 16 * power (2, r)
2164   where n and r are integers such that 16 <= n <= 31 and -3 <= r <= 4.  */
2165
2166static bfd_boolean
2167aarch64_imm_float_p (uint32_t imm)
2168{
2169  /* If a single-precision floating-point value has the following bit
2170     pattern, it can be expressed in the AArch64 8-bit floating-point
2171     format:
2172
2173     3 32222222 2221111111111
2174     1 09876543 21098765432109876543210
2175     n Eeeeeexx xxxx0000000000000000000
2176
2177     where n, e and each x are either 0 or 1 independently, with
2178     E == ~ e.  */
2179
2180  uint32_t pattern;
2181
2182  /* Prepare the pattern for 'Eeeeee'.  */
2183  if (((imm >> 30) & 0x1) == 0)
2184    pattern = 0x3e000000;
2185  else
2186    pattern = 0x40000000;
2187
2188  return (imm & 0x7ffff) == 0		/* lower 19 bits are 0.  */
2189    && ((imm & 0x7e000000) == pattern);	/* bits 25 - 29 == ~ bit 30.  */
2190}
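
/* Equivalently, the representable magnitudes run from 0.125 (16/16 * 2^-3)
   up to 31.0 (31/16 * 2^4); a value such as 0.0625 would need r == -4 and
   is therefore rejected, and zero is not representable in this form.  */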
2191
2192/* Return TRUE if the IEEE double value encoded in IMM can be expressed
2193   as an IEEE float without any loss of precision.  Store the value in
2194   *FPWORD if so.  */
2195
2196static bfd_boolean
2197can_convert_double_to_float (uint64_t imm, uint32_t *fpword)
2198{
2199  /* If a double-precision floating-point value has the following bit
2200     pattern, it can be expressed in a float:
2201
2202     6 66655555555 5544 44444444 33333333 33222222 22221111 111111
2203     3 21098765432 1098 76543210 98765432 10987654 32109876 54321098 76543210
2204     n E~~~eeeeeee ssss ssssssss ssssssss SSS00000 00000000 00000000 00000000
2205
2206       ----------------------------->     nEeeeeee esssssss ssssssss sssssSSS
2207	 if Eeee_eeee != 1111_1111
2208
2209     where n, e, s and S are either 0 or 1 independently and where ~ is the
2210     inverse of E.  */
2211
2212  uint32_t pattern;
2213  uint32_t high32 = imm >> 32;
2214  uint32_t low32 = imm;
2215
2216  /* Lower 29 bits need to be 0s.  */
2217  if ((imm & 0x1fffffff) != 0)
2218    return FALSE;
2219
2220  /* Prepare the pattern for 'Eeeeeeeee'.  */
2221  if (((high32 >> 30) & 0x1) == 0)
2222    pattern = 0x38000000;
2223  else
2224    pattern = 0x40000000;
2225
2226  /* Check E~~~.  */
2227  if ((high32 & 0x78000000) != pattern)
2228    return FALSE;
2229
2230  /* Check Eeee_eeee != 1111_1111.  */
2231  if ((high32 & 0x7ff00000) == 0x47f00000)
2232    return FALSE;
2233
2234  *fpword = ((high32 & 0xc0000000)		/* 1 n bit and 1 E bit.  */
2235	     | ((high32 << 3) & 0x3ffffff8)	/* 7 e and 20 s bits.  */
2236	     | (low32 >> 29));			/* 3 S bits.  */
2237  return TRUE;
2238}
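
/* Worked example: the double 1.0 is 0x3ff0000000000000; its low 29 bits
   are zero and the exponent checks pass, so the function stores the
   single-precision word 0x3f800000 (i.e. 1.0f) in *FPWORD.  */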
2239
2240/* Return true if we should treat OPERAND as a double-precision
2241   floating-point operand rather than a single-precision one.  */
2242static bfd_boolean
2243double_precision_operand_p (const aarch64_opnd_info *operand)
2244{
2245  /* Check for unsuffixed SVE registers, which are allowed
2246     for LDR and STR but not in instructions that require an
2247     immediate.  We get better error messages if we arbitrarily
2248     pick one size, parse the immediate normally, and then
2249     report the match failure in the normal way.  */
2250  return (operand->qualifier == AARCH64_OPND_QLF_NIL
2251	  || aarch64_get_qualifier_esize (operand->qualifier) == 8);
2252}
2253
2254/* Parse a floating-point immediate.  Return TRUE on success and return the
2255   value in *IMMED in the format of IEEE754 single-precision encoding.
2256   *CCP points to the start of the string; DP_P is TRUE when the immediate
2257   is expected to be in double-precision (N.B. this only matters when
2258   hexadecimal representation is involved).  REG_TYPE says which register
2259   names should be treated as registers rather than as symbolic immediates.
2260
2261   This routine accepts any IEEE float; it is up to the callers to reject
2262   invalid ones.  */
2263
2264static bfd_boolean
2265parse_aarch64_imm_float (char **ccp, int *immed, bfd_boolean dp_p,
2266			 aarch64_reg_type reg_type)
2267{
2268  char *str = *ccp;
2269  char *fpnum;
2270  LITTLENUM_TYPE words[MAX_LITTLENUMS];
2271  int found_fpchar = 0;
2272  int64_t val = 0;
2273  unsigned fpword = 0;
2274  bfd_boolean hex_p = FALSE;
2275
2276  skip_past_char (&str, '#');
2277
2278  fpnum = str;
2279  skip_whitespace (fpnum);
2280
2281  if (strncmp (fpnum, "0x", 2) == 0)
2282    {
2283      /* Support the hexadecimal representation of the IEEE754 encoding.
2284	 Double-precision is expected when DP_P is TRUE, otherwise the
2285	 representation should be in single-precision.  */
2286      if (! parse_constant_immediate (&str, &val, reg_type))
2287	goto invalid_fp;
2288
2289      if (dp_p)
2290	{
2291	  if (!can_convert_double_to_float (val, &fpword))
2292	    goto invalid_fp;
2293	}
2294      else if ((uint64_t) val > 0xffffffff)
2295	goto invalid_fp;
2296      else
2297	fpword = val;
2298
2299      hex_p = TRUE;
2300    }
2301  else
2302    {
2303      if (reg_name_p (str, reg_type))
2304	{
2305	  set_recoverable_error (_("immediate operand required"));
2306	  return FALSE;
2307	}
2308
2309      /* We must not accidentally parse an integer as a floating-point number.
2310	 Make sure that the value we parse is not an integer by checking for
2311	 special characters '.' or 'e'.  */
2312      for (; *fpnum != '\0' && *fpnum != ' ' && *fpnum != '\n'; fpnum++)
2313	if (*fpnum == '.' || *fpnum == 'e' || *fpnum == 'E')
2314	  {
2315	    found_fpchar = 1;
2316	    break;
2317	  }
2318
2319      if (!found_fpchar)
2320	return FALSE;
2321    }
2322
2323  if (! hex_p)
2324    {
2325      int i;
2326
2327      if ((str = atof_ieee (str, 's', words)) == NULL)
2328	goto invalid_fp;
2329
2330      /* Our FP word must be 32 bits (single-precision FP).  */
2331      for (i = 0; i < 32 / LITTLENUM_NUMBER_OF_BITS; i++)
2332	{
2333	  fpword <<= LITTLENUM_NUMBER_OF_BITS;
2334	  fpword |= words[i];
2335	}
2336    }
2337
2338  *immed = fpword;
2339  *ccp = str;
2340  return TRUE;
2341
2342invalid_fp:
2343  set_fatal_syntax_error (_("invalid floating-point constant"));
2344  return FALSE;
2345}
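
/* For instance, with DP_P false both "#2.0" and the raw IEEE754 form
   "#0x40000000" end up as the same single-precision word 0x40000000 in
   *IMMED; the hexadecimal path goes through parse_constant_immediate,
   the decimal path through atof_ieee.  */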
2346
2347/* Less-generic immediate-value read function with the possibility of loading
2348   a big (64-bit) immediate, as required by AdvSIMD Modified immediate
2349   instructions.
2350
2351   To prevent the expression parser from pushing a register name into the
2352   symbol table as an undefined symbol, a check is first made to find
2353   out whether STR is a register of type REG_TYPE followed by a comma or
2354   the end of line.  Return FALSE if STR is such a register.  */
2355
2356static bfd_boolean
2357parse_big_immediate (char **str, int64_t *imm, aarch64_reg_type reg_type)
2358{
2359  char *ptr = *str;
2360
2361  if (reg_name_p (ptr, reg_type))
2362    {
2363      set_syntax_error (_("immediate operand required"));
2364      return FALSE;
2365    }
2366
2367  my_get_expression (&inst.reloc.exp, &ptr, GE_OPT_PREFIX, 1);
2368
2369  if (inst.reloc.exp.X_op == O_constant)
2370    *imm = inst.reloc.exp.X_add_number;
2371
2372  *str = ptr;
2373
2374  return TRUE;
2375}
2376
2377/* Record in *RELOC that the operand described by *OPERAND needs a GAS
2378   internal fixup.  If NEED_LIBOPCODES_P is non-zero, the fixup will need
2379   assistance from libopcodes.  */
2380
2381static inline void
2382aarch64_set_gas_internal_fixup (struct reloc *reloc,
2383				const aarch64_opnd_info *operand,
2384				int need_libopcodes_p)
2385{
2386  reloc->type = BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2387  reloc->opnd = operand->type;
2388  if (need_libopcodes_p)
2389    reloc->need_libopcodes_p = 1;
2390}
2391
2392/* Return TRUE if the instruction needs to be fixed up later internally by
2393   the GAS; otherwise return FALSE.  */
2394
2395static inline bfd_boolean
2396aarch64_gas_internal_fixup_p (void)
2397{
2398  return inst.reloc.type == BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP;
2399}
2400
2401/* Assign the immediate value to the relevant field in *OPERAND if
2402   RELOC->EXP is a constant expression; otherwise, flag that *OPERAND
2403   needs an internal fixup in a later stage.
2404   ADDR_OFF_P determines whether it is the field ADDR.OFFSET.IMM or
2405   IMM.VALUE that may get assigned with the constant.  */
2406static inline void
2407assign_imm_if_const_or_fixup_later (struct reloc *reloc,
2408				    aarch64_opnd_info *operand,
2409				    int addr_off_p,
2410				    int need_libopcodes_p,
2411				    int skip_p)
2412{
2413  if (reloc->exp.X_op == O_constant)
2414    {
2415      if (addr_off_p)
2416	operand->addr.offset.imm = reloc->exp.X_add_number;
2417      else
2418	operand->imm.value = reloc->exp.X_add_number;
2419      reloc->type = BFD_RELOC_UNUSED;
2420    }
2421  else
2422    {
2423      aarch64_set_gas_internal_fixup (reloc, operand, need_libopcodes_p);
2424      /* Tell libopcodes to ignore this operand or not.  This is helpful
2425	 when one of the operands needs to be fixed up later but we need
2426	 libopcodes to check the other operands.  */
2427      operand->skip = skip_p;
2428    }
2429}
2430
2431/* Relocation modifiers.  Each entry in the table contains the textual
2432   name for the relocation which may be placed before a symbol used as
2433   a load/store offset, or an ADD immediate.  It must be surrounded by a
2434   leading and trailing colon, for example:
2435
2436	ldr	x0, [x1, #:rello:varsym]
2437	add	x0, x1, #:rello:varsym  */
2438
2439struct reloc_table_entry
2440{
2441  const char *name;
2442  int pc_rel;
2443  bfd_reloc_code_real_type adr_type;
2444  bfd_reloc_code_real_type adrp_type;
2445  bfd_reloc_code_real_type movw_type;
2446  bfd_reloc_code_real_type add_type;
2447  bfd_reloc_code_real_type ldst_type;
2448  bfd_reloc_code_real_type ld_literal_type;
2449};
2450
2451static struct reloc_table_entry reloc_table[] = {
2452  /* Low 12 bits of absolute address: ADD/i and LDR/STR */
2453  {"lo12", 0,
2454   0,				/* adr_type */
2455   0,
2456   0,
2457   BFD_RELOC_AARCH64_ADD_LO12,
2458   BFD_RELOC_AARCH64_LDST_LO12,
2459   0},
2460
2461  /* Higher 21 bits of pc-relative page offset: ADRP */
2462  {"pg_hi21", 1,
2463   0,				/* adr_type */
2464   BFD_RELOC_AARCH64_ADR_HI21_PCREL,
2465   0,
2466   0,
2467   0,
2468   0},
2469
2470  /* Higher 21 bits of pc-relative page offset: ADRP, no check */
2471  {"pg_hi21_nc", 1,
2472   0,				/* adr_type */
2473   BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL,
2474   0,
2475   0,
2476   0,
2477   0},
2478
2479  /* Most significant bits 0-15 of unsigned address/value: MOVZ */
2480  {"abs_g0", 0,
2481   0,				/* adr_type */
2482   0,
2483   BFD_RELOC_AARCH64_MOVW_G0,
2484   0,
2485   0,
2486   0},
2487
2488  /* Most significant bits 0-15 of signed address/value: MOVN/Z */
2489  {"abs_g0_s", 0,
2490   0,				/* adr_type */
2491   0,
2492   BFD_RELOC_AARCH64_MOVW_G0_S,
2493   0,
2494   0,
2495   0},
2496
2497  /* Less significant bits 0-15 of address/value: MOVK, no check */
2498  {"abs_g0_nc", 0,
2499   0,				/* adr_type */
2500   0,
2501   BFD_RELOC_AARCH64_MOVW_G0_NC,
2502   0,
2503   0,
2504   0},
2505
2506  /* Most significant bits 16-31 of unsigned address/value: MOVZ */
2507  {"abs_g1", 0,
2508   0,				/* adr_type */
2509   0,
2510   BFD_RELOC_AARCH64_MOVW_G1,
2511   0,
2512   0,
2513   0},
2514
2515  /* Most significant bits 16-31 of signed address/value: MOVN/Z */
2516  {"abs_g1_s", 0,
2517   0,				/* adr_type */
2518   0,
2519   BFD_RELOC_AARCH64_MOVW_G1_S,
2520   0,
2521   0,
2522   0},
2523
2524  /* Less significant bits 16-31 of address/value: MOVK, no check */
2525  {"abs_g1_nc", 0,
2526   0,				/* adr_type */
2527   0,
2528   BFD_RELOC_AARCH64_MOVW_G1_NC,
2529   0,
2530   0,
2531   0},
2532
2533  /* Most significant bits 32-47 of unsigned address/value: MOVZ */
2534  {"abs_g2", 0,
2535   0,				/* adr_type */
2536   0,
2537   BFD_RELOC_AARCH64_MOVW_G2,
2538   0,
2539   0,
2540   0},
2541
2542  /* Most significant bits 32-47 of signed address/value: MOVN/Z */
2543  {"abs_g2_s", 0,
2544   0,				/* adr_type */
2545   0,
2546   BFD_RELOC_AARCH64_MOVW_G2_S,
2547   0,
2548   0,
2549   0},
2550
2551  /* Less significant bits 32-47 of address/value: MOVK, no check */
2552  {"abs_g2_nc", 0,
2553   0,				/* adr_type */
2554   0,
2555   BFD_RELOC_AARCH64_MOVW_G2_NC,
2556   0,
2557   0,
2558   0},
2559
2560  /* Most significant bits 48-63 of signed/unsigned address/value: MOVZ */
2561  {"abs_g3", 0,
2562   0,				/* adr_type */
2563   0,
2564   BFD_RELOC_AARCH64_MOVW_G3,
2565   0,
2566   0,
2567   0},
2568
2569  /* Get to the page containing GOT entry for a symbol.  */
2570  {"got", 1,
2571   0,				/* adr_type */
2572   BFD_RELOC_AARCH64_ADR_GOT_PAGE,
2573   0,
2574   0,
2575   0,
2576   BFD_RELOC_AARCH64_GOT_LD_PREL19},
2577
2578  /* 12 bit offset into the page containing GOT entry for that symbol.  */
2579  {"got_lo12", 0,
2580   0,				/* adr_type */
2581   0,
2582   0,
2583   0,
2584   BFD_RELOC_AARCH64_LD_GOT_LO12_NC,
2585   0},
2586
2587  /* Less significant bits 0-15 of address/value: MOVK, no check.  */
2588  {"gotoff_g0_nc", 0,
2589   0,				/* adr_type */
2590   0,
2591   BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC,
2592   0,
2593   0,
2594   0},
2595
2596  /* Most significant bits 16-31 of address/value: MOVZ.  */
2597  {"gotoff_g1", 0,
2598   0,				/* adr_type */
2599   0,
2600   BFD_RELOC_AARCH64_MOVW_GOTOFF_G1,
2601   0,
2602   0,
2603   0},
2604
2605  /* 15 bit offset into the page containing GOT entry for that symbol.  */
2606  {"gotoff_lo15", 0,
2607   0,				/* adr_type */
2608   0,
2609   0,
2610   0,
2611   BFD_RELOC_AARCH64_LD64_GOTOFF_LO15,
2612   0},
2613
2614  /* Get to the page containing GOT TLS entry for a symbol */
2615  {"gottprel_g0_nc", 0,
2616   0,				/* adr_type */
2617   0,
2618   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC,
2619   0,
2620   0,
2621   0},
2622
2623  /* Get to the page containing GOT TLS entry for a symbol */
2624  {"gottprel_g1", 0,
2625   0,				/* adr_type */
2626   0,
2627   BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1,
2628   0,
2629   0,
2630   0},
2631
2632  /* Get to the page containing GOT TLS entry for a symbol */
2633  {"tlsgd", 0,
2634   BFD_RELOC_AARCH64_TLSGD_ADR_PREL21, /* adr_type */
2635   BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21,
2636   0,
2637   0,
2638   0,
2639   0},
2640
2641  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2642  {"tlsgd_lo12", 0,
2643   0,				/* adr_type */
2644   0,
2645   0,
2646   BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC,
2647   0,
2648   0},
2649
2650  /* Lower 16 bits of address/value: MOVK.  */
2651  {"tlsgd_g0_nc", 0,
2652   0,				/* adr_type */
2653   0,
2654   BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC,
2655   0,
2656   0,
2657   0},
2658
2659  /* Most significant bits 16-31 of address/value: MOVZ.  */
2660  {"tlsgd_g1", 0,
2661   0,				/* adr_type */
2662   0,
2663   BFD_RELOC_AARCH64_TLSGD_MOVW_G1,
2664   0,
2665   0,
2666   0},
2667
2668  /* Get to the page containing GOT TLS entry for a symbol */
2669  {"tlsdesc", 0,
2670   BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21, /* adr_type */
2671   BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21,
2672   0,
2673   0,
2674   0,
2675   BFD_RELOC_AARCH64_TLSDESC_LD_PREL19},
2676
2677  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2678  {"tlsdesc_lo12", 0,
2679   0,				/* adr_type */
2680   0,
2681   0,
2682   BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC,
2683   BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC,
2684   0},
2685
2686  /* Get to the page containing GOT TLS entry for a symbol.
2687     As with GD, we allocate two consecutive GOT slots for the
2688     module index and module offset; the only difference from GD
2689     is that the module offset should be initialized to zero
2690     without any outstanding runtime relocation. */
2691  {"tlsldm", 0,
2692   BFD_RELOC_AARCH64_TLSLD_ADR_PREL21, /* adr_type */
2693   BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21,
2694   0,
2695   0,
2696   0,
2697   0},
2698
2699  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2700  {"tlsldm_lo12_nc", 0,
2701   0,				/* adr_type */
2702   0,
2703   0,
2704   BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC,
2705   0,
2706   0},
2707
2708  /* 12 bit offset into the module TLS base address.  */
2709  {"dtprel_lo12", 0,
2710   0,				/* adr_type */
2711   0,
2712   0,
2713   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12,
2714   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12,
2715   0},
2716
2717  /* Same as dtprel_lo12, no overflow check.  */
2718  {"dtprel_lo12_nc", 0,
2719   0,				/* adr_type */
2720   0,
2721   0,
2722   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC,
2723   BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC,
2724   0},
2725
2726  /* bits[23:12] of offset to the module TLS base address.  */
2727  {"dtprel_hi12", 0,
2728   0,				/* adr_type */
2729   0,
2730   0,
2731   BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12,
2732   0,
2733   0},
2734
2735  /* bits[15:0] of offset to the module TLS base address.  */
2736  {"dtprel_g0", 0,
2737   0,				/* adr_type */
2738   0,
2739   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0,
2740   0,
2741   0,
2742   0},
2743
2744  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0.  */
2745  {"dtprel_g0_nc", 0,
2746   0,				/* adr_type */
2747   0,
2748   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC,
2749   0,
2750   0,
2751   0},
2752
2753  /* bits[31:16] of offset to the module TLS base address.  */
2754  {"dtprel_g1", 0,
2755   0,				/* adr_type */
2756   0,
2757   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1,
2758   0,
2759   0,
2760   0},
2761
2762  /* No overflow check version of BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1.  */
2763  {"dtprel_g1_nc", 0,
2764   0,				/* adr_type */
2765   0,
2766   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC,
2767   0,
2768   0,
2769   0},
2770
2771  /* bits[47:32] of offset to the module TLS base address.  */
2772  {"dtprel_g2", 0,
2773   0,				/* adr_type */
2774   0,
2775   BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2,
2776   0,
2777   0,
2778   0},
2779
2780  /* Lower 16 bit offset into GOT entry for a symbol */
2781  {"tlsdesc_off_g0_nc", 0,
2782   0,				/* adr_type */
2783   0,
2784   BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC,
2785   0,
2786   0,
2787   0},
2788
2789  /* Higher 16 bit offset into GOT entry for a symbol */
2790  {"tlsdesc_off_g1", 0,
2791   0,				/* adr_type */
2792   0,
2793   BFD_RELOC_AARCH64_TLSDESC_OFF_G1,
2794   0,
2795   0,
2796   0},
2797
2798  /* Get to the page containing GOT TLS entry for a symbol */
2799  {"gottprel", 0,
2800   0,				/* adr_type */
2801   BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21,
2802   0,
2803   0,
2804   0,
2805   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19},
2806
2807  /* 12 bit offset into the page containing GOT TLS entry for a symbol */
2808  {"gottprel_lo12", 0,
2809   0,				/* adr_type */
2810   0,
2811   0,
2812   0,
2813   BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC,
2814   0},
2815
2816  /* Get tp offset for a symbol.  */
2817  {"tprel", 0,
2818   0,				/* adr_type */
2819   0,
2820   0,
2821   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2822   0,
2823   0},
2824
2825  /* Get tp offset for a symbol.  */
2826  {"tprel_lo12", 0,
2827   0,				/* adr_type */
2828   0,
2829   0,
2830   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12,
2831   0,
2832   0},
2833
2834  /* Get tp offset for a symbol.  */
2835  {"tprel_hi12", 0,
2836   0,				/* adr_type */
2837   0,
2838   0,
2839   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12,
2840   0,
2841   0},
2842
2843  /* Get tp offset for a symbol.  */
2844  {"tprel_lo12_nc", 0,
2845   0,				/* adr_type */
2846   0,
2847   0,
2848   BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC,
2849   0,
2850   0},
2851
2852  /* Most significant bits 32-47 of address/value: MOVZ.  */
2853  {"tprel_g2", 0,
2854   0,				/* adr_type */
2855   0,
2856   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2,
2857   0,
2858   0,
2859   0},
2860
2861  /* Most significant bits 16-31 of address/value: MOVZ.  */
2862  {"tprel_g1", 0,
2863   0,				/* adr_type */
2864   0,
2865   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1,
2866   0,
2867   0,
2868   0},
2869
2870  /* Most significant bits 16-31 of address/value: MOVZ, no check.  */
2871  {"tprel_g1_nc", 0,
2872   0,				/* adr_type */
2873   0,
2874   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC,
2875   0,
2876   0,
2877   0},
2878
2879  /* Most significant bits 0-15 of address/value: MOVZ.  */
2880  {"tprel_g0", 0,
2881   0,				/* adr_type */
2882   0,
2883   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0,
2884   0,
2885   0,
2886   0},
2887
2888  /* Most significant bits 0-15 of address/value: MOVZ, no check.  */
2889  {"tprel_g0_nc", 0,
2890   0,				/* adr_type */
2891   0,
2892   BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC,
2893   0,
2894   0,
2895   0},
2896
2897  /* 15-bit offset from GOT entry to the base address of the GOT table.  */
2898  {"gotpage_lo15", 0,
2899   0,
2900   0,
2901   0,
2902   0,
2903   BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15,
2904   0},
2905
2906  /* 14-bit offset from GOT entry to the base address of the GOT table.  */
2907  {"gotpage_lo14", 0,
2908   0,
2909   0,
2910   0,
2911   0,
2912   BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14,
2913   0},
2914};
2915
2916/* Given the address of a pointer pointing to the textual name of a
2917   relocation as may appear in assembler source, attempt to find its
2918   details in reloc_table.  The pointer will be updated to the character
2919   after the trailing colon.  On failure, NULL will be returned;
2920   otherwise return the reloc_table_entry.  */
2921
2922static struct reloc_table_entry *
2923find_reloc_table_entry (char **str)
2924{
2925  unsigned int i;
2926  for (i = 0; i < ARRAY_SIZE (reloc_table); i++)
2927    {
2928      int length = strlen (reloc_table[i].name);
2929
2930      if (strncasecmp (reloc_table[i].name, *str, length) == 0
2931	  && (*str)[length] == ':')
2932	{
2933	  *str += (length + 1);
2934	  return &reloc_table[i];
2935	}
2936    }
2937
2938  return NULL;
2939}
2940
2941/* Mode argument to parse_shift and parser_shifter_operand.  */
2942enum parse_shift_mode
2943{
2944  SHIFTED_NONE,			/* no shifter allowed  */
2945  SHIFTED_ARITH_IMM,		/* "rn{,lsl|lsr|asl|asr|uxt|sxt #n}" or
2946				   "#imm{,lsl #n}"  */
2947  SHIFTED_LOGIC_IMM,		/* "rn{,lsl|lsr|asl|asr|ror #n}" or
2948				   "#imm"  */
2949  SHIFTED_LSL,			/* bare "lsl #n"  */
2950  SHIFTED_MUL,			/* bare "mul #n"  */
2951  SHIFTED_LSL_MSL,		/* "lsl|msl #n"  */
2952  SHIFTED_MUL_VL,		/* "mul vl"  */
2953  SHIFTED_REG_OFFSET		/* [su]xtw|sxtx {#n} or lsl #n  */
2954};
2955
2956/* Parse a <shift> operator on an AArch64 data processing instruction.
2957   Return TRUE on success; otherwise return FALSE.  */
2958static bfd_boolean
2959parse_shift (char **str, aarch64_opnd_info *operand, enum parse_shift_mode mode)
2960{
2961  const struct aarch64_name_value_pair *shift_op;
2962  enum aarch64_modifier_kind kind;
2963  expressionS exp;
2964  int exp_has_prefix;
2965  char *s = *str;
2966  char *p = s;
2967
2968  for (p = *str; ISALPHA (*p); p++)
2969    ;
2970
2971  if (p == *str)
2972    {
2973      set_syntax_error (_("shift expression expected"));
2974      return FALSE;
2975    }
2976
2977  shift_op = hash_find_n (aarch64_shift_hsh, *str, p - *str);
2978
2979  if (shift_op == NULL)
2980    {
2981      set_syntax_error (_("shift operator expected"));
2982      return FALSE;
2983    }
2984
2985  kind = aarch64_get_operand_modifier (shift_op);
2986
2987  if (kind == AARCH64_MOD_MSL && mode != SHIFTED_LSL_MSL)
2988    {
2989      set_syntax_error (_("invalid use of 'MSL'"));
2990      return FALSE;
2991    }
2992
2993  if (kind == AARCH64_MOD_MUL
2994      && mode != SHIFTED_MUL
2995      && mode != SHIFTED_MUL_VL)
2996    {
2997      set_syntax_error (_("invalid use of 'MUL'"));
2998      return FALSE;
2999    }
3000
3001  switch (mode)
3002    {
3003    case SHIFTED_LOGIC_IMM:
3004      if (aarch64_extend_operator_p (kind))
3005	{
3006	  set_syntax_error (_("extending shift is not permitted"));
3007	  return FALSE;
3008	}
3009      break;
3010
3011    case SHIFTED_ARITH_IMM:
3012      if (kind == AARCH64_MOD_ROR)
3013	{
3014	  set_syntax_error (_("'ROR' shift is not permitted"));
3015	  return FALSE;
3016	}
3017      break;
3018
3019    case SHIFTED_LSL:
3020      if (kind != AARCH64_MOD_LSL)
3021	{
3022	  set_syntax_error (_("only 'LSL' shift is permitted"));
3023	  return FALSE;
3024	}
3025      break;
3026
3027    case SHIFTED_MUL:
3028      if (kind != AARCH64_MOD_MUL)
3029	{
3030	  set_syntax_error (_("only 'MUL' is permitted"));
3031	  return FALSE;
3032	}
3033      break;
3034
3035    case SHIFTED_MUL_VL:
3036      /* "MUL VL" consists of two separate tokens.  Require the first
3037	 token to be "MUL" and look for a following "VL".  */
3038      if (kind == AARCH64_MOD_MUL)
3039	{
3040	  skip_whitespace (p);
3041	  if (strncasecmp (p, "vl", 2) == 0 && !ISALPHA (p[2]))
3042	    {
3043	      p += 2;
3044	      kind = AARCH64_MOD_MUL_VL;
3045	      break;
3046	    }
3047	}
3048      set_syntax_error (_("only 'MUL VL' is permitted"));
3049      return FALSE;
3050
3051    case SHIFTED_REG_OFFSET:
3052      if (kind != AARCH64_MOD_UXTW && kind != AARCH64_MOD_LSL
3053	  && kind != AARCH64_MOD_SXTW && kind != AARCH64_MOD_SXTX)
3054	{
3055	  set_fatal_syntax_error
3056	    (_("invalid shift for the register offset addressing mode"));
3057	  return FALSE;
3058	}
3059      break;
3060
3061    case SHIFTED_LSL_MSL:
3062      if (kind != AARCH64_MOD_LSL && kind != AARCH64_MOD_MSL)
3063	{
3064	  set_syntax_error (_("invalid shift operator"));
3065	  return FALSE;
3066	}
3067      break;
3068
3069    default:
3070      abort ();
3071    }
3072
3073  /* Whitespace can appear here if the next thing is a bare digit.  */
3074  skip_whitespace (p);
3075
3076  /* Parse shift amount.  */
3077  exp_has_prefix = 0;
3078  if ((mode == SHIFTED_REG_OFFSET && *p == ']') || kind == AARCH64_MOD_MUL_VL)
3079    exp.X_op = O_absent;
3080  else
3081    {
3082      if (is_immediate_prefix (*p))
3083	{
3084	  p++;
3085	  exp_has_prefix = 1;
3086	}
3087      my_get_expression (&exp, &p, GE_NO_PREFIX, 0);
3088    }
3089  if (kind == AARCH64_MOD_MUL_VL)
3090    /* For consistency, give MUL VL the same shift amount as an implicit
3091       MUL #1.  */
3092    operand->shifter.amount = 1;
3093  else if (exp.X_op == O_absent)
3094    {
3095      if (!aarch64_extend_operator_p (kind) || exp_has_prefix)
3096	{
3097	  set_syntax_error (_("missing shift amount"));
3098	  return FALSE;
3099	}
3100      operand->shifter.amount = 0;
3101    }
3102  else if (exp.X_op != O_constant)
3103    {
3104      set_syntax_error (_("constant shift amount required"));
3105      return FALSE;
3106    }
3107  /* For parsing purposes, MUL #n has no inherent range.  The range
3108     depends on the operand and will be checked by operand-specific
3109     routines.  */
3110  else if (kind != AARCH64_MOD_MUL
3111	   && (exp.X_add_number < 0 || exp.X_add_number > 63))
3112    {
3113      set_fatal_syntax_error (_("shift amount out of range 0 to 63"));
3114      return FALSE;
3115    }
3116  else
3117    {
3118      operand->shifter.amount = exp.X_add_number;
3119      operand->shifter.amount_present = 1;
3120    }
3121
3122  operand->shifter.operator_present = 1;
3123  operand->shifter.kind = kind;
3124
3125  *str = p;
3126  return TRUE;
3127}
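
/* For example, when called with MODE == SHIFTED_REG_OFFSET on the tail
   "sxtw #2]" of an address operand, this routine sets .shifter.kind to
   AARCH64_MOD_SXTW, .shifter.amount to 2 and .shifter.amount_present to 1,
   leaving *STR pointing at the ']'.  */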
3128
3129/* Parse a <shifter_operand> for a data processing instruction:
3130
3131      #<immediate>
3132      #<immediate>, LSL #imm
3133
3134   Validation of immediate operands is deferred to md_apply_fix.
3135
3136   Return TRUE on success; otherwise return FALSE.  */
3137
3138static bfd_boolean
3139parse_shifter_operand_imm (char **str, aarch64_opnd_info *operand,
3140			   enum parse_shift_mode mode)
3141{
3142  char *p;
3143
3144  if (mode != SHIFTED_ARITH_IMM && mode != SHIFTED_LOGIC_IMM)
3145    return FALSE;
3146
3147  p = *str;
3148
3149  /* Accept an immediate expression.  */
3150  if (! my_get_expression (&inst.reloc.exp, &p, GE_OPT_PREFIX, 1))
3151    return FALSE;
3152
3153  /* Accept optional LSL for arithmetic immediate values.  */
3154  if (mode == SHIFTED_ARITH_IMM && skip_past_comma (&p))
3155    if (! parse_shift (&p, operand, SHIFTED_LSL))
3156      return FALSE;
3157
3158  /* Do not accept any shifter for logical immediate values.  */
3159  if (mode == SHIFTED_LOGIC_IMM && skip_past_comma (&p)
3160      && parse_shift (&p, operand, mode))
3161    {
3162      set_syntax_error (_("unexpected shift operator"));
3163      return FALSE;
3164    }
3165
3166  *str = p;
3167  return TRUE;
3168}
3169
3170/* Parse a <shifter_operand> for a data processing instruction:
3171
3172      <Rm>
3173      <Rm>, <shift>
3174      #<immediate>
3175      #<immediate>, LSL #imm
3176
3177   where <shift> is handled by parse_shift above, and the last two
3178   cases are handled by the function above.
3179
3180   Validation of immediate operands is deferred to md_apply_fix.
3181
3182   Return TRUE on success; otherwise return FALSE.  */
3183
3184static bfd_boolean
3185parse_shifter_operand (char **str, aarch64_opnd_info *operand,
3186		       enum parse_shift_mode mode)
3187{
3188  const reg_entry *reg;
3189  aarch64_opnd_qualifier_t qualifier;
3190  enum aarch64_operand_class opd_class
3191    = aarch64_get_operand_class (operand->type);
3192
3193  reg = aarch64_reg_parse_32_64 (str, &qualifier);
3194  if (reg)
3195    {
3196      if (opd_class == AARCH64_OPND_CLASS_IMMEDIATE)
3197	{
3198	  set_syntax_error (_("unexpected register in the immediate operand"));
3199	  return FALSE;
3200	}
3201
3202      if (!aarch64_check_reg_type (reg, REG_TYPE_R_Z))
3203	{
3204	  set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_Z)));
3205	  return FALSE;
3206	}
3207
3208      operand->reg.regno = reg->number;
3209      operand->qualifier = qualifier;
3210
3211      /* Accept optional shift operation on register.  */
3212      if (! skip_past_comma (str))
3213	return TRUE;
3214
3215      if (! parse_shift (str, operand, mode))
3216	return FALSE;
3217
3218      return TRUE;
3219    }
3220  else if (opd_class == AARCH64_OPND_CLASS_MODIFIED_REG)
3221    {
3222      set_syntax_error
3223	(_("integer register expected in the extended/shifted operand "
3224	   "register"));
3225      return FALSE;
3226    }
3227
3228  /* We have a shifted immediate variable.  */
3229  return parse_shifter_operand_imm (str, operand, mode);
3230}
3231
3232/* Return TRUE on success; return FALSE otherwise.  */
3233
3234static bfd_boolean
3235parse_shifter_operand_reloc (char **str, aarch64_opnd_info *operand,
3236			     enum parse_shift_mode mode)
3237{
3238  char *p = *str;
3239
3240  /* Determine if we have the sequence of characters #: or just :
3241     coming next.  If we do, then we check for a :rello: relocation
3242     modifier.  If we don't, punt the whole lot to
3243     parse_shifter_operand.  */
3244
3245  if ((p[0] == '#' && p[1] == ':') || p[0] == ':')
3246    {
3247      struct reloc_table_entry *entry;
3248
3249      if (p[0] == '#')
3250	p += 2;
3251      else
3252	p++;
3253      *str = p;
3254
3255      /* Try to parse a relocation.  Anything else is an error.  */
3256      if (!(entry = find_reloc_table_entry (str)))
3257	{
3258	  set_syntax_error (_("unknown relocation modifier"));
3259	  return FALSE;
3260	}
3261
3262      if (entry->add_type == 0)
3263	{
3264	  set_syntax_error
3265	    (_("this relocation modifier is not allowed on this instruction"));
3266	  return FALSE;
3267	}
3268
3269      /* Save str before we decompose it.  */
3270      p = *str;
3271
3272      /* Next, we parse the expression.  */
3273      if (! my_get_expression (&inst.reloc.exp, str, GE_NO_PREFIX, 1))
3274	return FALSE;
3275
3276      /* Record the relocation type (use the ADD variant here).  */
3277      inst.reloc.type = entry->add_type;
3278      inst.reloc.pc_rel = entry->pc_rel;
3279
3280      /* If str is empty, we've reached the end, stop here.  */
3281      if (**str == '\0')
3282	return TRUE;
3283
3284      /* Otherwise, we have a shifted reloc modifier, so rewind to
3285         recover the variable name and continue parsing for the shifter.  */
3286      *str = p;
3287      return parse_shifter_operand_imm (str, operand, mode);
3288    }
3289
3290  return parse_shifter_operand (str, operand, mode);
3291}
3292
3293/* Parse all forms of an address expression.  Information is written
3294   to *OPERAND and/or inst.reloc.
3295
3296   The A64 instruction set has the following addressing modes:
3297
3298   Offset
3299     [base]			 // in SIMD ld/st structure
3300     [base{,#0}]		 // in ld/st exclusive
3301     [base{,#imm}]
3302     [base,Xm{,LSL #imm}]
3303     [base,Xm,SXTX {#imm}]
3304     [base,Wm,(S|U)XTW {#imm}]
3305   Pre-indexed
3306     [base,#imm]!
3307   Post-indexed
3308     [base],#imm
3309     [base],Xm			 // in SIMD ld/st structure
3310   PC-relative (literal)
3311     label
3312   SVE:
3313     [base,#imm,MUL VL]
3314     [base,Zm.D{,LSL #imm}]
3315     [base,Zm.S,(S|U)XTW {#imm}]
3316     [base,Zm.D,(S|U)XTW {#imm}] // ignores top 32 bits of Zm.D elements
3317     [Zn.S,#imm]
3318     [Zn.D,#imm]
3319     [Zn.S,Zm.S{,LSL #imm}]      // in ADR
3320     [Zn.D,Zm.D{,LSL #imm}]      // in ADR
3321     [Zn.D,Zm.D,(S|U)XTW {#imm}] // in ADR
3322
3323   (As a convenience, the notation "=immediate" is permitted in conjunction
3324   with the pc-relative literal load instructions to automatically place an
3325   immediate value or symbolic address in a nearby literal pool and generate
3326   a hidden label which references it.)
3327
3328   Upon a successful parsing, the address structure in *OPERAND will be
3329   filled in the following way:
3330
3331     .base_regno = <base>
3332     .offset.is_reg	// 1 if the offset is a register
3333     .offset.imm = <imm>
3334     .offset.regno = <Rm>
3335
3336   For different addressing modes defined in the A64 ISA:
3337
3338   Offset
3339     .pcrel=0; .preind=1; .postind=0; .writeback=0
3340   Pre-indexed
3341     .pcrel=0; .preind=1; .postind=0; .writeback=1
3342   Post-indexed
3343     .pcrel=0; .preind=0; .postind=1; .writeback=1
3344   PC-relative (literal)
3345     .pcrel=1; .preind=1; .postind=0; .writeback=0
3346
3347   The shift/extension information, if any, will be stored in .shifter.
3348   The base and offset qualifiers will be stored in *BASE_QUALIFIER and
3349   *OFFSET_QUALIFIER respectively, with NIL being used if there's no
3350   corresponding register.
3351
3352   BASE_TYPE says which types of base register should be accepted and
3353   OFFSET_TYPE says the same for offset registers.  IMM_SHIFT_MODE
3354   is the type of shifter that is allowed for immediate offsets,
3355   or SHIFTED_NONE if none.
3356
3357   In all other respects, it is the caller's responsibility to check
3358   for addressing modes not supported by the instruction, and to set
3359   inst.reloc.type.  */
3360
3361static bfd_boolean
3362parse_address_main (char **str, aarch64_opnd_info *operand,
3363		    aarch64_opnd_qualifier_t *base_qualifier,
3364		    aarch64_opnd_qualifier_t *offset_qualifier,
3365		    aarch64_reg_type base_type, aarch64_reg_type offset_type,
3366		    enum parse_shift_mode imm_shift_mode)
3367{
3368  char *p = *str;
3369  const reg_entry *reg;
3370  expressionS *exp = &inst.reloc.exp;
3371
3372  *base_qualifier = AARCH64_OPND_QLF_NIL;
3373  *offset_qualifier = AARCH64_OPND_QLF_NIL;
3374  if (! skip_past_char (&p, '['))
3375    {
3376      /* =immediate or label.  */
3377      operand->addr.pcrel = 1;
3378      operand->addr.preind = 1;
3379
3380      /* #:<reloc_op>:<symbol>  */
3381      skip_past_char (&p, '#');
3382      if (skip_past_char (&p, ':'))
3383	{
3384	  bfd_reloc_code_real_type ty;
3385	  struct reloc_table_entry *entry;
3386
3387	  /* Try to parse a relocation modifier.  Anything else is
3388	     an error.  */
3389	  entry = find_reloc_table_entry (&p);
3390	  if (! entry)
3391	    {
3392	      set_syntax_error (_("unknown relocation modifier"));
3393	      return FALSE;
3394	    }
3395
3396	  switch (operand->type)
3397	    {
3398	    case AARCH64_OPND_ADDR_PCREL21:
3399	      /* adr */
3400	      ty = entry->adr_type;
3401	      break;
3402
3403	    default:
3404	      ty = entry->ld_literal_type;
3405	      break;
3406	    }
3407
3408	  if (ty == 0)
3409	    {
3410	      set_syntax_error
3411		(_("this relocation modifier is not allowed on this "
3412		   "instruction"));
3413	      return FALSE;
3414	    }
3415
3416	  /* #:<reloc_op>:  */
3417	  if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3418	    {
3419	      set_syntax_error (_("invalid relocation expression"));
3420	      return FALSE;
3421	    }
3422
3423	  /* #:<reloc_op>:<expr>  */
3424	  /* Record the relocation type.  */
3425	  inst.reloc.type = ty;
3426	  inst.reloc.pc_rel = entry->pc_rel;
3427	}
3428      else
3429	{
3430
3431	  if (skip_past_char (&p, '='))
3432	    /* =immediate; need to generate the literal in the literal pool. */
3433	    inst.gen_lit_pool = 1;
3434
3435	  if (!my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3436	    {
3437	      set_syntax_error (_("invalid address"));
3438	      return FALSE;
3439	    }
3440	}
3441
3442      *str = p;
3443      return TRUE;
3444    }
3445
3446  /* [ */
3447
3448  reg = aarch64_addr_reg_parse (&p, base_type, base_qualifier);
3449  if (!reg || !aarch64_check_reg_type (reg, base_type))
3450    {
3451      set_syntax_error (_(get_reg_expected_msg (base_type)));
3452      return FALSE;
3453    }
3454  operand->addr.base_regno = reg->number;
3455
3456  /* [Xn */
3457  if (skip_past_comma (&p))
3458    {
3459      /* [Xn, */
3460      operand->addr.preind = 1;
3461
3462      reg = aarch64_addr_reg_parse (&p, offset_type, offset_qualifier);
3463      if (reg)
3464	{
3465	  if (!aarch64_check_reg_type (reg, offset_type))
3466	    {
3467	      set_syntax_error (_(get_reg_expected_msg (offset_type)));
3468	      return FALSE;
3469	    }
3470
3471	  /* [Xn,Rm  */
3472	  operand->addr.offset.regno = reg->number;
3473	  operand->addr.offset.is_reg = 1;
3474	  /* Shifted index.  */
3475	  if (skip_past_comma (&p))
3476	    {
3477	      /* [Xn,Rm,  */
3478	      if (! parse_shift (&p, operand, SHIFTED_REG_OFFSET))
		/* Use the diagnostics set in parse_shift, so do not set a
		   new error message here.  */
3481		return FALSE;
3482	    }
3483	  /* We only accept:
3484	     [base,Xm{,LSL #imm}]
3485	     [base,Xm,SXTX {#imm}]
3486	     [base,Wm,(S|U)XTW {#imm}]  */
3487	  if (operand->shifter.kind == AARCH64_MOD_NONE
3488	      || operand->shifter.kind == AARCH64_MOD_LSL
3489	      || operand->shifter.kind == AARCH64_MOD_SXTX)
3490	    {
3491	      if (*offset_qualifier == AARCH64_OPND_QLF_W)
3492		{
3493		  set_syntax_error (_("invalid use of 32-bit register offset"));
3494		  return FALSE;
3495		}
3496	      if (aarch64_get_qualifier_esize (*base_qualifier)
3497		  != aarch64_get_qualifier_esize (*offset_qualifier))
3498		{
3499		  set_syntax_error (_("offset has different size from base"));
3500		  return FALSE;
3501		}
3502	    }
3503	  else if (*offset_qualifier == AARCH64_OPND_QLF_X)
3504	    {
3505	      set_syntax_error (_("invalid use of 64-bit register offset"));
3506	      return FALSE;
3507	    }
3508	}
3509      else
3510	{
3511	  /* [Xn,#:<reloc_op>:<symbol>  */
3512	  skip_past_char (&p, '#');
3513	  if (skip_past_char (&p, ':'))
3514	    {
3515	      struct reloc_table_entry *entry;
3516
3517	      /* Try to parse a relocation modifier.  Anything else is
3518		 an error.  */
3519	      if (!(entry = find_reloc_table_entry (&p)))
3520		{
3521		  set_syntax_error (_("unknown relocation modifier"));
3522		  return FALSE;
3523		}
3524
3525	      if (entry->ldst_type == 0)
3526		{
3527		  set_syntax_error
3528		    (_("this relocation modifier is not allowed on this "
3529		       "instruction"));
3530		  return FALSE;
3531		}
3532
3533	      /* [Xn,#:<reloc_op>:  */
3534	      /* We now have the group relocation table entry corresponding to
3535	         the name in the assembler source.  Next, we parse the
3536	         expression.  */
3537	      if (! my_get_expression (exp, &p, GE_NO_PREFIX, 1))
3538		{
3539		  set_syntax_error (_("invalid relocation expression"));
3540		  return FALSE;
3541		}
3542
3543	      /* [Xn,#:<reloc_op>:<expr>  */
3544	      /* Record the load/store relocation type.  */
3545	      inst.reloc.type = entry->ldst_type;
3546	      inst.reloc.pc_rel = entry->pc_rel;
3547	    }
3548	  else
3549	    {
3550	      if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3551		{
3552		  set_syntax_error (_("invalid expression in the address"));
3553		  return FALSE;
3554		}
3555	      /* [Xn,<expr>  */
3556	      if (imm_shift_mode != SHIFTED_NONE && skip_past_comma (&p))
3557		/* [Xn,<expr>,<shifter>  */
3558		if (! parse_shift (&p, operand, imm_shift_mode))
3559		  return FALSE;
3560	    }
3561	}
3562    }
3563
3564  if (! skip_past_char (&p, ']'))
3565    {
3566      set_syntax_error (_("']' expected"));
3567      return FALSE;
3568    }
3569
3570  if (skip_past_char (&p, '!'))
3571    {
3572      if (operand->addr.preind && operand->addr.offset.is_reg)
3573	{
3574	  set_syntax_error (_("register offset not allowed in pre-indexed "
3575			      "addressing mode"));
3576	  return FALSE;
3577	}
3578      /* [Xn]! */
3579      operand->addr.writeback = 1;
3580    }
3581  else if (skip_past_comma (&p))
3582    {
3583      /* [Xn], */
3584      operand->addr.postind = 1;
3585      operand->addr.writeback = 1;
3586
3587      if (operand->addr.preind)
3588	{
3589	  set_syntax_error (_("cannot combine pre- and post-indexing"));
3590	  return FALSE;
3591	}
3592
3593      reg = aarch64_reg_parse_32_64 (&p, offset_qualifier);
3594      if (reg)
3595	{
3596	  /* [Xn],Xm */
3597	  if (!aarch64_check_reg_type (reg, REG_TYPE_R_64))
3598	    {
3599	      set_syntax_error (_(get_reg_expected_msg (REG_TYPE_R_64)));
3600	      return FALSE;
3601	    }
3602
3603	  operand->addr.offset.regno = reg->number;
3604	  operand->addr.offset.is_reg = 1;
3605	}
3606      else if (! my_get_expression (exp, &p, GE_OPT_PREFIX, 1))
3607	{
3608	  /* [Xn],#expr */
3609	  set_syntax_error (_("invalid expression in the address"));
3610	  return FALSE;
3611	}
3612    }
3613
3614  /* If at this point neither .preind nor .postind is set, we have a
3615     bare [Rn]{!}; reject [Rn]! but accept [Rn] as a shorthand for [Rn,#0].  */
3616  if (operand->addr.preind == 0 && operand->addr.postind == 0)
3617    {
3618      if (operand->addr.writeback)
3619	{
3620	  /* Reject [Rn]!   */
3621	  set_syntax_error (_("missing offset in the pre-indexed address"));
3622	  return FALSE;
3623	}
3624      operand->addr.preind = 1;
3625      inst.reloc.exp.X_op = O_constant;
3626      inst.reloc.exp.X_add_number = 0;
3627    }
3628
3629  *str = p;
3630  return TRUE;
3631}
3632
3633/* Parse a base AArch64 address (as opposed to an SVE one).  Return TRUE
3634   on success.  */
3635static bfd_boolean
3636parse_address (char **str, aarch64_opnd_info *operand)
3637{
3638  aarch64_opnd_qualifier_t base_qualifier, offset_qualifier;
3639  return parse_address_main (str, operand, &base_qualifier, &offset_qualifier,
3640			     REG_TYPE_R64_SP, REG_TYPE_R_Z, SHIFTED_NONE);
3641}
3642
3643/* Parse an address in which SVE vector registers and MUL VL are allowed.
3644   The arguments have the same meaning as for parse_address_main.
3645   Return TRUE on success.  */
3646static bfd_boolean
3647parse_sve_address (char **str, aarch64_opnd_info *operand,
3648		   aarch64_opnd_qualifier_t *base_qualifier,
3649		   aarch64_opnd_qualifier_t *offset_qualifier)
3650{
3651  return parse_address_main (str, operand, base_qualifier, offset_qualifier,
3652			     REG_TYPE_SVE_BASE, REG_TYPE_SVE_OFFSET,
3653			     SHIFTED_MUL_VL);
3654}
3655
3656/* Parse an operand for a MOVZ, MOVN or MOVK instruction.
3657   Return TRUE on success; otherwise return FALSE.  */
3658static bfd_boolean
3659parse_half (char **str, int *internal_fixup_p)
3660{
3661  char *p = *str;
3662
3663  skip_past_char (&p, '#');
3664
3665  gas_assert (internal_fixup_p);
3666  *internal_fixup_p = 0;
3667
3668  if (*p == ':')
3669    {
3670      struct reloc_table_entry *entry;
3671
3672      /* Try to parse a relocation.  Anything else is an error.  */
3673      ++p;
3674      if (!(entry = find_reloc_table_entry (&p)))
3675	{
3676	  set_syntax_error (_("unknown relocation modifier"));
3677	  return FALSE;
3678	}
3679
3680      if (entry->movw_type == 0)
3681	{
3682	  set_syntax_error
3683	    (_("this relocation modifier is not allowed on this instruction"));
3684	  return FALSE;
3685	}
3686
3687      inst.reloc.type = entry->movw_type;
3688    }
3689  else
3690    *internal_fixup_p = 1;
3691
3692  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3693    return FALSE;
3694
3695  *str = p;
3696  return TRUE;
3697}
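
/* Typical inputs for this parser ("sym" is just a placeholder symbol) are
   the MOVZ/MOVK halves of an absolute address materialisation:

	movz	x0, #:abs_g1:sym
	movk	x0, #:abs_g0_nc:sym

   The first operand selects BFD_RELOC_AARCH64_MOVW_G1 from the relocation
   table above, the second BFD_RELOC_AARCH64_MOVW_G0_NC; a plain immediate
   such as "#1234" instead takes the *INTERNAL_FIXUP_P route.  */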
3698
3699/* Parse an operand for an ADRP instruction:
3700     ADRP <Xd>, <label>
3701   Return TRUE on success; otherwise return FALSE.  */
3702
3703static bfd_boolean
3704parse_adrp (char **str)
3705{
3706  char *p;
3707
3708  p = *str;
3709  if (*p == ':')
3710    {
3711      struct reloc_table_entry *entry;
3712
3713      /* Try to parse a relocation.  Anything else is an error.  */
3714      ++p;
3715      if (!(entry = find_reloc_table_entry (&p)))
3716	{
3717	  set_syntax_error (_("unknown relocation modifier"));
3718	  return FALSE;
3719	}
3720
3721      if (entry->adrp_type == 0)
3722	{
3723	  set_syntax_error
3724	    (_("this relocation modifier is not allowed on this instruction"));
3725	  return FALSE;
3726	}
3727
3728      inst.reloc.type = entry->adrp_type;
3729    }
3730  else
3731    inst.reloc.type = BFD_RELOC_AARCH64_ADR_HI21_PCREL;
3732
3733  inst.reloc.pc_rel = 1;
3734
3735  if (! my_get_expression (&inst.reloc.exp, &p, GE_NO_PREFIX, 1))
3736    return FALSE;
3737
3738  *str = p;
3739  return TRUE;
3740}
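
/* For example, "adrp x0, sym" uses the default
   BFD_RELOC_AARCH64_ADR_HI21_PCREL, while "adrp x0, :got:sym" picks up
   BFD_RELOC_AARCH64_ADR_GOT_PAGE from the relocation table ("sym" is just
   a placeholder name).  */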
3741
3742/* Miscellaneous. */
3743
3744/* Parse a symbolic operand such as "pow2" at *STR.  ARRAY is an array
3745   of SIZE tokens in which index I gives the token for field value I,
3746   or is null if field value I is invalid.  REG_TYPE says which register
3747   names should be treated as registers rather than as symbolic immediates.
3748
3749   Return true on success, moving *STR past the operand and storing the
3750   field value in *VAL.  */
3751
3752static int
3753parse_enum_string (char **str, int64_t *val, const char *const *array,
3754		   size_t size, aarch64_reg_type reg_type)
3755{
3756  expressionS exp;
3757  char *p, *q;
3758  size_t i;
3759
3760  /* Match C-like tokens.  */
3761  p = q = *str;
3762  while (ISALNUM (*q))
3763    q++;
3764
3765  for (i = 0; i < size; ++i)
3766    if (array[i]
3767	&& strncasecmp (array[i], p, q - p) == 0
3768	&& array[i][q - p] == 0)
3769      {
3770	*val = i;
3771	*str = q;
3772	return TRUE;
3773      }
3774
3775  if (!parse_immediate_expression (&p, &exp, reg_type))
3776    return FALSE;
3777
3778  if (exp.X_op == O_constant
3779      && (uint64_t) exp.X_add_number < size)
3780    {
3781      *val = exp.X_add_number;
3782      *str = p;
3783      return TRUE;
3784    }
3785
3786  /* Use the default error for this operand.  */
3787  return FALSE;
3788}
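
/* As an illustration (assuming the SVE predicate-pattern table, where
   "pow2" is entry 0 and "all" is entry 31): both "pow2" and "#0" store 0
   in *VAL, the former via the string match above and the latter via the
   immediate-expression fallback.  */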
3789
3790/* Parse an option for a preload instruction.  Returns the encoding for the
3791   option, or PARSE_FAIL.  */
3792
3793static int
3794parse_pldop (char **str)
3795{
3796  char *p, *q;
3797  const struct aarch64_name_value_pair *o;
3798
3799  p = q = *str;
3800  while (ISALNUM (*q))
3801    q++;
3802
3803  o = hash_find_n (aarch64_pldop_hsh, p, q - p);
3804  if (!o)
3805    return PARSE_FAIL;
3806
3807  *str = q;
3808  return o->value;
3809}
3810
3811/* Parse an option for a barrier instruction.  Returns the encoding for the
3812   option, or PARSE_FAIL.  */
3813
3814static int
3815parse_barrier (char **str)
3816{
3817  char *p, *q;
3818  const asm_barrier_opt *o;
3819
3820  p = q = *str;
3821  while (ISALPHA (*q))
3822    q++;
3823
3824  o = hash_find_n (aarch64_barrier_opt_hsh, p, q - p);
3825  if (!o)
3826    return PARSE_FAIL;
3827
3828  *str = q;
3829  return o->value;
3830}
3831
3832/* Parse an operand for a PSB barrier.  Set *HINT_OPT to the hint-option
3833   record and return 0 if successful.  Otherwise return PARSE_FAIL.  */
3834
3835static int
3836parse_barrier_psb (char **str,
3837		   const struct aarch64_name_value_pair ** hint_opt)
3838{
3839  char *p, *q;
3840  const struct aarch64_name_value_pair *o;
3841
3842  p = q = *str;
3843  while (ISALPHA (*q))
3844    q++;
3845
3846  o = hash_find_n (aarch64_hint_opt_hsh, p, q - p);
3847  if (!o)
3848    {
3849      set_fatal_syntax_error
3850	( _("unknown or missing option to PSB"));
3851      return PARSE_FAIL;
3852    }
3853
3854  if (o->value != 0x11)
3855    {
3856      /* PSB only accepts option name 'CSYNC'.  */
3857      set_syntax_error
3858	(_("the specified option is not accepted for PSB"));
3859      return PARSE_FAIL;
3860    }
3861
3862  *str = q;
3863  *hint_opt = o;
3864  return 0;
3865}
3866
3867/* Parse a system register or a PSTATE field name for an MSR/MRS instruction.
3868   Returns the encoding for the option, or PARSE_FAIL.
3869
3870   If IMPLE_DEFINED_P is non-zero, the function will also try to parse the
3871   implementation defined system register name S<op0>_<op1>_<Cn>_<Cm>_<op2>.
3872
3873   If PSTATEFIELD_P is non-zero, the function will parse the name as a PSTATE
3874   field, otherwise as a system register.
3875*/
3876
3877static int
3878parse_sys_reg (char **str, struct hash_control *sys_regs,
3879	       int imple_defined_p, int pstatefield_p)
3880{
3881  char *p, *q;
3882  char buf[32];
3883  const aarch64_sys_reg *o;
3884  int value;
3885
3886  p = buf;
3887  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3888    if (p < buf + 31)
3889      *p++ = TOLOWER (*q);
3890  *p = '\0';
  /* Assert that BUF is large enough.  */
3892  gas_assert (p - buf == q - *str);
3893
3894  o = hash_find (sys_regs, buf);
3895  if (!o)
3896    {
3897      if (!imple_defined_p)
3898	return PARSE_FAIL;
3899      else
3900	{
3901	  /* Parse S<op0>_<op1>_<Cn>_<Cm>_<op2>.  */
3902	  unsigned int op0, op1, cn, cm, op2;
3903
3904	  if (sscanf (buf, "s%u_%u_c%u_c%u_%u", &op0, &op1, &cn, &cm, &op2)
3905	      != 5)
3906	    return PARSE_FAIL;
3907	  if (op0 > 3 || op1 > 7 || cn > 15 || cm > 15 || op2 > 7)
3908	    return PARSE_FAIL;
3909	  value = (op0 << 14) | (op1 << 11) | (cn << 7) | (cm << 3) | op2;
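	  /* Worked example: the implementation-defined name "s3_0_c15_c2_0"
	     parses as op0 = 3, op1 = 0, Cn = 15, Cm = 2, op2 = 0, giving
	     value = (3 << 14) | (0 << 11) | (15 << 7) | (2 << 3) | 0
	     = 0xc790.  */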
3910	}
3911    }
3912  else
3913    {
3914      if (pstatefield_p && !aarch64_pstatefield_supported_p (cpu_variant, o))
3915	as_bad (_("selected processor does not support PSTATE field "
3916		  "name '%s'"), buf);
3917      if (!pstatefield_p && !aarch64_sys_reg_supported_p (cpu_variant, o))
3918	as_bad (_("selected processor does not support system register "
3919		  "name '%s'"), buf);
3920      if (aarch64_sys_reg_deprecated_p (o))
3921	as_warn (_("system register name '%s' is deprecated and may be "
3922		   "removed in a future release"), buf);
3923      value = o->value;
3924    }
3925
3926  *str = q;
3927  return value;
3928}
3929
3930/* Parse a system reg for ic/dc/at/tlbi instructions.  Returns the table entry
3931   for the option, or NULL.  */
3932
3933static const aarch64_sys_ins_reg *
3934parse_sys_ins_reg (char **str, struct hash_control *sys_ins_regs)
3935{
3936  char *p, *q;
3937  char buf[32];
3938  const aarch64_sys_ins_reg *o;
3939
3940  p = buf;
3941  for (q = *str; ISALNUM (*q) || *q == '_'; q++)
3942    if (p < buf + 31)
3943      *p++ = TOLOWER (*q);
3944  *p = '\0';
3945
3946  o = hash_find (sys_ins_regs, buf);
3947  if (!o)
3948    return NULL;
3949
3950  if (!aarch64_sys_ins_reg_supported_p (cpu_variant, o))
3951    as_bad (_("selected processor does not support system register "
3952	      "name '%s'"), buf);
3953
3954  *str = q;
3955  return o;
3956}
3957
3958#define po_char_or_fail(chr) do {				\
3959    if (! skip_past_char (&str, chr))				\
3960      goto failure;						\
3961} while (0)
3962
3963#define po_reg_or_fail(regtype) do {				\
3964    val = aarch64_reg_parse (&str, regtype, &rtype, NULL);	\
3965    if (val == PARSE_FAIL)					\
3966      {								\
3967	set_default_error ();					\
3968	goto failure;						\
3969      }								\
3970  } while (0)
3971
3972#define po_int_reg_or_fail(reg_type) do {			\
3973    reg = aarch64_reg_parse_32_64 (&str, &qualifier);		\
3974    if (!reg || !aarch64_check_reg_type (reg, reg_type))	\
3975      {								\
3976	set_default_error ();					\
3977	goto failure;						\
3978      }								\
3979    info->reg.regno = reg->number;				\
3980    info->qualifier = qualifier;				\
3981  } while (0)
3982
3983#define po_imm_nc_or_fail() do {				\
3984    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
3985      goto failure;						\
3986  } while (0)
3987
3988#define po_imm_or_fail(min, max) do {				\
3989    if (! parse_constant_immediate (&str, &val, imm_reg_type))	\
3990      goto failure;						\
3991    if (val < min || val > max)					\
3992      {								\
3993	set_fatal_syntax_error (_("immediate value out of range "\
3994#min " to "#max));						\
3995	goto failure;						\
3996      }								\
3997  } while (0)
3998
3999#define po_enum_or_fail(array) do {				\
4000    if (!parse_enum_string (&str, &val, array,			\
4001			    ARRAY_SIZE (array), imm_reg_type))	\
4002      goto failure;						\
4003  } while (0)
4004
4005#define po_misc_or_fail(expr) do {				\
4006    if (!expr)							\
4007      goto failure;						\
4008  } while (0)
4009
/* Encode the 12-bit imm field of Add/sub immediate.  */
4011static inline uint32_t
4012encode_addsub_imm (uint32_t imm)
4013{
4014  return imm << 10;
4015}
4016
/* Encode the shift amount field of Add/sub immediate.  */
4018static inline uint32_t
4019encode_addsub_imm_shift_amount (uint32_t cnt)
4020{
4021  return cnt << 22;
4022}
4023
/* Encode the imm field of Adr instruction.  */
4026static inline uint32_t
4027encode_adr_imm (uint32_t imm)
4028{
4029  return (((imm & 0x3) << 29)	/*  [1:0] -> [30:29] */
4030	  | ((imm & (0x7ffff << 2)) << 3));	/* [20:2] -> [23:5]  */
4031}
4032
/* Encode the immediate field of Move wide immediate.  */
4034static inline uint32_t
4035encode_movw_imm (uint32_t imm)
4036{
4037  return imm << 5;
4038}
4039
/* Encode the 26-bit offset of unconditional branch.  */
4041static inline uint32_t
4042encode_branch_ofs_26 (uint32_t ofs)
4043{
4044  return ofs & ((1 << 26) - 1);
4045}
4046
/* Encode the 19-bit offset of conditional branch and compare & branch.  */
4048static inline uint32_t
4049encode_cond_branch_ofs_19 (uint32_t ofs)
4050{
4051  return (ofs & ((1 << 19) - 1)) << 5;
4052}
4053
/* Encode the 19-bit offset of ld literal.  */
4055static inline uint32_t
4056encode_ld_lit_ofs_19 (uint32_t ofs)
4057{
4058  return (ofs & ((1 << 19) - 1)) << 5;
4059}
4060
4061/* Encode the 14-bit offset of test & branch.  */
4062static inline uint32_t
4063encode_tst_branch_ofs_14 (uint32_t ofs)
4064{
4065  return (ofs & ((1 << 14) - 1)) << 5;
4066}
4067
4068/* Encode the 16-bit imm field of svc/hvc/smc.  */
4069static inline uint32_t
4070encode_svc_imm (uint32_t imm)
4071{
4072  return imm << 5;
4073}
4074
4075/* Reencode add(s) to sub(s), or sub(s) to add(s).  */
4076static inline uint32_t
4077reencode_addsub_switch_add_sub (uint32_t opcode)
4078{
4079  return opcode ^ (1 << 30);
4080}
4081
4082static inline uint32_t
4083reencode_movzn_to_movz (uint32_t opcode)
4084{
4085  return opcode | (1 << 30);
4086}
4087
4088static inline uint32_t
4089reencode_movzn_to_movn (uint32_t opcode)
4090{
4091  return opcode & ~(1 << 30);
4092}
4093
4094/* Overall per-instruction processing.	*/
4095
4096/* We need to be able to fix up arbitrary expressions in some statements.
4097   This is so that we can handle symbols that are an arbitrary distance from
4098   the pc.  The most common cases are of the form ((+/-sym -/+ . - 8) & mask),
4099   which returns part of an address in a form which will be valid for
4100   a data instruction.	We do this by pushing the expression into a symbol
4101   in the expr_section, and creating a fix for that.  */
4102
4103static fixS *
4104fix_new_aarch64 (fragS * frag,
4105		 int where,
4106		 short int size, expressionS * exp, int pc_rel, int reloc)
4107{
4108  fixS *new_fix;
4109
4110  switch (exp->X_op)
4111    {
4112    case O_constant:
4113    case O_symbol:
4114    case O_add:
4115    case O_subtract:
4116      new_fix = fix_new_exp (frag, where, size, exp, pc_rel, reloc);
4117      break;
4118
4119    default:
4120      new_fix = fix_new (frag, where, size, make_expr_symbol (exp), 0,
4121			 pc_rel, reloc);
4122      break;
4123    }
4124  return new_fix;
4125}
4126
4127/* Diagnostics on operands errors.  */
4128
/* By default, output a verbose error message.
   The verbose error message can be disabled by -mno-verbose-error.  */
4131static int verbose_error_p = 1;
4132
4133#ifdef DEBUG_AARCH64
4134/* N.B. this is only for the purpose of debugging.  */
4135const char* operand_mismatch_kind_names[] =
4136{
4137  "AARCH64_OPDE_NIL",
4138  "AARCH64_OPDE_RECOVERABLE",
4139  "AARCH64_OPDE_SYNTAX_ERROR",
4140  "AARCH64_OPDE_FATAL_SYNTAX_ERROR",
4141  "AARCH64_OPDE_INVALID_VARIANT",
4142  "AARCH64_OPDE_OUT_OF_RANGE",
4143  "AARCH64_OPDE_UNALIGNED",
4144  "AARCH64_OPDE_REG_LIST",
4145  "AARCH64_OPDE_OTHER_ERROR",
4146};
4147#endif /* DEBUG_AARCH64 */
4148
4149/* Return TRUE if LHS is of higher severity than RHS, otherwise return FALSE.
4150
4151   When multiple errors of different kinds are found in the same assembly
4152   line, only the error of the highest severity will be picked up for
4153   issuing the diagnostics.  */
4154
4155static inline bfd_boolean
4156operand_error_higher_severity_p (enum aarch64_operand_error_kind lhs,
4157				 enum aarch64_operand_error_kind rhs)
4158{
4159  gas_assert (AARCH64_OPDE_RECOVERABLE > AARCH64_OPDE_NIL);
4160  gas_assert (AARCH64_OPDE_SYNTAX_ERROR > AARCH64_OPDE_RECOVERABLE);
4161  gas_assert (AARCH64_OPDE_FATAL_SYNTAX_ERROR > AARCH64_OPDE_SYNTAX_ERROR);
4162  gas_assert (AARCH64_OPDE_INVALID_VARIANT > AARCH64_OPDE_FATAL_SYNTAX_ERROR);
4163  gas_assert (AARCH64_OPDE_OUT_OF_RANGE > AARCH64_OPDE_INVALID_VARIANT);
4164  gas_assert (AARCH64_OPDE_UNALIGNED > AARCH64_OPDE_OUT_OF_RANGE);
4165  gas_assert (AARCH64_OPDE_REG_LIST > AARCH64_OPDE_UNALIGNED);
4166  gas_assert (AARCH64_OPDE_OTHER_ERROR > AARCH64_OPDE_REG_LIST);
4167  return lhs > rhs;
4168}
4169
/* Helper routine to get the mnemonic name from the assembly instruction
   line; it should only be called for diagnostic purposes, as a string copy
   operation is involved, which may affect runtime performance if used
   elsewhere.  */
4174
4175static const char*
4176get_mnemonic_name (const char *str)
4177{
4178  static char mnemonic[32];
4179  char *ptr;
4180
  /* Get the first 31 bytes and assume that the full name is included.  */
4182  strncpy (mnemonic, str, 31);
4183  mnemonic[31] = '\0';
4184
4185  /* Scan up to the end of the mnemonic, which must end in white space,
4186     '.', or end of string.  */
4187  for (ptr = mnemonic; is_part_of_name(*ptr); ++ptr)
4188    ;
4189
4190  *ptr = '\0';
4191
  /* Mark a truncated long name with a trailing '...'.  */
4193  if (ptr - mnemonic == 31)
4194    mnemonic[28] = mnemonic[29] = mnemonic[30] = '.';
4195
4196  return mnemonic;
4197}
4198
4199static void
4200reset_aarch64_instruction (aarch64_instruction *instruction)
4201{
4202  memset (instruction, '\0', sizeof (aarch64_instruction));
4203  instruction->reloc.type = BFD_RELOC_UNUSED;
4204}
4205
/* Data structures storing one user error in the assembly code related to
   operands.  */
4208
4209struct operand_error_record
4210{
4211  const aarch64_opcode *opcode;
4212  aarch64_operand_error detail;
4213  struct operand_error_record *next;
4214};
4215
4216typedef struct operand_error_record operand_error_record;
4217
4218struct operand_errors
4219{
4220  operand_error_record *head;
4221  operand_error_record *tail;
4222};
4223
4224typedef struct operand_errors operand_errors;
4225
/* Top-level data structure reporting user errors for the current line of
   the assembly code.
   The way md_assemble works is that all opcodes sharing the same mnemonic
   name are iterated over to find a match to the assembly line.  In this data
   structure, each such opcode will have one operand_error_record allocated
   and inserted.  In other words, excessive errors related to a single
   opcode are disregarded.  */
4233operand_errors operand_error_report;
4234
4235/* Free record nodes.  */
4236static operand_error_record *free_opnd_error_record_nodes = NULL;
4237
4238/* Initialize the data structure that stores the operand mismatch
4239   information on assembling one line of the assembly code.  */
4240static void
4241init_operand_error_report (void)
4242{
4243  if (operand_error_report.head != NULL)
4244    {
4245      gas_assert (operand_error_report.tail != NULL);
4246      operand_error_report.tail->next = free_opnd_error_record_nodes;
4247      free_opnd_error_record_nodes = operand_error_report.head;
4248      operand_error_report.head = NULL;
4249      operand_error_report.tail = NULL;
4250      return;
4251    }
4252  gas_assert (operand_error_report.tail == NULL);
4253}
4254
4255/* Return TRUE if some operand error has been recorded during the
4256   parsing of the current assembly line using the opcode *OPCODE;
4257   otherwise return FALSE.  */
4258static inline bfd_boolean
4259opcode_has_operand_error_p (const aarch64_opcode *opcode)
4260{
4261  operand_error_record *record = operand_error_report.head;
4262  return record && record->opcode == opcode;
4263}
4264
4265/* Add the error record *NEW_RECORD to operand_error_report.  The record's
4266   OPCODE field is initialized with OPCODE.
   N.B. there is only one record for each opcode, i.e. at most one error is
   recorded for each instruction template.  */
4269
4270static void
4271add_operand_error_record (const operand_error_record* new_record)
4272{
4273  const aarch64_opcode *opcode = new_record->opcode;
4274  operand_error_record* record = operand_error_report.head;
4275
4276  /* The record may have been created for this opcode.  If not, we need
4277     to prepare one.  */
4278  if (! opcode_has_operand_error_p (opcode))
4279    {
4280      /* Get one empty record.  */
4281      if (free_opnd_error_record_nodes == NULL)
4282	{
4283	  record = XNEW (operand_error_record);
4284	}
4285      else
4286	{
4287	  record = free_opnd_error_record_nodes;
4288	  free_opnd_error_record_nodes = record->next;
4289	}
4290      record->opcode = opcode;
4291      /* Insert at the head.  */
4292      record->next = operand_error_report.head;
4293      operand_error_report.head = record;
4294      if (operand_error_report.tail == NULL)
4295	operand_error_report.tail = record;
4296    }
4297  else if (record->detail.kind != AARCH64_OPDE_NIL
4298	   && record->detail.index <= new_record->detail.index
4299	   && operand_error_higher_severity_p (record->detail.kind,
4300					       new_record->detail.kind))
4301    {
      /* In the case of multiple errors found on operands related to a
	 single opcode, only record the error of the leftmost operand, and
	 only if it is of higher severity.  */
4305      DEBUG_TRACE ("error %s on operand %d not added to the report due to"
4306		   " the existing error %s on operand %d",
4307		   operand_mismatch_kind_names[new_record->detail.kind],
4308		   new_record->detail.index,
4309		   operand_mismatch_kind_names[record->detail.kind],
4310		   record->detail.index);
4311      return;
4312    }
4313
4314  record->detail = new_record->detail;
4315}
4316
4317static inline void
4318record_operand_error_info (const aarch64_opcode *opcode,
4319			   aarch64_operand_error *error_info)
4320{
4321  operand_error_record record;
4322  record.opcode = opcode;
4323  record.detail = *error_info;
4324  add_operand_error_record (&record);
4325}
4326
4327/* Record an error of kind KIND and, if ERROR is not NULL, of the detailed
4328   error message *ERROR, for operand IDX (count from 0).  */
4329
4330static void
4331record_operand_error (const aarch64_opcode *opcode, int idx,
4332		      enum aarch64_operand_error_kind kind,
4333		      const char* error)
4334{
4335  aarch64_operand_error info;
4336  memset(&info, 0, sizeof (info));
4337  info.index = idx;
4338  info.kind = kind;
4339  info.error = error;
4340  record_operand_error_info (opcode, &info);
4341}
4342
4343static void
4344record_operand_error_with_data (const aarch64_opcode *opcode, int idx,
4345				enum aarch64_operand_error_kind kind,
4346				const char* error, const int *extra_data)
4347{
4348  aarch64_operand_error info;
4349  info.index = idx;
4350  info.kind = kind;
4351  info.error = error;
4352  info.data[0] = extra_data[0];
4353  info.data[1] = extra_data[1];
4354  info.data[2] = extra_data[2];
4355  record_operand_error_info (opcode, &info);
4356}
4357
4358static void
4359record_operand_out_of_range_error (const aarch64_opcode *opcode, int idx,
4360				   const char* error, int lower_bound,
4361				   int upper_bound)
4362{
4363  int data[3] = {lower_bound, upper_bound, 0};
4364  record_operand_error_with_data (opcode, idx, AARCH64_OPDE_OUT_OF_RANGE,
4365				  error, data);
4366}
4367
4368/* Remove the operand error record for *OPCODE.  */
4369static void ATTRIBUTE_UNUSED
4370remove_operand_error_record (const aarch64_opcode *opcode)
4371{
4372  if (opcode_has_operand_error_p (opcode))
4373    {
4374      operand_error_record* record = operand_error_report.head;
4375      gas_assert (record != NULL && operand_error_report.tail != NULL);
4376      operand_error_report.head = record->next;
4377      record->next = free_opnd_error_record_nodes;
4378      free_opnd_error_record_nodes = record;
4379      if (operand_error_report.head == NULL)
4380	{
4381	  gas_assert (operand_error_report.tail == record);
4382	  operand_error_report.tail = NULL;
4383	}
4384    }
4385}
4386
4387/* Given the instruction in *INSTR, return the index of the best matched
4388   qualifier sequence in the list (an array) headed by QUALIFIERS_LIST.
4389
   Return -1 if there is no qualifier sequence; return the first match
   if multiple matches are found.  */
4392
4393static int
4394find_best_match (const aarch64_inst *instr,
4395		 const aarch64_opnd_qualifier_seq_t *qualifiers_list)
4396{
4397  int i, num_opnds, max_num_matched, idx;
4398
4399  num_opnds = aarch64_num_of_operands (instr->opcode);
4400  if (num_opnds == 0)
4401    {
4402      DEBUG_TRACE ("no operand");
4403      return -1;
4404    }
4405
4406  max_num_matched = 0;
4407  idx = 0;
4408
4409  /* For each pattern.  */
4410  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4411    {
4412      int j, num_matched;
4413      const aarch64_opnd_qualifier_t *qualifiers = *qualifiers_list;
4414
      /* Most opcodes have far fewer patterns in the list.  */
4416      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
4417	{
4418	  DEBUG_TRACE_IF (i == 0, "empty list of qualifier sequence");
4419	  break;
4420	}
4421
4422      for (j = 0, num_matched = 0; j < num_opnds; ++j, ++qualifiers)
4423	if (*qualifiers == instr->operands[j].qualifier)
4424	  ++num_matched;
4425
4426      if (num_matched > max_num_matched)
4427	{
4428	  max_num_matched = num_matched;
4429	  idx = i;
4430	}
4431    }
4432
4433  DEBUG_TRACE ("return with %d", idx);
4434  return idx;
4435}
4436
/* Assign qualifiers in the qualifier sequence (headed by QUALIFIERS) to the
4438   corresponding operands in *INSTR.  */
4439
4440static inline void
4441assign_qualifier_sequence (aarch64_inst *instr,
4442			   const aarch64_opnd_qualifier_t *qualifiers)
4443{
4444  int i = 0;
4445  int num_opnds = aarch64_num_of_operands (instr->opcode);
4446  gas_assert (num_opnds);
4447  for (i = 0; i < num_opnds; ++i, ++qualifiers)
4448    instr->operands[i].qualifier = *qualifiers;
4449}
4450
/* Print operands for diagnostic purposes.  */
4452
4453static void
4454print_operands (char *buf, const aarch64_opcode *opcode,
4455		const aarch64_opnd_info *opnds)
4456{
4457  int i;
4458
4459  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
4460    {
4461      char str[128];
4462
      /* We rely mainly on the opcode operand info; however, we also look
	 into inst->operands to support the printing of an optional
	 operand.
	 The two operand codes should be the same in all cases, apart from
	 when the operand can be optional.  */
4468      if (opcode->operands[i] == AARCH64_OPND_NIL
4469	  || opnds[i].type == AARCH64_OPND_NIL)
4470	break;
4471
4472      /* Generate the operand string in STR.  */
4473      aarch64_print_operand (str, sizeof (str), 0, opcode, opnds, i, NULL, NULL);
4474
4475      /* Delimiter.  */
4476      if (str[0] != '\0')
4477	strcat (buf, i == 0 ? " " : ", ");
4478
4479      /* Append the operand string.  */
4480      strcat (buf, str);
4481    }
4482}
4483
4484/* Send to stderr a string as information.  */
4485
4486static void
4487output_info (const char *format, ...)
4488{
4489  const char *file;
4490  unsigned int line;
4491  va_list args;
4492
4493  file = as_where (&line);
4494  if (file)
4495    {
4496      if (line != 0)
4497	fprintf (stderr, "%s:%u: ", file, line);
4498      else
4499	fprintf (stderr, "%s: ", file);
4500    }
4501  fprintf (stderr, _("Info: "));
4502  va_start (args, format);
4503  vfprintf (stderr, format, args);
4504  va_end (args);
4505  (void) putc ('\n', stderr);
4506}
4507
4508/* Output one operand error record.  */
4509
4510static void
4511output_operand_error_record (const operand_error_record *record, char *str)
4512{
4513  const aarch64_operand_error *detail = &record->detail;
4514  int idx = detail->index;
4515  const aarch64_opcode *opcode = record->opcode;
4516  enum aarch64_opnd opd_code = (idx >= 0 ? opcode->operands[idx]
4517				: AARCH64_OPND_NIL);
4518
4519  switch (detail->kind)
4520    {
4521    case AARCH64_OPDE_NIL:
4522      gas_assert (0);
4523      break;
4524
4525    case AARCH64_OPDE_SYNTAX_ERROR:
4526    case AARCH64_OPDE_RECOVERABLE:
4527    case AARCH64_OPDE_FATAL_SYNTAX_ERROR:
4528    case AARCH64_OPDE_OTHER_ERROR:
      /* Use the prepared error message if there is one; otherwise use the
	 operand description string to describe the error.  */
4531      if (detail->error != NULL)
4532	{
4533	  if (idx < 0)
4534	    as_bad (_("%s -- `%s'"), detail->error, str);
4535	  else
4536	    as_bad (_("%s at operand %d -- `%s'"),
4537		    detail->error, idx + 1, str);
4538	}
4539      else
4540	{
4541	  gas_assert (idx >= 0);
4542	  as_bad (_("operand %d must be %s -- `%s'"), idx + 1,
4543		aarch64_get_operand_desc (opd_code), str);
4544	}
4545      break;
4546
4547    case AARCH64_OPDE_INVALID_VARIANT:
4548      as_bad (_("operand mismatch -- `%s'"), str);
4549      if (verbose_error_p)
4550	{
4551	  /* We will try to correct the erroneous instruction and also provide
4552	     more information e.g. all other valid variants.
4553
4554	     The string representation of the corrected instruction and other
4555	     valid variants are generated by
4556
4557	     1) obtaining the intermediate representation of the erroneous
4558	     instruction;
4559	     2) manipulating the IR, e.g. replacing the operand qualifier;
4560	     3) printing out the instruction by calling the printer functions
4561	     shared with the disassembler.
4562
4563	     The limitation of this method is that the exact input assembly
4564	     line cannot be accurately reproduced in some cases, for example an
4565	     optional operand present in the actual assembly line will be
4566	     omitted in the output; likewise for the optional syntax rules,
4567	     e.g. the # before the immediate.  Another limitation is that the
4568	     assembly symbols and relocation operations in the assembly line
	     currently cannot be printed out in the error report.  Last but
	     not least, when other error(s) co-exist with this error, the
	     'corrected' instruction may still be incorrect, e.g. given
4572	       'ldnp h0,h1,[x0,#6]!'
4573	     this diagnosis will provide the version:
4574	       'ldnp s0,s1,[x0,#6]!'
4575	     which is still not right.  */
4576	  size_t len = strlen (get_mnemonic_name (str));
4577	  int i, qlf_idx;
4578	  bfd_boolean result;
4579	  char buf[2048];
4580	  aarch64_inst *inst_base = &inst.base;
4581	  const aarch64_opnd_qualifier_seq_t *qualifiers_list;
4582
4583	  /* Init inst.  */
4584	  reset_aarch64_instruction (&inst);
4585	  inst_base->opcode = opcode;
4586
4587	  /* Reset the error report so that there is no side effect on the
4588	     following operand parsing.  */
4589	  init_operand_error_report ();
4590
4591	  /* Fill inst.  */
4592	  result = parse_operands (str + len, opcode)
4593	    && programmer_friendly_fixup (&inst);
4594	  gas_assert (result);
4595	  result = aarch64_opcode_encode (opcode, inst_base, &inst_base->value,
4596					  NULL, NULL);
4597	  gas_assert (!result);
4598
4599	  /* Find the most matched qualifier sequence.  */
4600	  qlf_idx = find_best_match (inst_base, opcode->qualifiers_list);
4601	  gas_assert (qlf_idx > -1);
4602
4603	  /* Assign the qualifiers.  */
4604	  assign_qualifier_sequence (inst_base,
4605				     opcode->qualifiers_list[qlf_idx]);
4606
4607	  /* Print the hint.  */
4608	  output_info (_("   did you mean this?"));
4609	  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4610	  print_operands (buf, opcode, inst_base->operands);
4611	  output_info (_("   %s"), buf);
4612
4613	  /* Print out other variant(s) if there is any.  */
	  if (qlf_idx != 0
	      || !empty_qualifier_sequence_p (opcode->qualifiers_list[1]))
4616	    output_info (_("   other valid variant(s):"));
4617
4618	  /* For each pattern.  */
4619	  qualifiers_list = opcode->qualifiers_list;
4620	  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
4621	    {
	      /* Most opcodes have far fewer patterns in the list.
		 The first NIL qualifier indicates the end of the list.  */
4624	      if (empty_qualifier_sequence_p (*qualifiers_list) == TRUE)
4625		break;
4626
4627	      if (i != qlf_idx)
4628		{
4629		  /* Mnemonics name.  */
4630		  snprintf (buf, sizeof (buf), "\t%s", get_mnemonic_name (str));
4631
4632		  /* Assign the qualifiers.  */
4633		  assign_qualifier_sequence (inst_base, *qualifiers_list);
4634
4635		  /* Print instruction.  */
4636		  print_operands (buf, opcode, inst_base->operands);
4637
4638		  output_info (_("   %s"), buf);
4639		}
4640	    }
4641	}
4642      break;
4643
4644    case AARCH64_OPDE_UNTIED_OPERAND:
4645      as_bad (_("operand %d must be the same register as operand 1 -- `%s'"),
4646	      detail->index + 1, str);
4647      break;
4648
4649    case AARCH64_OPDE_OUT_OF_RANGE:
4650      if (detail->data[0] != detail->data[1])
4651	as_bad (_("%s out of range %d to %d at operand %d -- `%s'"),
4652		detail->error ? detail->error : _("immediate value"),
4653		detail->data[0], detail->data[1], idx + 1, str);
4654      else
4655	as_bad (_("%s must be %d at operand %d -- `%s'"),
4656		detail->error ? detail->error : _("immediate value"),
4657		detail->data[0], idx + 1, str);
4658      break;
4659
4660    case AARCH64_OPDE_REG_LIST:
4661      if (detail->data[0] == 1)
4662	as_bad (_("invalid number of registers in the list; "
4663		  "only 1 register is expected at operand %d -- `%s'"),
4664		idx + 1, str);
4665      else
4666	as_bad (_("invalid number of registers in the list; "
4667		  "%d registers are expected at operand %d -- `%s'"),
4668	      detail->data[0], idx + 1, str);
4669      break;
4670
4671    case AARCH64_OPDE_UNALIGNED:
4672      as_bad (_("immediate value must be a multiple of "
4673		"%d at operand %d -- `%s'"),
4674	      detail->data[0], idx + 1, str);
4675      break;
4676
4677    default:
4678      gas_assert (0);
4679      break;
4680    }
4681}
4682
/* Process and output the error message about the operand mismatch.

   When this function is called, the operand error information has been
   collected for an assembly line and there will be multiple errors in the
   case of multiple instruction templates; output the error message that
   most closely describes the problem.  */
4689
4690static void
4691output_operand_error_report (char *str)
4692{
4693  int largest_error_pos;
4694  const char *msg = NULL;
4695  enum aarch64_operand_error_kind kind;
4696  operand_error_record *curr;
4697  operand_error_record *head = operand_error_report.head;
4698  operand_error_record *record = NULL;
4699
4700  /* No error to report.  */
4701  if (head == NULL)
4702    return;
4703
4704  gas_assert (head != NULL && operand_error_report.tail != NULL);
4705
4706  /* Only one error.  */
4707  if (head == operand_error_report.tail)
4708    {
4709      DEBUG_TRACE ("single opcode entry with error kind: %s",
4710		   operand_mismatch_kind_names[head->detail.kind]);
4711      output_operand_error_record (head, str);
4712      return;
4713    }
4714
4715  /* Find the error kind of the highest severity.  */
  DEBUG_TRACE ("multiple opcode entries with error kind");
4717  kind = AARCH64_OPDE_NIL;
4718  for (curr = head; curr != NULL; curr = curr->next)
4719    {
4720      gas_assert (curr->detail.kind != AARCH64_OPDE_NIL);
4721      DEBUG_TRACE ("\t%s", operand_mismatch_kind_names[curr->detail.kind]);
4722      if (operand_error_higher_severity_p (curr->detail.kind, kind))
4723	kind = curr->detail.kind;
4724    }
4725  gas_assert (kind != AARCH64_OPDE_NIL);
4726
  /* Pick one of the errors of KIND to report.  */
4728  largest_error_pos = -2; /* Index can be -1 which means unknown index.  */
4729  for (curr = head; curr != NULL; curr = curr->next)
4730    {
4731      if (curr->detail.kind != kind)
4732	continue;
4733      /* If there are multiple errors, pick up the one with the highest
4734	 mismatching operand index.  In the case of multiple errors with
4735	 the equally highest operand index, pick up the first one or the
4736	 first one with non-NULL error message.  */
4737      if (curr->detail.index > largest_error_pos
4738	  || (curr->detail.index == largest_error_pos && msg == NULL
4739	      && curr->detail.error != NULL))
4740	{
4741	  largest_error_pos = curr->detail.index;
4742	  record = curr;
4743	  msg = record->detail.error;
4744	}
4745    }
4746
4747  gas_assert (largest_error_pos != -2 && record != NULL);
4748  DEBUG_TRACE ("Pick up error kind %s to report",
4749	       operand_mismatch_kind_names[record->detail.kind]);
4750
4751  /* Output.  */
4752  output_operand_error_record (record, str);
4753}
4754
4755/* Write an AARCH64 instruction to buf - always little-endian.  */
4756static void
4757put_aarch64_insn (char *buf, uint32_t insn)
4758{
4759  unsigned char *where = (unsigned char *) buf;
4760  where[0] = insn;
4761  where[1] = insn >> 8;
4762  where[2] = insn >> 16;
4763  where[3] = insn >> 24;
4764}
4765
4766static uint32_t
4767get_aarch64_insn (char *buf)
4768{
4769  unsigned char *where = (unsigned char *) buf;
4770  uint32_t result;
4771  result = (where[0] | (where[1] << 8) | (where[2] << 16) | (where[3] << 24));
4772  return result;
4773}
4774
4775static void
4776output_inst (struct aarch64_inst *new_inst)
4777{
4778  char *to = NULL;
4779
4780  to = frag_more (INSN_SIZE);
4781
4782  frag_now->tc_frag_data.recorded = 1;
4783
4784  put_aarch64_insn (to, inst.base.value);
4785
4786  if (inst.reloc.type != BFD_RELOC_UNUSED)
4787    {
4788      fixS *fixp = fix_new_aarch64 (frag_now, to - frag_now->fr_literal,
4789				    INSN_SIZE, &inst.reloc.exp,
4790				    inst.reloc.pc_rel,
4791				    inst.reloc.type);
4792      DEBUG_TRACE ("Prepared relocation fix up");
      /* Don't check the addend value against the instruction size;
         that's the job of our code in md_apply_fix ().  */
4795      fixp->fx_no_overflow = 1;
4796      if (new_inst != NULL)
4797	fixp->tc_fix_data.inst = new_inst;
4798      if (aarch64_gas_internal_fixup_p ())
4799	{
4800	  gas_assert (inst.reloc.opnd != AARCH64_OPND_NIL);
4801	  fixp->tc_fix_data.opnd = inst.reloc.opnd;
4802	  fixp->fx_addnumber = inst.reloc.flags;
4803	}
4804    }
4805
4806  dwarf2_emit_insn (INSN_SIZE);
4807}
4808
4809/* Link together opcodes of the same name.  */
4810
4811struct templates
4812{
4813  aarch64_opcode *opcode;
4814  struct templates *next;
4815};
4816
4817typedef struct templates templates;
4818
4819static templates *
4820lookup_mnemonic (const char *start, int len)
4821{
4822  templates *templ = NULL;
4823
4824  templ = hash_find_n (aarch64_ops_hsh, start, len);
4825  return templ;
4826}
4827
4828/* Subroutine of md_assemble, responsible for looking up the primary
4829   opcode from the mnemonic the user wrote.  STR points to the
4830   beginning of the mnemonic. */
4831
4832static templates *
4833opcode_lookup (char **str)
4834{
4835  char *end, *base, *dot;
4836  const aarch64_cond *cond;
4837  char condname[16];
4838  int len;
4839
4840  /* Scan up to the end of the mnemonic, which must end in white space,
4841     '.', or end of string.  */
4842  dot = 0;
4843  for (base = end = *str; is_part_of_name(*end); end++)
4844    if (*end == '.' && !dot)
4845      dot = end;
4846
4847  if (end == base || dot == base)
4848    return 0;
4849
4850  inst.cond = COND_ALWAYS;
4851
4852  /* Handle a possible condition.  */
4853  if (dot)
4854    {
4855      cond = hash_find_n (aarch64_cond_hsh, dot + 1, end - dot - 1);
4856      if (cond)
4857	{
4858	  inst.cond = cond->value;
4859	  *str = end;
4860	}
4861      else
4862	{
4863	  *str = dot;
4864	  return 0;
4865	}
4866      len = dot - base;
4867    }
4868  else
4869    {
4870      *str = end;
4871      len = end - base;
4872    }
4873
4874  if (inst.cond == COND_ALWAYS)
4875    {
4876      /* Look for unaffixed mnemonic.  */
4877      return lookup_mnemonic (base, len);
4878    }
4879  else if (len <= 13)
4880    {
      /* Append ".c" to the mnemonic if conditional.  */
4882      memcpy (condname, base, len);
4883      memcpy (condname + len, ".c", 2);
4884      base = condname;
4885      len += 2;
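      /* Worked example: for "b.eq", BASE/LEN initially describe "b", the EQ
	 condition value has already been recorded in inst.cond above, and
	 the lookup below is performed with the key "b.c" of length 3.  */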
4886      return lookup_mnemonic (base, len);
4887    }
4888
4889  return NULL;
4890}
4891
4892/* Internal helper routine converting a vector_type_el structure *VECTYPE
4893   to a corresponding operand qualifier.  */
4894
4895static inline aarch64_opnd_qualifier_t
4896vectype_to_qualifier (const struct vector_type_el *vectype)
4897{
4898  /* Element size in bytes indexed by vector_el_type.  */
4899  const unsigned char ele_size[5]
4900    = {1, 2, 4, 8, 16};
4901  const unsigned int ele_base [5] =
4902    {
4903      AARCH64_OPND_QLF_V_8B,
4904      AARCH64_OPND_QLF_V_2H,
4905      AARCH64_OPND_QLF_V_2S,
4906      AARCH64_OPND_QLF_V_1D,
4907      AARCH64_OPND_QLF_V_1Q
4908  };
4909
4910  if (!vectype->defined || vectype->type == NT_invtype)
4911    goto vectype_conversion_fail;
4912
4913  if (vectype->type == NT_zero)
4914    return AARCH64_OPND_QLF_P_Z;
4915  if (vectype->type == NT_merge)
4916    return AARCH64_OPND_QLF_P_M;
4917
4918  gas_assert (vectype->type >= NT_b && vectype->type <= NT_q);
4919
4920  if (vectype->defined & (NTA_HASINDEX | NTA_HASVARWIDTH))
4921    /* Vector element register.  */
4922    return AARCH64_OPND_QLF_S_B + vectype->type;
4923  else
4924    {
4925      /* Vector register.  */
4926      int reg_size = ele_size[vectype->type] * vectype->width;
4927      unsigned offset;
4928      unsigned shift;
4929      if (reg_size != 16 && reg_size != 8 && reg_size != 4)
4930	goto vectype_conversion_fail;
4931
      /* The conversion is done by calculating the offset from the base
	 operand qualifier for the vector type.  The operand qualifiers are
	 regular enough that the offset can be established by shifting the
	 vector width by a vector-type dependent amount.  */
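      /* Worked example: a "4s" arrangement has type NT_s (element size 4)
	 and width 4, so reg_size is 16, shift is 2, and the result is
	 ele_base[NT_s] + (4 >> 2), i.e. the qualifier immediately after
	 AARCH64_OPND_QLF_V_2S, which is the 4S vector qualifier.  */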
4936      shift = 0;
4937      if (vectype->type == NT_b)
4938	shift = 4;
4939      else if (vectype->type == NT_h || vectype->type == NT_s)
4940	shift = 2;
4941      else if (vectype->type >= NT_d)
4942	shift = 1;
4943      else
4944	gas_assert (0);
4945
4946      offset = ele_base [vectype->type] + (vectype->width >> shift);
4947      gas_assert (AARCH64_OPND_QLF_V_8B <= offset
4948		  && offset <= AARCH64_OPND_QLF_V_1Q);
4949      return offset;
4950    }
4951
4952vectype_conversion_fail:
4953  first_error (_("bad vector arrangement type"));
4954  return AARCH64_OPND_QLF_NIL;
4955}
4956
/* Process an optional operand that has been omitted from the assembly line.
   Fill *OPERAND for such an operand of type TYPE.  OPCODE points to the
   instruction's opcode entry while IDX is the index of this omitted
   operand.  */
4961
4962static void
4963process_omitted_operand (enum aarch64_opnd type, const aarch64_opcode *opcode,
4964			 int idx, aarch64_opnd_info *operand)
4965{
4966  aarch64_insn default_value = get_optional_operand_default_value (opcode);
4967  gas_assert (optional_operand_p (opcode, idx));
4968  gas_assert (!operand->present);
4969
4970  switch (type)
4971    {
4972    case AARCH64_OPND_Rd:
4973    case AARCH64_OPND_Rn:
4974    case AARCH64_OPND_Rm:
4975    case AARCH64_OPND_Rt:
4976    case AARCH64_OPND_Rt2:
4977    case AARCH64_OPND_Rs:
4978    case AARCH64_OPND_Ra:
4979    case AARCH64_OPND_Rt_SYS:
4980    case AARCH64_OPND_Rd_SP:
4981    case AARCH64_OPND_Rn_SP:
4982    case AARCH64_OPND_Rm_SP:
4983    case AARCH64_OPND_Fd:
4984    case AARCH64_OPND_Fn:
4985    case AARCH64_OPND_Fm:
4986    case AARCH64_OPND_Fa:
4987    case AARCH64_OPND_Ft:
4988    case AARCH64_OPND_Ft2:
4989    case AARCH64_OPND_Sd:
4990    case AARCH64_OPND_Sn:
4991    case AARCH64_OPND_Sm:
4992    case AARCH64_OPND_Vd:
4993    case AARCH64_OPND_Vn:
4994    case AARCH64_OPND_Vm:
4995    case AARCH64_OPND_VdD1:
4996    case AARCH64_OPND_VnD1:
4997      operand->reg.regno = default_value;
4998      break;
4999
5000    case AARCH64_OPND_Ed:
5001    case AARCH64_OPND_En:
5002    case AARCH64_OPND_Em:
5003      operand->reglane.regno = default_value;
5004      break;
5005
5006    case AARCH64_OPND_IDX:
5007    case AARCH64_OPND_BIT_NUM:
5008    case AARCH64_OPND_IMMR:
5009    case AARCH64_OPND_IMMS:
5010    case AARCH64_OPND_SHLL_IMM:
5011    case AARCH64_OPND_IMM_VLSL:
5012    case AARCH64_OPND_IMM_VLSR:
5013    case AARCH64_OPND_CCMP_IMM:
5014    case AARCH64_OPND_FBITS:
5015    case AARCH64_OPND_UIMM4:
5016    case AARCH64_OPND_UIMM3_OP1:
5017    case AARCH64_OPND_UIMM3_OP2:
5018    case AARCH64_OPND_IMM:
5019    case AARCH64_OPND_WIDTH:
5020    case AARCH64_OPND_UIMM7:
5021    case AARCH64_OPND_NZCV:
5022    case AARCH64_OPND_SVE_PATTERN:
5023    case AARCH64_OPND_SVE_PRFOP:
5024      operand->imm.value = default_value;
5025      break;
5026
5027    case AARCH64_OPND_SVE_PATTERN_SCALED:
5028      operand->imm.value = default_value;
5029      operand->shifter.kind = AARCH64_MOD_MUL;
5030      operand->shifter.amount = 1;
5031      break;
5032
5033    case AARCH64_OPND_EXCEPTION:
5034      inst.reloc.type = BFD_RELOC_UNUSED;
5035      break;
5036
5037    case AARCH64_OPND_BARRIER_ISB:
      operand->barrier = aarch64_barrier_options + default_value;
      break;
5039
5040    default:
5041      break;
5042    }
5043}
5044
5045/* Process the relocation type for move wide instructions.
5046   Return TRUE on success; otherwise return FALSE.  */
5047
5048static bfd_boolean
5049process_movw_reloc_info (void)
5050{
5051  int is32;
5052  unsigned shift;
5053
5054  is32 = inst.base.operands[0].qualifier == AARCH64_OPND_QLF_W ? 1 : 0;
5055
5056  if (inst.base.opcode->op == OP_MOVK)
5057    switch (inst.reloc.type)
5058      {
5059      case BFD_RELOC_AARCH64_MOVW_G0_S:
5060      case BFD_RELOC_AARCH64_MOVW_G1_S:
5061      case BFD_RELOC_AARCH64_MOVW_G2_S:
5062      case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5063      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5064      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5065      case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5066	set_syntax_error
5067	  (_("the specified relocation type is not allowed for MOVK"));
5068	return FALSE;
5069      default:
5070	break;
5071      }
5072
5073  switch (inst.reloc.type)
5074    {
5075    case BFD_RELOC_AARCH64_MOVW_G0:
5076    case BFD_RELOC_AARCH64_MOVW_G0_NC:
5077    case BFD_RELOC_AARCH64_MOVW_G0_S:
5078    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
5079    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
5080    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
5081    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
5082    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
5083    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
5084    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
5085    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
5086      shift = 0;
5087      break;
5088    case BFD_RELOC_AARCH64_MOVW_G1:
5089    case BFD_RELOC_AARCH64_MOVW_G1_NC:
5090    case BFD_RELOC_AARCH64_MOVW_G1_S:
5091    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
5092    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
5093    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
5094    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
5095    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
5096    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
5097    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
5098    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
5099      shift = 16;
5100      break;
5101    case BFD_RELOC_AARCH64_MOVW_G2:
5102    case BFD_RELOC_AARCH64_MOVW_G2_NC:
5103    case BFD_RELOC_AARCH64_MOVW_G2_S:
5104    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
5105    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
5106      if (is32)
5107	{
5108	  set_fatal_syntax_error
5109	    (_("the specified relocation type is not allowed for 32-bit "
5110	       "register"));
5111	  return FALSE;
5112	}
5113      shift = 32;
5114      break;
5115    case BFD_RELOC_AARCH64_MOVW_G3:
5116      if (is32)
5117	{
5118	  set_fatal_syntax_error
5119	    (_("the specified relocation type is not allowed for 32-bit "
5120	       "register"));
5121	  return FALSE;
5122	}
5123      shift = 48;
5124      break;
5125    default:
5126      /* More cases should be added when more MOVW-related relocation types
5127         are supported in GAS.  */
5128      gas_assert (aarch64_gas_internal_fixup_p ());
5129      /* The shift amount should have already been set by the parser.  */
5130      return TRUE;
5131    }
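  /* Illustrative example: a G1-group relocation (e.g. one requested with the
     ":abs_g1:" operand prefix) selects shift 16 above, so the move-wide
     instruction operates on bits [31:16] of the value.  */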
5132  inst.base.operands[1].shifter.amount = shift;
5133  return TRUE;
5134}
5135
/* A primitive log calculator.  */
5137
5138static inline unsigned int
5139get_logsz (unsigned int size)
5140{
5141  const unsigned char ls[16] =
5142    {0, 1, -1, 2, -1, -1, -1, 3, -1, -1, -1, -1, -1, -1, -1, 4};
5143  if (size > 16)
5144    {
5145      gas_assert (0);
5146      return -1;
5147    }
5148  gas_assert (ls[size - 1] != (unsigned char)-1);
5149  return ls[size - 1];
5150}
5151
5152/* Determine and return the real reloc type code for an instruction
5153   with the pseudo reloc type code BFD_RELOC_AARCH64_LDST_LO12.  */
5154
5155static inline bfd_reloc_code_real_type
5156ldst_lo12_determine_real_reloc_type (void)
5157{
5158  unsigned logsz;
5159  enum aarch64_opnd_qualifier opd0_qlf = inst.base.operands[0].qualifier;
5160  enum aarch64_opnd_qualifier opd1_qlf = inst.base.operands[1].qualifier;
5161
5162  const bfd_reloc_code_real_type reloc_ldst_lo12[3][5] = {
5163    {
5164      BFD_RELOC_AARCH64_LDST8_LO12,
5165      BFD_RELOC_AARCH64_LDST16_LO12,
5166      BFD_RELOC_AARCH64_LDST32_LO12,
5167      BFD_RELOC_AARCH64_LDST64_LO12,
5168      BFD_RELOC_AARCH64_LDST128_LO12
5169    },
5170    {
5171      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12,
5172      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12,
5173      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12,
5174      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12,
5175      BFD_RELOC_AARCH64_NONE
5176    },
5177    {
5178      BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC,
5179      BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC,
5180      BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC,
5181      BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC,
5182      BFD_RELOC_AARCH64_NONE
5183    }
5184  };
5185
5186  gas_assert (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
5187	      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5188	      || (inst.reloc.type
5189		  == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC));
5190  gas_assert (inst.base.opcode->operands[1] == AARCH64_OPND_ADDR_UIMM12);
5191
5192  if (opd1_qlf == AARCH64_OPND_QLF_NIL)
5193    opd1_qlf =
5194      aarch64_get_expected_qualifier (inst.base.opcode->qualifiers_list,
5195				      1, opd0_qlf, 0);
5196  gas_assert (opd1_qlf != AARCH64_OPND_QLF_NIL);
5197
5198  logsz = get_logsz (aarch64_get_qualifier_esize (opd1_qlf));
5199  if (inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12
5200      || inst.reloc.type == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC)
5201    gas_assert (logsz <= 3);
5202  else
5203    gas_assert (logsz <= 4);
5204
  /* In reloc.c, these pseudo relocation types should be defined in the same
     order as in the reloc_ldst_lo12 array above, because the array index
     calculation below relies on this.  */
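  /* Worked example: for a 64-bit load/store, the address operand's qualifier
     has an element size of 8 bytes, so logsz is 3 and the plain
     BFD_RELOC_AARCH64_LDST_LO12 pseudo type (row 0) is narrowed to
     BFD_RELOC_AARCH64_LDST64_LO12.  */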
5208  return reloc_ldst_lo12[inst.reloc.type - BFD_RELOC_AARCH64_LDST_LO12][logsz];
5209}
5210
5211/* Check whether a register list REGINFO is valid.  The registers must be
5212   numbered in increasing order (modulo 32), in increments of one or two.
5213
5214   If ACCEPT_ALTERNATE is non-zero, the register numbers should be in
5215   increments of two.
5216
5217   Return FALSE if such a register list is invalid, otherwise return TRUE.  */
5218
5219static bfd_boolean
5220reg_list_valid_p (uint32_t reginfo, int accept_alternate)
5221{
5222  uint32_t i, nb_regs, prev_regno, incr;
5223
5224  nb_regs = 1 + (reginfo & 0x3);
5225  reginfo >>= 2;
5226  prev_regno = reginfo & 0x1f;
5227  incr = accept_alternate ? 2 : 1;
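  /* Illustrative note: at this point NB_REGS and PREV_REGNO have been
     extracted from REGINFO; each remaining 5-bit field holds the next
     register number.  For example, a two-register list {31, 0} is accepted
     because (31 + 1) & 0x1f == 0, thanks to the modulo-32 comparison
     below.  */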
5228
5229  for (i = 1; i < nb_regs; ++i)
5230    {
5231      uint32_t curr_regno;
5232      reginfo >>= 5;
5233      curr_regno = reginfo & 0x1f;
5234      if (curr_regno != ((prev_regno + incr) & 0x1f))
5235	return FALSE;
5236      prev_regno = curr_regno;
5237    }
5238
5239  return TRUE;
5240}
5241
5242/* Generic instruction operand parser.	This does no encoding and no
5243   semantic validation; it merely squirrels values away in the inst
5244   structure.  Returns TRUE or FALSE depending on whether the
5245   specified grammar matched.  */
5246
5247static bfd_boolean
5248parse_operands (char *str, const aarch64_opcode *opcode)
5249{
5250  int i;
5251  char *backtrack_pos = 0;
5252  const enum aarch64_opnd *operands = opcode->operands;
5253  aarch64_reg_type imm_reg_type;
5254
5255  clear_error ();
5256  skip_whitespace (str);
5257
5258  if (AARCH64_CPU_HAS_FEATURE (AARCH64_FEATURE_SVE, *opcode->avariant))
5259    imm_reg_type = REG_TYPE_R_Z_BHSDQ_VZP;
5260  else
5261    imm_reg_type = REG_TYPE_R_Z_BHSDQ_V;
5262
5263  for (i = 0; operands[i] != AARCH64_OPND_NIL; i++)
5264    {
5265      int64_t val;
5266      const reg_entry *reg;
5267      int comma_skipped_p = 0;
5268      aarch64_reg_type rtype;
5269      struct vector_type_el vectype;
5270      aarch64_opnd_qualifier_t qualifier, base_qualifier, offset_qualifier;
5271      aarch64_opnd_info *info = &inst.base.operands[i];
5272      aarch64_reg_type reg_type;
5273
5274      DEBUG_TRACE ("parse operand %d", i);
5275
5276      /* Assign the operand code.  */
5277      info->type = operands[i];
5278
5279      if (optional_operand_p (opcode, i))
5280	{
5281	  /* Remember where we are in case we need to backtrack.  */
5282	  gas_assert (!backtrack_pos);
5283	  backtrack_pos = str;
5284	}
5285
      /* Expect a comma between operands; the backtrack mechanism will take
	 care of cases of omitted optional operands.  */
5288      if (i > 0 && ! skip_past_char (&str, ','))
5289	{
5290	  set_syntax_error (_("comma expected between operands"));
5291	  goto failure;
5292	}
5293      else
5294	comma_skipped_p = 1;
5295
5296      switch (operands[i])
5297	{
5298	case AARCH64_OPND_Rd:
5299	case AARCH64_OPND_Rn:
5300	case AARCH64_OPND_Rm:
5301	case AARCH64_OPND_Rt:
5302	case AARCH64_OPND_Rt2:
5303	case AARCH64_OPND_Rs:
5304	case AARCH64_OPND_Ra:
5305	case AARCH64_OPND_Rt_SYS:
5306	case AARCH64_OPND_PAIRREG:
5307	case AARCH64_OPND_SVE_Rm:
5308	  po_int_reg_or_fail (REG_TYPE_R_Z);
5309	  break;
5310
5311	case AARCH64_OPND_Rd_SP:
5312	case AARCH64_OPND_Rn_SP:
5313	case AARCH64_OPND_SVE_Rn_SP:
5314	case AARCH64_OPND_Rm_SP:
5315	  po_int_reg_or_fail (REG_TYPE_R_SP);
5316	  break;
5317
5318	case AARCH64_OPND_Rm_EXT:
5319	case AARCH64_OPND_Rm_SFT:
5320	  po_misc_or_fail (parse_shifter_operand
5321			   (&str, info, (operands[i] == AARCH64_OPND_Rm_EXT
5322					 ? SHIFTED_ARITH_IMM
5323					 : SHIFTED_LOGIC_IMM)));
5324	  if (!info->shifter.operator_present)
5325	    {
5326	      /* Default to LSL if not present.  Libopcodes prefers shifter
5327		 kind to be explicit.  */
5328	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5329	      info->shifter.kind = AARCH64_MOD_LSL;
	      /* For Rm_EXT, libopcodes will carry out a further check on
		 whether or not the stack pointer is used in the instruction
		 (recall that "the extend operator is not optional unless at
		 least one of "Rd" or "Rn" is '11111' (i.e. WSP)").  */
5334	    }
5335	  break;
5336
5337	case AARCH64_OPND_Fd:
5338	case AARCH64_OPND_Fn:
5339	case AARCH64_OPND_Fm:
5340	case AARCH64_OPND_Fa:
5341	case AARCH64_OPND_Ft:
5342	case AARCH64_OPND_Ft2:
5343	case AARCH64_OPND_Sd:
5344	case AARCH64_OPND_Sn:
5345	case AARCH64_OPND_Sm:
5346	case AARCH64_OPND_SVE_VZn:
5347	case AARCH64_OPND_SVE_Vd:
5348	case AARCH64_OPND_SVE_Vm:
5349	case AARCH64_OPND_SVE_Vn:
5350	  val = aarch64_reg_parse (&str, REG_TYPE_BHSDQ, &rtype, NULL);
5351	  if (val == PARSE_FAIL)
5352	    {
5353	      first_error (_(get_reg_expected_msg (REG_TYPE_BHSDQ)));
5354	      goto failure;
5355	    }
5356	  gas_assert (rtype >= REG_TYPE_FP_B && rtype <= REG_TYPE_FP_Q);
5357
5358	  info->reg.regno = val;
5359	  info->qualifier = AARCH64_OPND_QLF_S_B + (rtype - REG_TYPE_FP_B);
5360	  break;
5361
5362	case AARCH64_OPND_SVE_Pd:
5363	case AARCH64_OPND_SVE_Pg3:
5364	case AARCH64_OPND_SVE_Pg4_5:
5365	case AARCH64_OPND_SVE_Pg4_10:
5366	case AARCH64_OPND_SVE_Pg4_16:
5367	case AARCH64_OPND_SVE_Pm:
5368	case AARCH64_OPND_SVE_Pn:
5369	case AARCH64_OPND_SVE_Pt:
5370	  reg_type = REG_TYPE_PN;
5371	  goto vector_reg;
5372
5373	case AARCH64_OPND_SVE_Za_5:
5374	case AARCH64_OPND_SVE_Za_16:
5375	case AARCH64_OPND_SVE_Zd:
5376	case AARCH64_OPND_SVE_Zm_5:
5377	case AARCH64_OPND_SVE_Zm_16:
5378	case AARCH64_OPND_SVE_Zn:
5379	case AARCH64_OPND_SVE_Zt:
5380	  reg_type = REG_TYPE_ZN;
5381	  goto vector_reg;
5382
5383	case AARCH64_OPND_Vd:
5384	case AARCH64_OPND_Vn:
5385	case AARCH64_OPND_Vm:
5386	  reg_type = REG_TYPE_VN;
5387	vector_reg:
5388	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5389	  if (val == PARSE_FAIL)
5390	    {
5391	      first_error (_(get_reg_expected_msg (reg_type)));
5392	      goto failure;
5393	    }
5394	  if (vectype.defined & NTA_HASINDEX)
5395	    goto failure;
5396
5397	  info->reg.regno = val;
5398	  if ((reg_type == REG_TYPE_PN || reg_type == REG_TYPE_ZN)
5399	      && vectype.type == NT_invtype)
5400	    /* Unqualified Pn and Zn registers are allowed in certain
5401	       contexts.  Rely on F_STRICT qualifier checking to catch
5402	       invalid uses.  */
5403	    info->qualifier = AARCH64_OPND_QLF_NIL;
5404	  else
5405	    {
5406	      info->qualifier = vectype_to_qualifier (&vectype);
5407	      if (info->qualifier == AARCH64_OPND_QLF_NIL)
5408		goto failure;
5409	    }
5410	  break;
5411
5412	case AARCH64_OPND_VdD1:
5413	case AARCH64_OPND_VnD1:
5414	  val = aarch64_reg_parse (&str, REG_TYPE_VN, NULL, &vectype);
5415	  if (val == PARSE_FAIL)
5416	    {
5417	      set_first_syntax_error (_(get_reg_expected_msg (REG_TYPE_VN)));
5418	      goto failure;
5419	    }
5420	  if (vectype.type != NT_d || vectype.index != 1)
5421	    {
5422	      set_fatal_syntax_error
5423		(_("the top half of a 128-bit FP/SIMD register is expected"));
5424	      goto failure;
5425	    }
5426	  info->reg.regno = val;
	  /* N.B.: VdD1 and VnD1 are treated as an FP or AdvSIMD scalar
	     register here; this is correct for the purpose of
	     encoding/decoding, since only the register number is explicitly
	     encoded in the related instructions, although this appears a
	     bit hacky.  */
5431	  info->qualifier = AARCH64_OPND_QLF_S_D;
5432	  break;
5433
5434	case AARCH64_OPND_SVE_Zm3_INDEX:
5435	case AARCH64_OPND_SVE_Zm3_22_INDEX:
5436	case AARCH64_OPND_SVE_Zm4_INDEX:
5437	case AARCH64_OPND_SVE_Zn_INDEX:
5438	  reg_type = REG_TYPE_ZN;
5439	  goto vector_reg_index;
5440
5441	case AARCH64_OPND_Ed:
5442	case AARCH64_OPND_En:
5443	case AARCH64_OPND_Em:
5444	  reg_type = REG_TYPE_VN;
5445	vector_reg_index:
5446	  val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5447	  if (val == PARSE_FAIL)
5448	    {
5449	      first_error (_(get_reg_expected_msg (reg_type)));
5450	      goto failure;
5451	    }
5452	  if (vectype.type == NT_invtype || !(vectype.defined & NTA_HASINDEX))
5453	    goto failure;
5454
5455	  info->reglane.regno = val;
5456	  info->reglane.index = vectype.index;
5457	  info->qualifier = vectype_to_qualifier (&vectype);
5458	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5459	    goto failure;
5460	  break;
5461
5462	case AARCH64_OPND_SVE_ZnxN:
5463	case AARCH64_OPND_SVE_ZtxN:
5464	  reg_type = REG_TYPE_ZN;
5465	  goto vector_reg_list;
5466
5467	case AARCH64_OPND_LVn:
5468	case AARCH64_OPND_LVt:
5469	case AARCH64_OPND_LVt_AL:
5470	case AARCH64_OPND_LEt:
5471	  reg_type = REG_TYPE_VN;
5472	vector_reg_list:
5473	  if (reg_type == REG_TYPE_ZN
5474	      && get_opcode_dependent_value (opcode) == 1
5475	      && *str != '{')
5476	    {
5477	      val = aarch64_reg_parse (&str, reg_type, NULL, &vectype);
5478	      if (val == PARSE_FAIL)
5479		{
5480		  first_error (_(get_reg_expected_msg (reg_type)));
5481		  goto failure;
5482		}
5483	      info->reglist.first_regno = val;
5484	      info->reglist.num_regs = 1;
5485	    }
5486	  else
5487	    {
5488	      val = parse_vector_reg_list (&str, reg_type, &vectype);
5489	      if (val == PARSE_FAIL)
5490		goto failure;
5491	      if (! reg_list_valid_p (val, /* accept_alternate */ 0))
5492		{
5493		  set_fatal_syntax_error (_("invalid register list"));
5494		  goto failure;
5495		}
5496	      info->reglist.first_regno = (val >> 2) & 0x1f;
5497	      info->reglist.num_regs = (val & 0x3) + 1;
5498	    }
5499	  if (operands[i] == AARCH64_OPND_LEt)
5500	    {
5501	      if (!(vectype.defined & NTA_HASINDEX))
5502		goto failure;
5503	      info->reglist.has_index = 1;
5504	      info->reglist.index = vectype.index;
5505	    }
5506	  else
5507	    {
5508	      if (vectype.defined & NTA_HASINDEX)
5509		goto failure;
5510	      if (!(vectype.defined & NTA_HASTYPE))
5511		{
5512		  if (reg_type == REG_TYPE_ZN)
5513		    set_fatal_syntax_error (_("missing type suffix"));
5514		  goto failure;
5515		}
5516	    }
5517	  info->qualifier = vectype_to_qualifier (&vectype);
5518	  if (info->qualifier == AARCH64_OPND_QLF_NIL)
5519	    goto failure;
5520	  break;
5521
5522	case AARCH64_OPND_CRn:
5523	case AARCH64_OPND_CRm:
5524	    {
5525	      char prefix = *(str++);
5526	      if (prefix != 'c' && prefix != 'C')
5527		goto failure;
5528
5529	      po_imm_nc_or_fail ();
5530	      if (val > 15)
5531		{
5532		  set_fatal_syntax_error (_(N_ ("C0 - C15 expected")));
5533		  goto failure;
5534		}
5535	      info->qualifier = AARCH64_OPND_QLF_CR;
5536	      info->imm.value = val;
5537	      break;
5538	    }
5539
5540	case AARCH64_OPND_SHLL_IMM:
5541	case AARCH64_OPND_IMM_VLSR:
5542	  po_imm_or_fail (1, 64);
5543	  info->imm.value = val;
5544	  break;
5545
5546	case AARCH64_OPND_CCMP_IMM:
5547	case AARCH64_OPND_SIMM5:
5548	case AARCH64_OPND_FBITS:
5549	case AARCH64_OPND_UIMM4:
5550	case AARCH64_OPND_UIMM3_OP1:
5551	case AARCH64_OPND_UIMM3_OP2:
5552	case AARCH64_OPND_IMM_VLSL:
5553	case AARCH64_OPND_IMM:
5554	case AARCH64_OPND_WIDTH:
5555	case AARCH64_OPND_SVE_INV_LIMM:
5556	case AARCH64_OPND_SVE_LIMM:
5557	case AARCH64_OPND_SVE_LIMM_MOV:
5558	case AARCH64_OPND_SVE_SHLIMM_PRED:
5559	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
5560	case AARCH64_OPND_SVE_SHRIMM_PRED:
5561	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
5562	case AARCH64_OPND_SVE_SIMM5:
5563	case AARCH64_OPND_SVE_SIMM5B:
5564	case AARCH64_OPND_SVE_SIMM6:
5565	case AARCH64_OPND_SVE_SIMM8:
5566	case AARCH64_OPND_SVE_UIMM3:
5567	case AARCH64_OPND_SVE_UIMM7:
5568	case AARCH64_OPND_SVE_UIMM8:
5569	case AARCH64_OPND_SVE_UIMM8_53:
5570	case AARCH64_OPND_IMM_ROT1:
5571	case AARCH64_OPND_IMM_ROT2:
5572	case AARCH64_OPND_IMM_ROT3:
5573	case AARCH64_OPND_SVE_IMM_ROT1:
5574	case AARCH64_OPND_SVE_IMM_ROT2:
5575	  po_imm_nc_or_fail ();
5576	  info->imm.value = val;
5577	  break;
5578
5579	case AARCH64_OPND_SVE_AIMM:
5580	case AARCH64_OPND_SVE_ASIMM:
5581	  po_imm_nc_or_fail ();
5582	  info->imm.value = val;
5583	  skip_whitespace (str);
5584	  if (skip_past_comma (&str))
5585	    po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5586	  else
5587	    inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5588	  break;
5589
5590	case AARCH64_OPND_SVE_PATTERN:
5591	  po_enum_or_fail (aarch64_sve_pattern_array);
5592	  info->imm.value = val;
5593	  break;
5594
5595	case AARCH64_OPND_SVE_PATTERN_SCALED:
5596	  po_enum_or_fail (aarch64_sve_pattern_array);
5597	  info->imm.value = val;
5598	  if (skip_past_comma (&str)
5599	      && !parse_shift (&str, info, SHIFTED_MUL))
5600	    goto failure;
5601	  if (!info->shifter.operator_present)
5602	    {
5603	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5604	      info->shifter.kind = AARCH64_MOD_MUL;
5605	      info->shifter.amount = 1;
5606	    }
5607	  break;
5608
5609	case AARCH64_OPND_SVE_PRFOP:
5610	  po_enum_or_fail (aarch64_sve_prfop_array);
5611	  info->imm.value = val;
5612	  break;
5613
5614	case AARCH64_OPND_UIMM7:
5615	  po_imm_or_fail (0, 127);
5616	  info->imm.value = val;
5617	  break;
5618
5619	case AARCH64_OPND_IDX:
5620	case AARCH64_OPND_BIT_NUM:
5621	case AARCH64_OPND_IMMR:
5622	case AARCH64_OPND_IMMS:
5623	  po_imm_or_fail (0, 63);
5624	  info->imm.value = val;
5625	  break;
5626
5627	case AARCH64_OPND_IMM0:
5628	  po_imm_nc_or_fail ();
5629	  if (val != 0)
5630	    {
5631	      set_fatal_syntax_error (_("immediate zero expected"));
5632	      goto failure;
5633	    }
5634	  info->imm.value = 0;
5635	  break;
5636
5637	case AARCH64_OPND_FPIMM0:
5638	  {
5639	    int qfloat;
5640	    bfd_boolean res1 = FALSE, res2 = FALSE;
5641	    /* N.B. -0.0 will be rejected; although -0.0 shouldn't be rejected,
5642	       it is probably not worth the effort to support it.  */
5643	    if (!(res1 = parse_aarch64_imm_float (&str, &qfloat, FALSE,
5644						  imm_reg_type))
5645		&& (error_p ()
5646		    || !(res2 = parse_constant_immediate (&str, &val,
5647							  imm_reg_type))))
5648	      goto failure;
5649	    if ((res1 && qfloat == 0) || (res2 && val == 0))
5650	      {
5651		info->imm.value = 0;
5652		info->imm.is_fp = 1;
5653		break;
5654	      }
5655	    set_fatal_syntax_error (_("immediate zero expected"));
5656	    goto failure;
5657	  }
5658
5659	case AARCH64_OPND_IMM_MOV:
5660	  {
5661	    char *saved = str;
5662	    if (reg_name_p (str, REG_TYPE_R_Z_SP)
5663		|| reg_name_p (str, REG_TYPE_VN))
5664	      goto failure;
5665	    str = saved;
5666	    po_misc_or_fail (my_get_expression (&inst.reloc.exp, &str,
5667						GE_OPT_PREFIX, 1));
5668	    /* The MOV immediate alias will be fixed up by fix_mov_imm_insn
5669	       later.  fix_mov_imm_insn will try to determine a machine
5670	       instruction (MOVZ, MOVN or ORR) for it and will issue an error
5671	       message if the immediate cannot be moved by a single
5672	       instruction.  */
5673	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
5674	    inst.base.operands[i].skip = 1;
5675	  }
5676	  break;
5677
5678	case AARCH64_OPND_SIMD_IMM:
5679	case AARCH64_OPND_SIMD_IMM_SFT:
5680	  if (! parse_big_immediate (&str, &val, imm_reg_type))
5681	    goto failure;
5682	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5683					      /* addr_off_p */ 0,
5684					      /* need_libopcodes_p */ 1,
5685					      /* skip_p */ 1);
5686	  /* Parse shift.
5687	     N.B. although AARCH64_OPND_SIMD_IMM doesn't permit any
5688	     shift, we don't check it here; we leave the checking to
5689	     the libopcodes (operand_general_constraint_met_p).  By
5690	     doing this, we achieve better diagnostics.  */
5691	  if (skip_past_comma (&str)
5692	      && ! parse_shift (&str, info, SHIFTED_LSL_MSL))
5693	    goto failure;
5694	  if (!info->shifter.operator_present
5695	      && info->type == AARCH64_OPND_SIMD_IMM_SFT)
5696	    {
5697	      /* Default to LSL if not present.  Libopcodes prefers shifter
5698		 kind to be explicit.  */
5699	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5700	      info->shifter.kind = AARCH64_MOD_LSL;
5701	    }
5702	  break;
5703
5704	case AARCH64_OPND_FPIMM:
5705	case AARCH64_OPND_SIMD_FPIMM:
5706	case AARCH64_OPND_SVE_FPIMM8:
5707	  {
5708	    int qfloat;
5709	    bfd_boolean dp_p;
5710
5711	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
5712	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type)
5713		|| !aarch64_imm_float_p (qfloat))
5714	      {
5715		if (!error_p ())
5716		  set_fatal_syntax_error (_("invalid floating-point"
5717					    " constant"));
5718		goto failure;
5719	      }
5720	    inst.base.operands[i].imm.value = encode_imm_float_bits (qfloat);
5721	    inst.base.operands[i].imm.is_fp = 1;
5722	  }
5723	  break;
5724
5725	case AARCH64_OPND_SVE_I1_HALF_ONE:
5726	case AARCH64_OPND_SVE_I1_HALF_TWO:
5727	case AARCH64_OPND_SVE_I1_ZERO_ONE:
5728	  {
5729	    int qfloat;
5730	    bfd_boolean dp_p;
5731
5732	    dp_p = double_precision_operand_p (&inst.base.operands[0]);
5733	    if (!parse_aarch64_imm_float (&str, &qfloat, dp_p, imm_reg_type))
5734	      {
5735		if (!error_p ())
5736		  set_fatal_syntax_error (_("invalid floating-point"
5737					    " constant"));
5738		goto failure;
5739	      }
5740	    inst.base.operands[i].imm.value = qfloat;
5741	    inst.base.operands[i].imm.is_fp = 1;
5742	  }
5743	  break;
5744
5745	case AARCH64_OPND_LIMM:
5746	  po_misc_or_fail (parse_shifter_operand (&str, info,
5747						  SHIFTED_LOGIC_IMM));
5748	  if (info->shifter.operator_present)
5749	    {
5750	      set_fatal_syntax_error
5751		(_("shift not allowed for bitmask immediate"));
5752	      goto failure;
5753	    }
5754	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5755					      /* addr_off_p */ 0,
5756					      /* need_libopcodes_p */ 1,
5757					      /* skip_p */ 1);
5758	  break;
5759
5760	case AARCH64_OPND_AIMM:
5761	  if (opcode->op == OP_ADD)
5762	    /* ADD may have relocation types.  */
5763	    po_misc_or_fail (parse_shifter_operand_reloc (&str, info,
5764							  SHIFTED_ARITH_IMM));
5765	  else
5766	    po_misc_or_fail (parse_shifter_operand (&str, info,
5767						    SHIFTED_ARITH_IMM));
5768	  switch (inst.reloc.type)
5769	    {
5770	    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
5771	      info->shifter.amount = 12;
5772	      break;
5773	    case BFD_RELOC_UNUSED:
5774	      aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5775	      if (info->shifter.kind != AARCH64_MOD_NONE)
5776		inst.reloc.flags = FIXUP_F_HAS_EXPLICIT_SHIFT;
5777	      inst.reloc.pc_rel = 0;
5778	      break;
5779	    default:
5780	      break;
5781	    }
5782	  info->imm.value = 0;
5783	  if (!info->shifter.operator_present)
5784	    {
5785	      /* Default to LSL if not present.  Libopcodes prefers shifter
5786		 kind to be explicit.  */
5787	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5788	      info->shifter.kind = AARCH64_MOD_LSL;
5789	    }
5790	  break;
5791
5792	case AARCH64_OPND_HALF:
5793	    {
5794	      /* #<imm16> or relocation.  */
5795	      int internal_fixup_p;
5796	      po_misc_or_fail (parse_half (&str, &internal_fixup_p));
5797	      if (internal_fixup_p)
5798		aarch64_set_gas_internal_fixup (&inst.reloc, info, 0);
5799	      skip_whitespace (str);
5800	      if (skip_past_comma (&str))
5801		{
5802		  /* {, LSL #<shift>}  */
5803		  if (! aarch64_gas_internal_fixup_p ())
5804		    {
5805		      set_fatal_syntax_error (_("can't mix relocation modifier "
5806						"with explicit shift"));
5807		      goto failure;
5808		    }
5809		  po_misc_or_fail (parse_shift (&str, info, SHIFTED_LSL));
5810		}
5811	      else
5812		inst.base.operands[i].shifter.amount = 0;
5813	      inst.base.operands[i].shifter.kind = AARCH64_MOD_LSL;
5814	      inst.base.operands[i].imm.value = 0;
5815	      if (! process_movw_reloc_info ())
5816		goto failure;
5817	    }
5818	  break;
5819
5820	case AARCH64_OPND_EXCEPTION:
5821	  po_misc_or_fail (parse_immediate_expression (&str, &inst.reloc.exp,
5822						       imm_reg_type));
5823	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
5824					      /* addr_off_p */ 0,
5825					      /* need_libopcodes_p */ 0,
5826					      /* skip_p */ 1);
5827	  break;
5828
5829	case AARCH64_OPND_NZCV:
5830	  {
5831	    const asm_nzcv *nzcv = hash_find_n (aarch64_nzcv_hsh, str, 4);
5832	    if (nzcv != NULL)
5833	      {
5834		str += 4;
5835		info->imm.value = nzcv->value;
5836		break;
5837	      }
5838	    po_imm_or_fail (0, 15);
5839	    info->imm.value = val;
5840	  }
5841	  break;
5842
5843	case AARCH64_OPND_COND:
5844	case AARCH64_OPND_COND1:
5845	  {
5846	    char *start = str;
5847	    do
5848	      str++;
5849	    while (ISALPHA (*str));
5850	    info->cond = hash_find_n (aarch64_cond_hsh, start, str - start);
5851	    if (info->cond == NULL)
5852	      {
5853		set_syntax_error (_("invalid condition"));
5854		goto failure;
5855	      }
5856	    else if (operands[i] == AARCH64_OPND_COND1
5857		     && (info->cond->value & 0xe) == 0xe)
5858	      {
5859		/* Do not allow AL or NV.  */
5860		set_default_error ();
5861		goto failure;
5862	      }
5863	  }
5864	  break;
5865
5866	case AARCH64_OPND_ADDR_ADRP:
5867	  po_misc_or_fail (parse_adrp (&str));
5868	  /* Clear the value as the operand needs to be relocated.  */
5869	  info->imm.value = 0;
5870	  break;
5871
5872	case AARCH64_OPND_ADDR_PCREL14:
5873	case AARCH64_OPND_ADDR_PCREL19:
5874	case AARCH64_OPND_ADDR_PCREL21:
5875	case AARCH64_OPND_ADDR_PCREL26:
5876	  po_misc_or_fail (parse_address (&str, info));
5877	  if (!info->addr.pcrel)
5878	    {
5879	      set_syntax_error (_("invalid pc-relative address"));
5880	      goto failure;
5881	    }
5882	  if (inst.gen_lit_pool
5883	      && (opcode->iclass != loadlit || opcode->op == OP_PRFM_LIT))
5884	    {
5885	      /* Only permit "=value" in the literal load instructions.
5886		 The literal will be generated by programmer_friendly_fixup.  */
5887	      set_syntax_error (_("invalid use of \"=immediate\""));
5888	      goto failure;
5889	    }
5890	  if (inst.reloc.exp.X_op == O_symbol && find_reloc_table_entry (&str))
5891	    {
5892	      set_syntax_error (_("unrecognized relocation suffix"));
5893	      goto failure;
5894	    }
5895	  if (inst.reloc.exp.X_op == O_constant && !inst.gen_lit_pool)
5896	    {
5897	      info->imm.value = inst.reloc.exp.X_add_number;
5898	      inst.reloc.type = BFD_RELOC_UNUSED;
5899	    }
5900	  else
5901	    {
5902	      info->imm.value = 0;
5903	      if (inst.reloc.type == BFD_RELOC_UNUSED)
5904		switch (opcode->iclass)
5905		  {
5906		  case compbranch:
5907		  case condbranch:
5908		    /* e.g. CBZ or B.COND  */
5909		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5910		    inst.reloc.type = BFD_RELOC_AARCH64_BRANCH19;
5911		    break;
5912		  case testbranch:
5913		    /* e.g. TBZ  */
5914		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL14);
5915		    inst.reloc.type = BFD_RELOC_AARCH64_TSTBR14;
5916		    break;
5917		  case branch_imm:
5918		    /* e.g. B or BL  */
5919		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL26);
5920		    inst.reloc.type =
5921		      (opcode->op == OP_BL) ? BFD_RELOC_AARCH64_CALL26
5922			 : BFD_RELOC_AARCH64_JUMP26;
5923		    break;
5924		  case loadlit:
5925		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL19);
5926		    inst.reloc.type = BFD_RELOC_AARCH64_LD_LO19_PCREL;
5927		    break;
5928		  case pcreladdr:
5929		    gas_assert (operands[i] == AARCH64_OPND_ADDR_PCREL21);
5930		    inst.reloc.type = BFD_RELOC_AARCH64_ADR_LO21_PCREL;
5931		    break;
5932		  default:
5933		    gas_assert (0);
5934		    abort ();
5935		  }
5936	      inst.reloc.pc_rel = 1;
5937	    }
5938	  break;
5939
5940	case AARCH64_OPND_ADDR_SIMPLE:
5941	case AARCH64_OPND_SIMD_ADDR_SIMPLE:
5942	  {
5943	    /* [<Xn|SP>{, #<simm>}]  */
5944	    char *start = str;
5945	    /* First use the normal address-parsing routines, to get
5946	       the usual syntax errors.  */
5947	    po_misc_or_fail (parse_address (&str, info));
5948	    if (info->addr.pcrel || info->addr.offset.is_reg
5949		|| !info->addr.preind || info->addr.postind
5950		|| info->addr.writeback)
5951	      {
5952		set_syntax_error (_("invalid addressing mode"));
5953		goto failure;
5954	      }
5955
5956	    /* Then retry, matching the specific syntax of these addresses.  */
5957	    str = start;
5958	    po_char_or_fail ('[');
5959	    po_reg_or_fail (REG_TYPE_R64_SP);
5960	    /* Accept optional ", #0".  */
5961	    if (operands[i] == AARCH64_OPND_ADDR_SIMPLE
5962		&& skip_past_char (&str, ','))
5963	      {
5964		skip_past_char (&str, '#');
5965		if (! skip_past_char (&str, '0'))
5966		  {
5967		    set_fatal_syntax_error
5968		      (_("the optional immediate offset can only be 0"));
5969		    goto failure;
5970		  }
5971	      }
5972	    po_char_or_fail (']');
5973	    break;
5974	  }
5975
5976	case AARCH64_OPND_ADDR_REGOFF:
5977	  /* [<Xn|SP>, <R><m>{, <extend> {<amount>}}]  */
5978	  po_misc_or_fail (parse_address (&str, info));
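	  /* Checks shared with the SVE register-offset addressing forms below,
	     which branch back here after validating their qualifiers.  */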
5979	regoff_addr:
5980	  if (info->addr.pcrel || !info->addr.offset.is_reg
5981	      || !info->addr.preind || info->addr.postind
5982	      || info->addr.writeback)
5983	    {
5984	      set_syntax_error (_("invalid addressing mode"));
5985	      goto failure;
5986	    }
5987	  if (!info->shifter.operator_present)
5988	    {
5989	      /* Default to LSL if not present.  Libopcodes prefers shifter
5990		 kind to be explicit.  */
5991	      gas_assert (info->shifter.kind == AARCH64_MOD_NONE);
5992	      info->shifter.kind = AARCH64_MOD_LSL;
5993	    }
5994	  /* Qualifier to be deduced by libopcodes.  */
5995	  break;
5996
5997	case AARCH64_OPND_ADDR_SIMM7:
5998	  po_misc_or_fail (parse_address (&str, info));
5999	  if (info->addr.pcrel || info->addr.offset.is_reg
6000	      || (!info->addr.preind && !info->addr.postind))
6001	    {
6002	      set_syntax_error (_("invalid addressing mode"));
6003	      goto failure;
6004	    }
6005	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6006	    {
6007	      set_syntax_error (_("relocation not allowed"));
6008	      goto failure;
6009	    }
6010	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6011					      /* addr_off_p */ 1,
6012					      /* need_libopcodes_p */ 1,
6013					      /* skip_p */ 0);
6014	  break;
6015
6016	case AARCH64_OPND_ADDR_SIMM9:
6017	case AARCH64_OPND_ADDR_SIMM9_2:
6018	  po_misc_or_fail (parse_address (&str, info));
6019	  if (info->addr.pcrel || info->addr.offset.is_reg
6020	      || (!info->addr.preind && !info->addr.postind)
6021	      || (operands[i] == AARCH64_OPND_ADDR_SIMM9_2
6022		  && info->addr.writeback))
6023	    {
6024	      set_syntax_error (_("invalid addressing mode"));
6025	      goto failure;
6026	    }
6027	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6028	    {
6029	      set_syntax_error (_("relocation not allowed"));
6030	      goto failure;
6031	    }
6032	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6033					      /* addr_off_p */ 1,
6034					      /* need_libopcodes_p */ 1,
6035					      /* skip_p */ 0);
6036	  break;
6037
6038	case AARCH64_OPND_ADDR_SIMM10:
6039	  po_misc_or_fail (parse_address (&str, info));
6040	  if (info->addr.pcrel || info->addr.offset.is_reg
6041	      || !info->addr.preind || info->addr.postind)
6042	    {
6043	      set_syntax_error (_("invalid addressing mode"));
6044	      goto failure;
6045	    }
6046	  if (inst.reloc.type != BFD_RELOC_UNUSED)
6047	    {
6048	      set_syntax_error (_("relocation not allowed"));
6049	      goto failure;
6050	    }
6051	  assign_imm_if_const_or_fixup_later (&inst.reloc, info,
6052					      /* addr_off_p */ 1,
6053					      /* need_libopcodes_p */ 1,
6054					      /* skip_p */ 0);
6055	  break;
6056
6057	case AARCH64_OPND_ADDR_UIMM12:
6058	  po_misc_or_fail (parse_address (&str, info));
6059	  if (info->addr.pcrel || info->addr.offset.is_reg
6060	      || !info->addr.preind || info->addr.writeback)
6061	    {
6062	      set_syntax_error (_("invalid addressing mode"));
6063	      goto failure;
6064	    }
6065	  if (inst.reloc.type == BFD_RELOC_UNUSED)
6066	    aarch64_set_gas_internal_fixup (&inst.reloc, info, 1);
6067	  else if (inst.reloc.type == BFD_RELOC_AARCH64_LDST_LO12
6068		   || (inst.reloc.type
6069		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12)
6070		   || (inst.reloc.type
6071		       == BFD_RELOC_AARCH64_TLSLD_LDST_DTPREL_LO12_NC))
6072	    inst.reloc.type = ldst_lo12_determine_real_reloc_type ();
6073	  /* Leave qualifier to be determined by libopcodes.  */
6074	  break;
6075
6076	case AARCH64_OPND_SIMD_ADDR_POST:
6077	  /* [<Xn|SP>], <Xm|#<amount>>  */
6078	  po_misc_or_fail (parse_address (&str, info));
6079	  if (!info->addr.postind || !info->addr.writeback)
6080	    {
6081	      set_syntax_error (_("invalid addressing mode"));
6082	      goto failure;
6083	    }
6084	  if (!info->addr.offset.is_reg)
6085	    {
6086	      if (inst.reloc.exp.X_op == O_constant)
6087		info->addr.offset.imm = inst.reloc.exp.X_add_number;
6088	      else
6089		{
6090		  set_fatal_syntax_error
6091		    (_("writeback value must be an immediate constant"));
6092		  goto failure;
6093		}
6094	    }
6095	  /* No qualifier.  */
6096	  break;
6097
6098	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
6099	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
6100	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
6101	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
6102	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
6103	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
6104	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
6105	case AARCH64_OPND_SVE_ADDR_RI_U6:
6106	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
6107	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
6108	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
6109	  /* [X<n>{, #imm, MUL VL}]
6110	     [X<n>{, #imm}]
6111	     but recognizing SVE registers.  */
6112	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6113					      &offset_qualifier));
6114	  if (base_qualifier != AARCH64_OPND_QLF_X)
6115	    {
6116	      set_syntax_error (_("invalid addressing mode"));
6117	      goto failure;
6118	    }
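	  /* Checks shared with the SVE_ADDR_ZI_* forms below, which branch
	     back here after validating their vector base qualifier.  */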
6119	sve_regimm:
6120	  if (info->addr.pcrel || info->addr.offset.is_reg
6121	      || !info->addr.preind || info->addr.writeback)
6122	    {
6123	      set_syntax_error (_("invalid addressing mode"));
6124	      goto failure;
6125	    }
6126	  if (inst.reloc.type != BFD_RELOC_UNUSED
6127	      || inst.reloc.exp.X_op != O_constant)
6128	    {
6129	      /* Make sure this has priority over
6130		 "invalid addressing mode".  */
6131	      set_fatal_syntax_error (_("constant offset required"));
6132	      goto failure;
6133	    }
6134	  info->addr.offset.imm = inst.reloc.exp.X_add_number;
6135	  break;
6136
6137	case AARCH64_OPND_SVE_ADDR_RR:
6138	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
6139	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
6140	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
6141	case AARCH64_OPND_SVE_ADDR_RX:
6142	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
6143	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
6144	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
6145	  /* [<Xn|SP>, <R><m>{, lsl #<amount>}]
6146	     but recognizing SVE registers.  */
6147	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6148					      &offset_qualifier));
6149	  if (base_qualifier != AARCH64_OPND_QLF_X
6150	      || offset_qualifier != AARCH64_OPND_QLF_X)
6151	    {
6152	      set_syntax_error (_("invalid addressing mode"));
6153	      goto failure;
6154	    }
6155	  goto regoff_addr;
6156
6157	case AARCH64_OPND_SVE_ADDR_RZ:
6158	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
6159	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
6160	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
6161	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
6162	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
6163	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
6164	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
6165	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
6166	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
6167	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
6168	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
6169	  /* [<Xn|SP>, Z<m>.D{, LSL #<amount>}]
6170	     [<Xn|SP>, Z<m>.<T>, <extend> {#<amount>}]  */
6171	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6172					      &offset_qualifier));
6173	  if (base_qualifier != AARCH64_OPND_QLF_X
6174	      || (offset_qualifier != AARCH64_OPND_QLF_S_S
6175		  && offset_qualifier != AARCH64_OPND_QLF_S_D))
6176	    {
6177	      set_syntax_error (_("invalid addressing mode"));
6178	      goto failure;
6179	    }
6180	  info->qualifier = offset_qualifier;
6181	  goto regoff_addr;
6182
6183	case AARCH64_OPND_SVE_ADDR_ZI_U5:
6184	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
6185	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
6186	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
6187	  /* [Z<n>.<T>{, #imm}]  */
6188	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6189					      &offset_qualifier));
6190	  if (base_qualifier != AARCH64_OPND_QLF_S_S
6191	      && base_qualifier != AARCH64_OPND_QLF_S_D)
6192	    {
6193	      set_syntax_error (_("invalid addressing mode"));
6194	      goto failure;
6195	    }
6196	  info->qualifier = base_qualifier;
6197	  goto sve_regimm;
6198
6199	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
6200	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
6201	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
6202	  /* [Z<n>.<T>, Z<m>.<T>{, LSL #<amount>}]
6203	     [Z<n>.D, Z<m>.D, <extend> {#<amount>}]
6204
6205	     We don't reject:
6206
6207	     [Z<n>.S, Z<m>.S, <extend> {#<amount>}]
6208
6209	     here since we get better error messages by leaving it to
6210	     the qualifier checking routines.  */
6211	  po_misc_or_fail (parse_sve_address (&str, info, &base_qualifier,
6212					      &offset_qualifier));
6213	  if ((base_qualifier != AARCH64_OPND_QLF_S_S
6214	       && base_qualifier != AARCH64_OPND_QLF_S_D)
6215	      || offset_qualifier != base_qualifier)
6216	    {
6217	      set_syntax_error (_("invalid addressing mode"));
6218	      goto failure;
6219	    }
6220	  info->qualifier = base_qualifier;
6221	  goto regoff_addr;
6222
6223	case AARCH64_OPND_SYSREG:
6224	  if ((val = parse_sys_reg (&str, aarch64_sys_regs_hsh, 1, 0))
6225	      == PARSE_FAIL)
6226	    {
6227	      set_syntax_error (_("unknown or missing system register name"));
6228	      goto failure;
6229	    }
6230	  inst.base.operands[i].sysreg = val;
6231	  break;
6232
6233	case AARCH64_OPND_PSTATEFIELD:
6234	  if ((val = parse_sys_reg (&str, aarch64_pstatefield_hsh, 0, 1))
6235	      == PARSE_FAIL)
6236	    {
6237	      set_syntax_error (_("unknown or missing PSTATE field name"));
6238	      goto failure;
6239	    }
6240	  inst.base.operands[i].pstatefield = val;
6241	  break;
6242
6243	case AARCH64_OPND_SYSREG_IC:
6244	  inst.base.operands[i].sysins_op =
6245	    parse_sys_ins_reg (&str, aarch64_sys_regs_ic_hsh);
6246	  goto sys_reg_ins;
6247	case AARCH64_OPND_SYSREG_DC:
6248	  inst.base.operands[i].sysins_op =
6249	    parse_sys_ins_reg (&str, aarch64_sys_regs_dc_hsh);
6250	  goto sys_reg_ins;
6251	case AARCH64_OPND_SYSREG_AT:
6252	  inst.base.operands[i].sysins_op =
6253	    parse_sys_ins_reg (&str, aarch64_sys_regs_at_hsh);
6254	  goto sys_reg_ins;
6255	case AARCH64_OPND_SYSREG_TLBI:
6256	  inst.base.operands[i].sysins_op =
6257	    parse_sys_ins_reg (&str, aarch64_sys_regs_tlbi_hsh);
6258sys_reg_ins:
6259	  if (inst.base.operands[i].sysins_op == NULL)
6260	    {
6261	      set_fatal_syntax_error (_("unknown or missing operation name"));
6262	      goto failure;
6263	    }
6264	  break;
6265
6266	case AARCH64_OPND_BARRIER:
6267	case AARCH64_OPND_BARRIER_ISB:
6268	  val = parse_barrier (&str);
6269	  if (val != PARSE_FAIL
6270	      && operands[i] == AARCH64_OPND_BARRIER_ISB && val != 0xf)
6271	    {
6272	      /* ISB only accepts the option name 'sy'.  */
6273	      set_syntax_error
6274		(_("the specified option is not accepted in ISB"));
6275	      /* Turn off backtrack as this optional operand is present.  */
6276	      backtrack_pos = 0;
6277	      goto failure;
6278	    }
6279	  /* This is an extension to accept a 0..15 immediate.  */
6280	  if (val == PARSE_FAIL)
6281	    po_imm_or_fail (0, 15);
6282	  info->barrier = aarch64_barrier_options + val;
6283	  break;
6284
6285	case AARCH64_OPND_PRFOP:
6286	  val = parse_pldop (&str);
6287	  /* This is an extension to accept a 0..31 immediate.  */
6288	  if (val == PARSE_FAIL)
6289	    po_imm_or_fail (0, 31);
6290	  inst.base.operands[i].prfop = aarch64_prfops + val;
6291	  break;
6292
6293	case AARCH64_OPND_BARRIER_PSB:
6294	  val = parse_barrier_psb (&str, &(info->hint_option));
6295	  if (val == PARSE_FAIL)
6296	    goto failure;
6297	  break;
6298
6299	default:
6300	  as_fatal (_("unhandled operand code %d"), operands[i]);
6301	}
6302
6303      /* If we get here, this operand was successfully parsed.  */
6304      inst.base.operands[i].present = 1;
6305      continue;
6306
6307failure:
6308      /* The parse routine should already have set the error, but in case
6309	 not, set a default one here.  */
6310      if (! error_p ())
6311	set_default_error ();
6312
6313      if (! backtrack_pos)
6314	goto parse_operands_return;
6315
6316      {
6317	/* We reach here because this operand is marked as optional, and
6318	   either no operand was supplied or the operand was supplied but it
6319	   was syntactically incorrect.  In the latter case we report an
6320	   error.  In the former case we perform a few more checks before
6321	   dropping through to the code to insert the default operand.  */
6322
6323	char *tmp = backtrack_pos;
6324	char endchar = END_OF_INSN;
6325
6326	if (i != (aarch64_num_of_operands (opcode) - 1))
6327	  endchar = ',';
6328	skip_past_char (&tmp, ',');
6329
6330	if (*tmp != endchar)
6331	  /* The user has supplied an operand in the wrong format.  */
6332	  goto parse_operands_return;
6333
6334	/* Make sure there is not a comma before the optional operand.
6335	   For example the fifth operand of 'sys' is optional:
6336
6337	     sys #0,c0,c0,#0,  <--- wrong
6338	     sys #0,c0,c0,#0   <--- correct.  */
6339	if (comma_skipped_p && i && endchar == END_OF_INSN)
6340	  {
6341	    set_fatal_syntax_error
6342	      (_("unexpected comma before the omitted optional operand"));
6343	    goto parse_operands_return;
6344	  }
6345      }
6346
6347      /* Reaching here means we are dealing with an optional operand that is
6348	 omitted from the assembly line.  */
6349      gas_assert (optional_operand_p (opcode, i));
6350      info->present = 0;
6351      process_omitted_operand (operands[i], opcode, i, info);
6352
6353      /* Try again, skipping the optional operand at backtrack_pos.  */
6354      str = backtrack_pos;
6355      backtrack_pos = 0;
6356
6357      /* Clear any error record after the omitted optional operand has been
6358	 successfully handled.  */
6359      clear_error ();
6360    }
6361
6362  /* Check if we have parsed all the operands.  */
6363  if (*str != '\0' && ! error_p ())
6364    {
6365      /* Set I to the index of the last present operand; this is
6366	 for the purpose of diagnostics.  */
6367      for (i -= 1; i >= 0 && !inst.base.operands[i].present; --i)
6368	;
6369      set_fatal_syntax_error
6370	(_("unexpected characters following instruction"));
6371    }
6372
6373parse_operands_return:
6374
6375  if (error_p ())
6376    {
6377      DEBUG_TRACE ("parsing FAIL: %s - %s",
6378		   operand_mismatch_kind_names[get_error_kind ()],
6379		   get_error_message ());
6380      /* Record the operand error properly; this is useful when there
6381	 are multiple instruction templates for a mnemonic name, so that
6382	 later on, we can select the error that most closely describes
6383	 the problem.  */
6384      record_operand_error (opcode, i, get_error_kind (),
6385			    get_error_message ());
6386      return FALSE;
6387    }
6388  else
6389    {
6390      DEBUG_TRACE ("parsing SUCCESS");
6391      return TRUE;
6392    }
6393}
6394
6395/* Perform some fix-ups to provide programmer-friendly features while
6396   keeping libopcodes happy, i.e. libopcodes only accepts
6397   the preferred architectural syntax.
6398   Return FALSE if there is any failure; otherwise return TRUE.  */
6399
6400static bfd_boolean
6401programmer_friendly_fixup (aarch64_instruction *instr)
6402{
6403  aarch64_inst *base = &instr->base;
6404  const aarch64_opcode *opcode = base->opcode;
6405  enum aarch64_op op = opcode->op;
6406  aarch64_opnd_info *operands = base->operands;
6407
6408  DEBUG_TRACE ("enter");
6409
6410  switch (opcode->iclass)
6411    {
6412    case testbranch:
6413      /* TBNZ Xn|Wn, #uimm6, label
6414	 Test and Branch Not Zero: conditionally jumps to label if bit number
6415	 uimm6 in register Xn is not zero.  The bit number implies the width of
6416	 the register, which may be written and should be disassembled as Wn if
6417	 uimm is less than 32.  */
6418      if (operands[0].qualifier == AARCH64_OPND_QLF_W)
6419	{
6420	  if (operands[1].imm.value >= 32)
6421	    {
6422	      record_operand_out_of_range_error (opcode, 1, _("immediate value"),
6423						 0, 31);
6424	      return FALSE;
6425	    }
6426	  operands[0].qualifier = AARCH64_OPND_QLF_X;
6427	}
6428      break;
6429    case loadlit:
6430      /* LDR Wt, label | =value
6431	 As a convenience, assemblers will typically permit the notation
6432	 "=value" in conjunction with the pc-relative literal load instructions
6433	 to automatically place an immediate value or symbolic address in a
6434	 nearby literal pool and generate a hidden label which references it.
6435	 ISREG has been set to 0 in the case of =value.  */
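      /* For example, "ldr x0, =0x123456789abcdef0" places the constant in a
	 nearby literal pool and assembles as a PC-relative load from it.  */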
6436      if (instr->gen_lit_pool
6437	  && (op == OP_LDR_LIT || op == OP_LDRV_LIT || op == OP_LDRSW_LIT))
6438	{
6439	  int size = aarch64_get_qualifier_esize (operands[0].qualifier);
6440	  if (op == OP_LDRSW_LIT)
6441	    size = 4;
6442	  if (instr->reloc.exp.X_op != O_constant
6443	      && instr->reloc.exp.X_op != O_big
6444	      && instr->reloc.exp.X_op != O_symbol)
6445	    {
6446	      record_operand_error (opcode, 1,
6447				    AARCH64_OPDE_FATAL_SYNTAX_ERROR,
6448				    _("constant expression expected"));
6449	      return FALSE;
6450	    }
6451	  if (! add_to_lit_pool (&instr->reloc.exp, size))
6452	    {
6453	      record_operand_error (opcode, 1,
6454				    AARCH64_OPDE_OTHER_ERROR,
6455				    _("literal pool insertion failed"));
6456	      return FALSE;
6457	    }
6458	}
6459      break;
6460    case log_shift:
6461    case bitfield:
6462      /* UXT[BHW] Wd, Wn
6463	 Unsigned Extend Byte|Halfword|Word: UXT[BH] is an architectural alias
6464	 for UBFM Wd,Wn,#0,#7|15, while UXTW is a pseudo-instruction which is
6465	 encoded using ORR Wd, WZR, Wn (MOV Wd,Wn).
6466	 A programmer-friendly assembler should accept a destination Xd in
6467	 place of Wd, however that is not the preferred form for disassembly.
6468	 */
6469      if ((op == OP_UXTB || op == OP_UXTH || op == OP_UXTW)
6470	  && operands[1].qualifier == AARCH64_OPND_QLF_W
6471	  && operands[0].qualifier == AARCH64_OPND_QLF_X)
6472	operands[0].qualifier = AARCH64_OPND_QLF_W;
6473      break;
6474
6475    case addsub_ext:
6476	{
6477	  /* In the 64-bit form, the final register operand is written as Wm
6478	     for all but the (possibly omitted) UXTX/LSL and SXTX
6479	     operators.
6480	     As a programmer-friendly assembler, we accept e.g.
6481	     ADDS <Xd>, <Xn|SP>, <Xm>{, UXTB {#<amount>}} and change it to
6482	     ADDS <Xd>, <Xn|SP>, <Wm>{, UXTB {#<amount>}}.  */
6483	  int idx = aarch64_operand_index (opcode->operands,
6484					   AARCH64_OPND_Rm_EXT);
6485	  gas_assert (idx == 1 || idx == 2);
6486	  if (operands[0].qualifier == AARCH64_OPND_QLF_X
6487	      && operands[idx].qualifier == AARCH64_OPND_QLF_X
6488	      && operands[idx].shifter.kind != AARCH64_MOD_LSL
6489	      && operands[idx].shifter.kind != AARCH64_MOD_UXTX
6490	      && operands[idx].shifter.kind != AARCH64_MOD_SXTX)
6491	    operands[idx].qualifier = AARCH64_OPND_QLF_W;
6492	}
6493      break;
6494
6495    default:
6496      break;
6497    }
6498
6499  DEBUG_TRACE ("exit with SUCCESS");
6500  return TRUE;
6501}
6502
6503/* Check for loads and stores that will cause unpredictable behavior.  */
6504
6505static void
6506warn_unpredictable_ldst (aarch64_instruction *instr, char *str)
6507{
6508  aarch64_inst *base = &instr->base;
6509  const aarch64_opcode *opcode = base->opcode;
6510  const aarch64_opnd_info *opnds = base->operands;
6511  switch (opcode->iclass)
6512    {
6513    case ldst_pos:
6514    case ldst_imm9:
6515    case ldst_imm10:
6516    case ldst_unscaled:
6517    case ldst_unpriv:
6518      /* Loading/storing the base register is unpredictable if writeback.  */
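      /* For example, "ldr x0, [x0], #8" or "str x1, [x1, #16]!".  */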
6519      if ((aarch64_get_operand_class (opnds[0].type)
6520	   == AARCH64_OPND_CLASS_INT_REG)
6521	  && opnds[0].reg.regno == opnds[1].addr.base_regno
6522	  && opnds[1].addr.base_regno != REG_SP
6523	  && opnds[1].addr.writeback)
6524	as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6525      break;
6526    case ldstpair_off:
6527    case ldstnapair_offs:
6528    case ldstpair_indexed:
6529      /* Loading/storing the base register is unpredictable if writeback.  */
6530      if ((aarch64_get_operand_class (opnds[0].type)
6531	   == AARCH64_OPND_CLASS_INT_REG)
6532	  && (opnds[0].reg.regno == opnds[2].addr.base_regno
6533	    || opnds[1].reg.regno == opnds[2].addr.base_regno)
6534	  && opnds[2].addr.base_regno != REG_SP
6535	  && opnds[2].addr.writeback)
6536	    as_warn (_("unpredictable transfer with writeback -- `%s'"), str);
6537      /* Load operations must load different registers.  */
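      /* For example, "ldp x0, x0, [sp]".  */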
6538      if ((opcode->opcode & (1 << 22))
6539	  && opnds[0].reg.regno == opnds[1].reg.regno)
6540	    as_warn (_("unpredictable load of register pair -- `%s'"), str);
6541      break;
6542    default:
6543      break;
6544    }
6545}
6546
6547/* A wrapper function to interface with libopcodes on encoding and
6548   record the error message if there is any.
6549
6550   Return TRUE on success; otherwise return FALSE.  */
6551
6552static bfd_boolean
6553do_encode (const aarch64_opcode *opcode, aarch64_inst *instr,
6554	   aarch64_insn *code)
6555{
6556  aarch64_operand_error error_info;
6557  error_info.kind = AARCH64_OPDE_NIL;
6558  if (aarch64_opcode_encode (opcode, instr, code, NULL, &error_info))
6559    return TRUE;
6560  else
6561    {
6562      gas_assert (error_info.kind != AARCH64_OPDE_NIL);
6563      record_operand_error_info (opcode, &error_info);
6564      return FALSE;
6565    }
6566}
6567
6568#ifdef DEBUG_AARCH64
6569static inline void
6570dump_opcode_operands (const aarch64_opcode *opcode)
6571{
6572  int i = 0;
6573  while (opcode->operands[i] != AARCH64_OPND_NIL)
6574    {
6575      aarch64_verbose ("\t\t opnd%d: %s", i,
6576		       aarch64_get_operand_name (opcode->operands[i])[0] != '\0'
6577		       ? aarch64_get_operand_name (opcode->operands[i])
6578		       : aarch64_get_operand_desc (opcode->operands[i]));
6579      ++i;
6580    }
6581}
6582#endif /* DEBUG_AARCH64 */
6583
6584/* This is the guts of the machine-dependent assembler.  STR points to a
6585   machine dependent instruction.  This function is supposed to emit
6586   the frags/bytes it assembles to.  */
6587
6588void
6589md_assemble (char *str)
6590{
6591  char *p = str;
6592  templates *template;
6593  aarch64_opcode *opcode;
6594  aarch64_inst *inst_base;
6595  unsigned saved_cond;
6596
6597  /* Align the previous label if needed.  */
6598  if (last_label_seen != NULL)
6599    {
6600      symbol_set_frag (last_label_seen, frag_now);
6601      S_SET_VALUE (last_label_seen, (valueT) frag_now_fix ());
6602      S_SET_SEGMENT (last_label_seen, now_seg);
6603    }
6604
6605  inst.reloc.type = BFD_RELOC_UNUSED;
6606
6607  DEBUG_TRACE ("\n\n");
6608  DEBUG_TRACE ("==============================");
6609  DEBUG_TRACE ("Enter md_assemble with %s", str);
6610
6611  template = opcode_lookup (&p);
6612  if (!template)
6613    {
6614      /* It wasn't an instruction, but it might be a register alias of
6615         the form "alias .req reg".  */
6616      if (!create_register_alias (str, p))
6617	as_bad (_("unknown mnemonic `%s' -- `%s'"), get_mnemonic_name (str),
6618		str);
6619      return;
6620    }
6621
6622  skip_whitespace (p);
6623  if (*p == ',')
6624    {
6625      as_bad (_("unexpected comma after the mnemonic name `%s' -- `%s'"),
6626	      get_mnemonic_name (str), str);
6627      return;
6628    }
6629
6630  init_operand_error_report ();
6631
6632  /* Sections are assumed to start aligned.  In an executable section, there
6633     is no MAP_DATA symbol pending, so we only align the address during the
6634     MAP_DATA --> MAP_INSN transition.
6635     For other sections, this is not guaranteed.  */
6636  enum mstate mapstate = seg_info (now_seg)->tc_segment_info_data.mapstate;
6637  if (!need_pass_2 && subseg_text_p (now_seg) && mapstate == MAP_DATA)
6638    frag_align_code (2, 0);
6639
6640  saved_cond = inst.cond;
6641  reset_aarch64_instruction (&inst);
6642  inst.cond = saved_cond;
6643
6644  /* Iterate through all opcode entries with the same mnemonic name.  */
6645  do
6646    {
6647      opcode = template->opcode;
6648
6649      DEBUG_TRACE ("opcode %s found", opcode->name);
6650#ifdef DEBUG_AARCH64
6651      if (debug_dump)
6652	dump_opcode_operands (opcode);
6653#endif /* DEBUG_AARCH64 */
6654
6655      mapping_state (MAP_INSN);
6656
6657      inst_base = &inst.base;
6658      inst_base->opcode = opcode;
6659
6660      /* Truly conditionally executed instructions, e.g. b.cond.  */
6661      if (opcode->flags & F_COND)
6662	{
6663	  gas_assert (inst.cond != COND_ALWAYS);
6664	  inst_base->cond = get_cond_from_value (inst.cond);
6665	  DEBUG_TRACE ("condition found %s", inst_base->cond->names[0]);
6666	}
6667      else if (inst.cond != COND_ALWAYS)
6668	{
6669	  /* We shouldn't arrive here: the assembly looks like a
6670	     conditional instruction but the opcode found is unconditional.  */
6671	  gas_assert (0);
6672	  continue;
6673	}
6674
6675      if (parse_operands (p, opcode)
6676	  && programmer_friendly_fixup (&inst)
6677	  && do_encode (inst_base->opcode, &inst.base, &inst_base->value))
6678	{
6679	  /* Check that this instruction is supported for this CPU.  */
6680	  if (!opcode->avariant
6681	      || !AARCH64_CPU_HAS_ALL_FEATURES (cpu_variant, *opcode->avariant))
6682	    {
6683	      as_bad (_("selected processor does not support `%s'"), str);
6684	      return;
6685	    }
6686
6687	  warn_unpredictable_ldst (&inst, str);
6688
6689	  if (inst.reloc.type == BFD_RELOC_UNUSED
6690	      || !inst.reloc.need_libopcodes_p)
6691	    output_inst (NULL);
6692	  else
6693	    {
6694	      /* If there is relocation generated for the instruction,
6695	         store the instruction information for the future fix-up.  */
6696	      struct aarch64_inst *copy;
6697	      gas_assert (inst.reloc.type != BFD_RELOC_UNUSED);
6698	      copy = XNEW (struct aarch64_inst);
6699	      memcpy (copy, &inst.base, sizeof (struct aarch64_inst));
6700	      output_inst (copy);
6701	    }
6702	  return;
6703	}
6704
6705      template = template->next;
6706      if (template != NULL)
6707	{
6708	  reset_aarch64_instruction (&inst);
6709	  inst.cond = saved_cond;
6710	}
6711    }
6712  while (template != NULL);
6713
6714  /* Issue the error messages if any.  */
6715  output_operand_error_report (str);
6716}
6717
6718/* Various frobbings of labels and their addresses.  */
6719
6720void
6721aarch64_start_line_hook (void)
6722{
6723  last_label_seen = NULL;
6724}
6725
6726void
6727aarch64_frob_label (symbolS * sym)
6728{
6729  last_label_seen = sym;
6730
6731  dwarf2_emit_label (sym);
6732}
6733
6734int
6735aarch64_data_in_code (void)
6736{
6737  if (!strncmp (input_line_pointer + 1, "data:", 5))
6738    {
6739      *input_line_pointer = '/';
6740      input_line_pointer += 5;
6741      *input_line_pointer = 0;
6742      return 1;
6743    }
6744
6745  return 0;
6746}
6747
6748char *
6749aarch64_canonicalize_symbol_name (char *name)
6750{
6751  int len;
6752
6753  if ((len = strlen (name)) > 5 && streq (name + len - 5, "/data"))
6754    *(name + len - 5) = 0;
6755
6756  return name;
6757}
6758
6759/* Table of all register names defined by default.  The user can
6760   define additional names with .req.  Note that all register names
6761   should appear in both upper and lowercase variants.	Some registers
6762   also have mixed-case names.	*/
6763
6764#define REGDEF(s,n,t) { #s, n, REG_TYPE_##t, TRUE }
6765#define REGNUM(p,n,t) REGDEF(p##n, n, t)
6766#define REGSET16(p,t) \
6767  REGNUM(p, 0,t), REGNUM(p, 1,t), REGNUM(p, 2,t), REGNUM(p, 3,t), \
6768  REGNUM(p, 4,t), REGNUM(p, 5,t), REGNUM(p, 6,t), REGNUM(p, 7,t), \
6769  REGNUM(p, 8,t), REGNUM(p, 9,t), REGNUM(p,10,t), REGNUM(p,11,t), \
6770  REGNUM(p,12,t), REGNUM(p,13,t), REGNUM(p,14,t), REGNUM(p,15,t)
6771#define REGSET31(p,t) \
6772  REGSET16(p, t), \
6773  REGNUM(p,16,t), REGNUM(p,17,t), REGNUM(p,18,t), REGNUM(p,19,t), \
6774  REGNUM(p,20,t), REGNUM(p,21,t), REGNUM(p,22,t), REGNUM(p,23,t), \
6775  REGNUM(p,24,t), REGNUM(p,25,t), REGNUM(p,26,t), REGNUM(p,27,t), \
6776  REGNUM(p,28,t), REGNUM(p,29,t), REGNUM(p,30,t)
6777#define REGSET(p,t) \
6778  REGSET31(p,t), REGNUM(p,31,t)
6779
6780/* These go into aarch64_reg_hsh hash-table.  */
6781static const reg_entry reg_names[] = {
6782  /* Integer registers.  */
6783  REGSET31 (x, R_64), REGSET31 (X, R_64),
6784  REGSET31 (w, R_32), REGSET31 (W, R_32),
6785
6786  REGDEF (wsp, 31, SP_32), REGDEF (WSP, 31, SP_32),
6787  REGDEF (sp, 31, SP_64), REGDEF (SP, 31, SP_64),
6788
6789  REGDEF (wzr, 31, Z_32), REGDEF (WZR, 31, Z_32),
6790  REGDEF (xzr, 31, Z_64), REGDEF (XZR, 31, Z_64),
6791
6792  /* Floating-point single precision registers.  */
6793  REGSET (s, FP_S), REGSET (S, FP_S),
6794
6795  /* Floating-point double precision registers.  */
6796  REGSET (d, FP_D), REGSET (D, FP_D),
6797
6798  /* Floating-point half precision registers.  */
6799  REGSET (h, FP_H), REGSET (H, FP_H),
6800
6801  /* Floating-point byte precision registers.  */
6802  REGSET (b, FP_B), REGSET (B, FP_B),
6803
6804  /* Floating-point quad precision registers.  */
6805  REGSET (q, FP_Q), REGSET (Q, FP_Q),
6806
6807  /* FP/SIMD registers.  */
6808  REGSET (v, VN), REGSET (V, VN),
6809
6810  /* SVE vector registers.  */
6811  REGSET (z, ZN), REGSET (Z, ZN),
6812
6813  /* SVE predicate registers.  */
6814  REGSET16 (p, PN), REGSET16 (P, PN)
6815};
6816
6817#undef REGDEF
6818#undef REGNUM
6819#undef REGSET16
6820#undef REGSET31
6821#undef REGSET
6822
6823#define N 1
6824#define n 0
6825#define Z 1
6826#define z 0
6827#define C 1
6828#define c 0
6829#define V 1
6830#define v 0
6831#define B(a,b,c,d) (((a) << 3) | ((b) << 2) | ((c) << 1) | (d))
6832static const asm_nzcv nzcv_names[] = {
6833  {"nzcv", B (n, z, c, v)},
6834  {"nzcV", B (n, z, c, V)},
6835  {"nzCv", B (n, z, C, v)},
6836  {"nzCV", B (n, z, C, V)},
6837  {"nZcv", B (n, Z, c, v)},
6838  {"nZcV", B (n, Z, c, V)},
6839  {"nZCv", B (n, Z, C, v)},
6840  {"nZCV", B (n, Z, C, V)},
6841  {"Nzcv", B (N, z, c, v)},
6842  {"NzcV", B (N, z, c, V)},
6843  {"NzCv", B (N, z, C, v)},
6844  {"NzCV", B (N, z, C, V)},
6845  {"NZcv", B (N, Z, c, v)},
6846  {"NZcV", B (N, Z, c, V)},
6847  {"NZCv", B (N, Z, C, v)},
6848  {"NZCV", B (N, Z, C, V)}
6849};
6850
6851#undef N
6852#undef n
6853#undef Z
6854#undef z
6855#undef C
6856#undef c
6857#undef V
6858#undef v
6859#undef B
6860
6861/* MD interface: bits in the object file.  */
6862
6863/* Turn an integer of n bytes (in val) into a stream of bytes appropriate
6864   for use in the a.out file, and stores them in the array pointed to by buf.
6865   This knows about the endian-ness of the target machine and does
6866   THE RIGHT THING, whatever it is.  Possible values for n are 1 (byte),
6867   2 (short) and 4 (long).  Floating numbers are put out as a series of
6868   LITTLENUMS (shorts, here at least).	*/
6869
6870void
6871md_number_to_chars (char *buf, valueT val, int n)
6872{
6873  if (target_big_endian)
6874    number_to_chars_bigendian (buf, val, n);
6875  else
6876    number_to_chars_littleendian (buf, val, n);
6877}
6878
6879/* MD interface: Sections.  */
6880
6881/* Estimate the size of a frag before relaxing.  Assume everything fits in
6882   4 bytes.  */
6883
6884int
6885md_estimate_size_before_relax (fragS * fragp, segT segtype ATTRIBUTE_UNUSED)
6886{
6887  fragp->fr_var = 4;
6888  return 4;
6889}
6890
6891/* Round up a section size to the appropriate boundary.	 */
6892
6893valueT
6894md_section_align (segT segment ATTRIBUTE_UNUSED, valueT size)
6895{
6896  return size;
6897}
6898
6899/* This is called from HANDLE_ALIGN in write.c.	 Fill in the contents
6900   of an rs_align_code fragment.
6901
6902   Here we fill the frag with the appropriate info for padding the
6903   output stream.  The resulting frag will consist of a fixed (fr_fix)
6904   and of a repeating (fr_var) part.
6905
6906   The fixed content is always emitted before the repeating content and
6907   these two parts are used as follows in constructing the output:
6908   - the fixed part will be used to align to a valid instruction word
6909     boundary, in case that we start at a misaligned address; as no
6910     executable instruction can live at the misaligned location, we
6911     simply fill with zeros;
6912   - the variable part will be used to cover the remaining padding and
6913     we fill using the AArch64 NOP instruction.
6914
6915   Note that the size of a RS_ALIGN_CODE fragment is always 7 to provide
6916   enough storage space for up to 3 bytes for padding back to a valid
6917   instruction alignment and exactly 4 bytes to store the NOP pattern.  */
6918
6919void
6920aarch64_handle_align (fragS * fragP)
6921{
6922  /* NOP = d503201f */
6923  /* AArch64 instructions are always little-endian.  */
6924  static unsigned char const aarch64_noop[4] = { 0x1f, 0x20, 0x03, 0xd5 };
6925
6926  int bytes, fix, noop_size;
6927  char *p;
6928
6929  if (fragP->fr_type != rs_align_code)
6930    return;
6931
6932  bytes = fragP->fr_next->fr_address - fragP->fr_address - fragP->fr_fix;
6933  p = fragP->fr_literal + fragP->fr_fix;
6934
6935#ifdef OBJ_ELF
6936  gas_assert (fragP->tc_frag_data.recorded);
6937#endif
6938
6939  noop_size = sizeof (aarch64_noop);
6940
6941  fix = bytes & (noop_size - 1);
6942  if (fix)
6943    {
6944#ifdef OBJ_ELF
6945      insert_data_mapping_symbol (MAP_INSN, fragP->fr_fix, fragP, fix);
6946#endif
6947      memset (p, 0, fix);
6948      p += fix;
6949      fragP->fr_fix += fix;
6950    }
6951
6952  if (noop_size)
6953    memcpy (p, aarch64_noop, noop_size);
6954  fragP->fr_var = noop_size;
6955}
6956
6957/* Perform target specific initialisation of a frag.
6958   Note - despite the name this initialisation is not done when the frag
6959   is created, but only when its type is assigned.  A frag can be created
6960   and used a long time before its type is set, so beware of assuming that
6961   this initialisation is performed first.  */
6962
6963#ifndef OBJ_ELF
6964void
6965aarch64_init_frag (fragS * fragP ATTRIBUTE_UNUSED,
6966		   int max_chars ATTRIBUTE_UNUSED)
6967{
6968}
6969
6970#else /* OBJ_ELF is defined.  */
6971void
6972aarch64_init_frag (fragS * fragP, int max_chars)
6973{
6974  /* Record a mapping symbol for alignment frags.  We will delete this
6975     later if the alignment ends up empty.  */
6976  if (!fragP->tc_frag_data.recorded)
6977    fragP->tc_frag_data.recorded = 1;
6978
6979  switch (fragP->fr_type)
6980    {
6981    case rs_align_test:
6982    case rs_fill:
6983      mapping_state_2 (MAP_DATA, max_chars);
6984      break;
6985    case rs_align:
6986      /* PR 20364: We can get alignment frags in code sections,
6987	 so do not just assume that we should use the MAP_DATA state.  */
6988      mapping_state_2 (subseg_text_p (now_seg) ? MAP_INSN : MAP_DATA, max_chars);
6989      break;
6990    case rs_align_code:
6991      mapping_state_2 (MAP_INSN, max_chars);
6992      break;
6993    default:
6994      break;
6995    }
6996}
6997
6998/* Initialize the DWARF-2 unwind information for this procedure.  */
6999
7000void
7001tc_aarch64_frame_initial_instructions (void)
7002{
7003  cfi_add_CFA_def_cfa (REG_SP, 0);
7004}
7005#endif /* OBJ_ELF */
7006
7007/* Convert REGNAME to a DWARF-2 register number.  */
7008
7009int
7010tc_aarch64_regname_to_dw2regnum (char *regname)
7011{
7012  const reg_entry *reg = parse_reg (&regname);
7013  if (reg == NULL)
7014    return -1;
7015
7016  switch (reg->type)
7017    {
7018    case REG_TYPE_SP_32:
7019    case REG_TYPE_SP_64:
7020    case REG_TYPE_R_32:
7021    case REG_TYPE_R_64:
7022      return reg->number;
7023
7024    case REG_TYPE_FP_B:
7025    case REG_TYPE_FP_H:
7026    case REG_TYPE_FP_S:
7027    case REG_TYPE_FP_D:
7028    case REG_TYPE_FP_Q:
7029      return reg->number + 64;
7030
7031    default:
7032      break;
7033    }
7034  return -1;
7035}
7036
7037/* Implement DWARF2_ADDR_SIZE.  */
7038
7039int
7040aarch64_dwarf2_addr_size (void)
7041{
7042#if defined (OBJ_MAYBE_ELF) || defined (OBJ_ELF)
7043  if (ilp32_p)
7044    return 4;
7045#endif
7046  return bfd_arch_bits_per_address (stdoutput) / 8;
7047}
7048
7049/* MD interface: Symbol and relocation handling.  */
7050
7051/* Return the address within the segment that a PC-relative fixup is
7052   relative to.  For AArch64, PC-relative fixups applied to instructions
7053   are generally relative to the location plus AARCH64_PCREL_OFFSET bytes.  */
7054
7055long
7056md_pcrel_from_section (fixS * fixP, segT seg)
7057{
7058  offsetT base = fixP->fx_where + fixP->fx_frag->fr_address;
7059
7060  /* If this is pc-relative and we are going to emit a relocation
7061     then we just want to put out any pipeline compensation that the linker
7062     will need.  Otherwise we want to use the calculated base.  */
7063  if (fixP->fx_pcrel
7064      && ((fixP->fx_addsy && S_GET_SEGMENT (fixP->fx_addsy) != seg)
7065	  || aarch64_force_relocation (fixP)))
7066    base = 0;
7067
7068  /* AArch64 should be consistent for all pc-relative relocations.  */
7069  return base + AARCH64_PCREL_OFFSET;
7070}
7071
7072/* Under ELF we need to default _GLOBAL_OFFSET_TABLE_.
7073   Otherwise we have no need to default values of symbols.  */
7074
7075symbolS *
7076md_undefined_symbol (char *name ATTRIBUTE_UNUSED)
7077{
7078#ifdef OBJ_ELF
7079  if (name[0] == '_' && name[1] == 'G'
7080      && streq (name, GLOBAL_OFFSET_TABLE_NAME))
7081    {
7082      if (!GOT_symbol)
7083	{
7084	  if (symbol_find (name))
7085	    as_bad (_("GOT already in the symbol table"));
7086
7087	  GOT_symbol = symbol_new (name, undefined_section,
7088				   (valueT) 0, &zero_address_frag);
7089	}
7090
7091      return GOT_symbol;
7092    }
7093#endif
7094
7095  return 0;
7096}
7097
7098/* Return non-zero if the indicated VALUE has overflowed the maximum
7099   range expressible by an unsigned number with the indicated number of
7100   BITS.  */
7101
7102static bfd_boolean
7103unsigned_overflow (valueT value, unsigned bits)
7104{
7105  valueT lim;
7106  if (bits >= sizeof (valueT) * 8)
7107    return FALSE;
7108  lim = (valueT) 1 << bits;
7109  return (value >= lim);
7110}
7111
7112
7113/* Return non-zero if the indicated VALUE has overflowed the maximum
7114   range expressible by a signed number with the indicated number of
7115   BITS.  */
7116
7117static bfd_boolean
7118signed_overflow (offsetT value, unsigned bits)
7119{
7120  offsetT lim;
7121  if (bits >= sizeof (offsetT) * 8)
7122    return FALSE;
7123  lim = (offsetT) 1 << (bits - 1);
7124  return (value < -lim || value >= lim);
7125}
7126
7127/* Given an instruction in *INST, which is expected to be a scaled, 12-bit,
7128   unsigned immediate offset load/store instruction, try to encode it as
7129   an unscaled, 9-bit, signed immediate offset load/store instruction.
7130   Return TRUE if it is successful; otherwise return FALSE.
7131
7132   As a programmer-friendly assembler, we generate LDUR/STUR instructions
7133   in response to the standard LDR/STR mnemonics when the immediate offset is
7134   unambiguous, i.e. when it is negative or unaligned.  */
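   For example, "ldr x0, [x1, #-8]" (negative offset) and "ldrh w0, [x1, #3]"
   (offset not a multiple of the transfer size) cannot use the scaled form
   and are converted to LDUR/LDURH by this function.  */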
7135
7136static bfd_boolean
7137try_to_encode_as_unscaled_ldst (aarch64_inst *instr)
7138{
7139  int idx;
7140  enum aarch64_op new_op;
7141  const aarch64_opcode *new_opcode;
7142
7143  gas_assert (instr->opcode->iclass == ldst_pos);
7144
7145  switch (instr->opcode->op)
7146    {
7147    case OP_LDRB_POS:new_op = OP_LDURB; break;
7148    case OP_STRB_POS: new_op = OP_STURB; break;
7149    case OP_LDRSB_POS: new_op = OP_LDURSB; break;
7150    case OP_LDRH_POS: new_op = OP_LDURH; break;
7151    case OP_STRH_POS: new_op = OP_STURH; break;
7152    case OP_LDRSH_POS: new_op = OP_LDURSH; break;
7153    case OP_LDR_POS: new_op = OP_LDUR; break;
7154    case OP_STR_POS: new_op = OP_STUR; break;
7155    case OP_LDRF_POS: new_op = OP_LDURV; break;
7156    case OP_STRF_POS: new_op = OP_STURV; break;
7157    case OP_LDRSW_POS: new_op = OP_LDURSW; break;
7158    case OP_PRFM_POS: new_op = OP_PRFUM; break;
7159    default: new_op = OP_NIL; break;
7160    }
7161
7162  if (new_op == OP_NIL)
7163    return FALSE;
7164
7165  new_opcode = aarch64_get_opcode (new_op);
7166  gas_assert (new_opcode != NULL);
7167
7168  DEBUG_TRACE ("Check programmer-friendly STURB/LDURB -> STRB/LDRB: %d == %d",
7169	       instr->opcode->op, new_opcode->op);
7170
7171  aarch64_replace_opcode (instr, new_opcode);
7172
7173  /* Clear the ADDR_SIMM9 operand's qualifier; otherwise the
7174     qualifier matching may fail because the out-of-date qualifier will
7175     prevent the operand from being updated with a new and correct qualifier.  */
7176  idx = aarch64_operand_index (instr->opcode->operands,
7177			       AARCH64_OPND_ADDR_SIMM9);
7178  gas_assert (idx == 1);
7179  instr->operands[idx].qualifier = AARCH64_OPND_QLF_NIL;
7180
7181  DEBUG_TRACE ("Found LDURB entry to encode programmer-friendly LDRB");
7182
7183  if (!aarch64_opcode_encode (instr->opcode, instr, &instr->value, NULL, NULL))
7184    return FALSE;
7185
7186  return TRUE;
7187}
7188
7189/* Called by fix_insn to fix a MOV immediate alias instruction.
7190
7191   Operand for a generic move immediate instruction, which is an alias
7192   instruction that generates a single MOVZ, MOVN or ORR instruction to load
7193   a 32-bit/64-bit immediate value into a general register.  An assembler error
7194   shall result if the immediate cannot be created by a single one of these
7195   instructions.  If there is a choice, then to ensure reversibility an
7196   assembler must prefer a MOVZ to MOVN, and MOVZ or MOVN to ORR.  */
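/* For example, a resolved value of 0x10000 is encoded as MOVZ Xd, #0x1, LSL #16,
   a value of -1 as MOVN Xd, #0, and a bitmask value such as 0xff00ff00ff00ff00
   as ORR Xd, XZR, #0xff00ff00ff00ff00.  */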
7197
7198static void
7199fix_mov_imm_insn (fixS *fixP, char *buf, aarch64_inst *instr, offsetT value)
7200{
7201  const aarch64_opcode *opcode;
7202
7203  /* Need to check if the destination is SP/ZR.  The check has to be done
7204     before any aarch64_replace_opcode.  */
7205  int try_mov_wide_p = !aarch64_stack_pointer_p (&instr->operands[0]);
7206  int try_mov_bitmask_p = !aarch64_zero_register_p (&instr->operands[0]);
7207
7208  instr->operands[1].imm.value = value;
7209  instr->operands[1].skip = 0;
7210
7211  if (try_mov_wide_p)
7212    {
7213      /* Try the MOVZ alias.  */
7214      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDE);
7215      aarch64_replace_opcode (instr, opcode);
7216      if (aarch64_opcode_encode (instr->opcode, instr,
7217				 &instr->value, NULL, NULL))
7218	{
7219	  put_aarch64_insn (buf, instr->value);
7220	  return;
7221	}
7222      /* Try the MOVN alias.  */
7223      opcode = aarch64_get_opcode (OP_MOV_IMM_WIDEN);
7224      aarch64_replace_opcode (instr, opcode);
7225      if (aarch64_opcode_encode (instr->opcode, instr,
7226				 &instr->value, NULL, NULL))
7227	{
7228	  put_aarch64_insn (buf, instr->value);
7229	  return;
7230	}
7231    }
7232
7233  if (try_mov_bitmask_p)
7234    {
7235      /* Try the ORR alias.  */
7236      opcode = aarch64_get_opcode (OP_MOV_IMM_LOG);
7237      aarch64_replace_opcode (instr, opcode);
7238      if (aarch64_opcode_encode (instr->opcode, instr,
7239				 &instr->value, NULL, NULL))
7240	{
7241	  put_aarch64_insn (buf, instr->value);
7242	  return;
7243	}
7244    }
7245
7246  as_bad_where (fixP->fx_file, fixP->fx_line,
7247		_("immediate cannot be moved by a single instruction"));
7248}
7249
7250/* An immediate-related instruction operand may have a symbol used
7251   in the assembly, e.g.
7252
7253     mov     w0, u32
7254     .set    u32,    0x00ffff00
7255
7256   At the time when the assembly instruction is parsed, a referenced symbol,
7257   like 'u32' in the above example, may not have been seen; a fixS is created
7258   in such a case and is handled here after symbols have been resolved.
7259   The instruction is fixed up with VALUE using the information in *FIXP plus
7260   extra information in FLAGS.
7261
7262   This function is called by md_apply_fix to fix up instructions that need
7263   a fix-up described above but do not involve any linker-time relocation.  */
7264
7265static void
7266fix_insn (fixS *fixP, uint32_t flags, offsetT value)
7267{
7268  int idx;
7269  uint32_t insn;
7270  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7271  enum aarch64_opnd opnd = fixP->tc_fix_data.opnd;
7272  aarch64_inst *new_inst = fixP->tc_fix_data.inst;
7273
7274  if (new_inst)
7275    {
7276      /* Now the instruction is about to be fixed-up, so the operand that
7277	 was previously marked as 'ignored' needs to be unmarked in order
7278	 to get the encoding done properly.  */
7279      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7280      new_inst->operands[idx].skip = 0;
7281    }
7282
7283  gas_assert (opnd != AARCH64_OPND_NIL);
7284
7285  switch (opnd)
7286    {
7287    case AARCH64_OPND_EXCEPTION:
7288      if (unsigned_overflow (value, 16))
7289	as_bad_where (fixP->fx_file, fixP->fx_line,
7290		      _("immediate out of range"));
7291      insn = get_aarch64_insn (buf);
7292      insn |= encode_svc_imm (value);
7293      put_aarch64_insn (buf, insn);
7294      break;
7295
7296    case AARCH64_OPND_AIMM:
7297      /* ADD or SUB with immediate.
7298	 NOTE this assumes we come here with an add/sub shifted reg encoding
7299		  3  322|2222|2  2  2 21111 111111
7300		  1  098|7654|3  2  1 09876 543210 98765 43210
7301	 0b000000 sf 000|1011|shift 0 Rm    imm6   Rn    Rd    ADD
7302	 2b000000 sf 010|1011|shift 0 Rm    imm6   Rn    Rd    ADDS
7303	 4b000000 sf 100|1011|shift 0 Rm    imm6   Rn    Rd    SUB
7304	 6b000000 sf 110|1011|shift 0 Rm    imm6   Rn    Rd    SUBS
7305	 ->
7306		  3  322|2222|2 2   221111111111
7307		  1  098|7654|3 2   109876543210 98765 43210
7308	 11000000 sf 001|0001|shift imm12        Rn    Rd    ADD
7309	 31000000 sf 011|0001|shift imm12        Rn    Rd    ADDS
7310	 51000000 sf 101|0001|shift imm12        Rn    Rd    SUB
7311	 71000000 sf 111|0001|shift imm12        Rn    Rd    SUBS
7312	 Fields sf Rn Rd are already set.  */
7313      insn = get_aarch64_insn (buf);
7314      if (value < 0)
7315	{
7316	  /* Add <-> sub.  */
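	  /* E.g. an ADD whose symbolic immediate resolves to a negative
	     value is re-encoded as the equivalent SUB with the negated
	     immediate, and vice versa.  */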
7317	  insn = reencode_addsub_switch_add_sub (insn);
7318	  value = -value;
7319	}
7320
7321      if ((flags & FIXUP_F_HAS_EXPLICIT_SHIFT) == 0
7322	  && unsigned_overflow (value, 12))
7323	{
7324	  /* Try to shift the value by 12 to make it fit.  */
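	  /* For example, a resolved value of 0x5000 does not fit in 12 bits
	     but is encodable as #0x5 with the shift-amount field set to 1
	     (i.e. LSL #12).  */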
7325	  if (((value >> 12) << 12) == value
7326	      && ! unsigned_overflow (value, 12 + 12))
7327	    {
7328	      value >>= 12;
7329	      insn |= encode_addsub_imm_shift_amount (1);
7330	    }
7331	}
7332
7333      if (unsigned_overflow (value, 12))
7334	as_bad_where (fixP->fx_file, fixP->fx_line,
7335		      _("immediate out of range"));
7336
7337      insn |= encode_addsub_imm (value);
7338
7339      put_aarch64_insn (buf, insn);
7340      break;
7341
7342    case AARCH64_OPND_SIMD_IMM:
7343    case AARCH64_OPND_SIMD_IMM_SFT:
7344    case AARCH64_OPND_LIMM:
7345      /* Bit mask immediate.  */
7346      gas_assert (new_inst != NULL);
7347      idx = aarch64_operand_index (new_inst->opcode->operands, opnd);
7348      new_inst->operands[idx].imm.value = value;
7349      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7350				 &new_inst->value, NULL, NULL))
7351	put_aarch64_insn (buf, new_inst->value);
7352      else
7353	as_bad_where (fixP->fx_file, fixP->fx_line,
7354		      _("invalid immediate"));
7355      break;
7356
7357    case AARCH64_OPND_HALF:
7358      /* 16-bit unsigned immediate.  */
7359      if (unsigned_overflow (value, 16))
7360	as_bad_where (fixP->fx_file, fixP->fx_line,
7361		      _("immediate out of range"));
7362      insn = get_aarch64_insn (buf);
7363      insn |= encode_movw_imm (value & 0xffff);
7364      put_aarch64_insn (buf, insn);
7365      break;
7366
7367    case AARCH64_OPND_IMM_MOV:
      /* Operand for a generic move immediate instruction, which is an alias
	 that generates a single MOVZ, MOVN or ORR instruction to load a
	 32-bit/64-bit immediate value into a general register.  An assembler
	 error shall result if the immediate cannot be created by a single
	 one of these instructions.  If there is a choice, then to ensure
	 reversibility an assembler must prefer MOVZ to MOVN, and MOVZ or
	 MOVN to ORR.  */
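      /* For illustration (assuming X0/W0 as the destination): a value such
	 as 0x12340000 can be produced by MOVZ x0, #0x1234, lsl #16;
	 0xffffffffffffabcd by MOVN x0, #0x5432; and 0x00ffff00 (the 'u32'
	 example above) by ORR w0, wzr, #0xffff00.  */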
7375      gas_assert (new_inst != NULL);
7376      fix_mov_imm_insn (fixP, buf, new_inst, value);
7377      break;
7378
7379    case AARCH64_OPND_ADDR_SIMM7:
7380    case AARCH64_OPND_ADDR_SIMM9:
7381    case AARCH64_OPND_ADDR_SIMM9_2:
7382    case AARCH64_OPND_ADDR_SIMM10:
7383    case AARCH64_OPND_ADDR_UIMM12:
7384      /* Immediate offset in an address.  */
7385      insn = get_aarch64_insn (buf);
7386
7387      gas_assert (new_inst != NULL && new_inst->value == insn);
7388      gas_assert (new_inst->opcode->operands[1] == opnd
7389		  || new_inst->opcode->operands[2] == opnd);
7390
7391      /* Get the index of the address operand.  */
7392      if (new_inst->opcode->operands[1] == opnd)
7393	/* e.g. STR <Xt>, [<Xn|SP>, <R><m>{, <extend> {<amount>}}].  */
7394	idx = 1;
7395      else
7396	/* e.g. LDP <Qt1>, <Qt2>, [<Xn|SP>{, #<imm>}].  */
7397	idx = 2;
7398
7399      /* Update the resolved offset value.  */
7400      new_inst->operands[idx].addr.offset.imm = value;
7401
7402      /* Encode/fix-up.  */
7403      if (aarch64_opcode_encode (new_inst->opcode, new_inst,
7404				 &new_inst->value, NULL, NULL))
7405	{
7406	  put_aarch64_insn (buf, new_inst->value);
7407	  break;
7408	}
7409      else if (new_inst->opcode->iclass == ldst_pos
7410	       && try_to_encode_as_unscaled_ldst (new_inst))
7411	{
7412	  put_aarch64_insn (buf, new_inst->value);
7413	  break;
7414	}
7415
7416      as_bad_where (fixP->fx_file, fixP->fx_line,
7417		    _("immediate offset out of range"));
7418      break;
7419
7420    default:
7421      gas_assert (0);
7422      as_fatal (_("unhandled operand code %d"), opnd);
7423    }
7424}
7425
7426/* Apply a fixup (fixP) to segment data, once it has been determined
7427   by our caller that we have all the info we need to fix it up.
7428
7429   Parameter valP is the pointer to the value of the bits.  */
7430
7431void
7432md_apply_fix (fixS * fixP, valueT * valP, segT seg)
7433{
7434  offsetT value = *valP;
7435  uint32_t insn;
7436  char *buf = fixP->fx_where + fixP->fx_frag->fr_literal;
7437  int scale;
7438  unsigned flags = fixP->fx_addnumber;
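  /* For BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP fixups, fx_addnumber carries
     the FIXUP_F_* flags (e.g. FIXUP_F_HAS_EXPLICIT_SHIFT) that fix_insn
     consumes below.  */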
7439
7440  DEBUG_TRACE ("\n\n");
7441  DEBUG_TRACE ("~~~~~~~~~~~~~~~~~~~~~~~~~");
7442  DEBUG_TRACE ("Enter md_apply_fix");
7443
7444  gas_assert (fixP->fx_r_type <= BFD_RELOC_UNUSED);
7445
7446  /* Note whether this will delete the relocation.  */
7447
7448  if (fixP->fx_addsy == 0 && !fixP->fx_pcrel)
7449    fixP->fx_done = 1;
7450
7451  /* Process the relocations.  */
7452  switch (fixP->fx_r_type)
7453    {
7454    case BFD_RELOC_NONE:
7455      /* This will need to go in the object file.  */
7456      fixP->fx_done = 0;
7457      break;
7458
7459    case BFD_RELOC_8:
7460    case BFD_RELOC_8_PCREL:
7461      if (fixP->fx_done || !seg->use_rela_p)
7462	md_number_to_chars (buf, value, 1);
7463      break;
7464
7465    case BFD_RELOC_16:
7466    case BFD_RELOC_16_PCREL:
7467      if (fixP->fx_done || !seg->use_rela_p)
7468	md_number_to_chars (buf, value, 2);
7469      break;
7470
7471    case BFD_RELOC_32:
7472    case BFD_RELOC_32_PCREL:
7473      if (fixP->fx_done || !seg->use_rela_p)
7474	md_number_to_chars (buf, value, 4);
7475      break;
7476
7477    case BFD_RELOC_64:
7478    case BFD_RELOC_64_PCREL:
7479      if (fixP->fx_done || !seg->use_rela_p)
7480	md_number_to_chars (buf, value, 8);
7481      break;
7482
7483    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7484      /* We claim that these fixups have been processed here, even if
7485         in fact we generate an error because we do not have a reloc
7486         for them, so tc_gen_reloc() will reject them.  */
7487      fixP->fx_done = 1;
7488      if (fixP->fx_addsy && !S_IS_DEFINED (fixP->fx_addsy))
7489	{
7490	  as_bad_where (fixP->fx_file, fixP->fx_line,
7491			_("undefined symbol %s used as an immediate value"),
7492			S_GET_NAME (fixP->fx_addsy));
7493	  goto apply_fix_return;
7494	}
7495      fix_insn (fixP, flags, value);
7496      break;
7497
7498    case BFD_RELOC_AARCH64_LD_LO19_PCREL:
7499      if (fixP->fx_done || !seg->use_rela_p)
7500	{
7501	  if (value & 3)
7502	    as_bad_where (fixP->fx_file, fixP->fx_line,
7503			  _("pc-relative load offset not word aligned"));
7504	  if (signed_overflow (value, 21))
7505	    as_bad_where (fixP->fx_file, fixP->fx_line,
7506			  _("pc-relative load offset out of range"));
7507	  insn = get_aarch64_insn (buf);
7508	  insn |= encode_ld_lit_ofs_19 (value >> 2);
7509	  put_aarch64_insn (buf, insn);
7510	}
7511      break;
7512
7513    case BFD_RELOC_AARCH64_ADR_LO21_PCREL:
7514      if (fixP->fx_done || !seg->use_rela_p)
7515	{
7516	  if (signed_overflow (value, 21))
7517	    as_bad_where (fixP->fx_file, fixP->fx_line,
7518			  _("pc-relative address offset out of range"));
7519	  insn = get_aarch64_insn (buf);
7520	  insn |= encode_adr_imm (value);
7521	  put_aarch64_insn (buf, insn);
7522	}
7523      break;
7524
7525    case BFD_RELOC_AARCH64_BRANCH19:
7526      if (fixP->fx_done || !seg->use_rela_p)
7527	{
7528	  if (value & 3)
7529	    as_bad_where (fixP->fx_file, fixP->fx_line,
7530			  _("conditional branch target not word aligned"));
7531	  if (signed_overflow (value, 21))
7532	    as_bad_where (fixP->fx_file, fixP->fx_line,
7533			  _("conditional branch out of range"));
7534	  insn = get_aarch64_insn (buf);
7535	  insn |= encode_cond_branch_ofs_19 (value >> 2);
7536	  put_aarch64_insn (buf, insn);
7537	}
7538      break;
7539
7540    case BFD_RELOC_AARCH64_TSTBR14:
7541      if (fixP->fx_done || !seg->use_rela_p)
7542	{
7543	  if (value & 3)
7544	    as_bad_where (fixP->fx_file, fixP->fx_line,
7545			  _("conditional branch target not word aligned"));
7546	  if (signed_overflow (value, 16))
7547	    as_bad_where (fixP->fx_file, fixP->fx_line,
7548			  _("conditional branch out of range"));
7549	  insn = get_aarch64_insn (buf);
7550	  insn |= encode_tst_branch_ofs_14 (value >> 2);
7551	  put_aarch64_insn (buf, insn);
7552	}
7553      break;
7554
7555    case BFD_RELOC_AARCH64_CALL26:
7556    case BFD_RELOC_AARCH64_JUMP26:
7557      if (fixP->fx_done || !seg->use_rela_p)
7558	{
7559	  if (value & 3)
7560	    as_bad_where (fixP->fx_file, fixP->fx_line,
7561			  _("branch target not word aligned"));
7562	  if (signed_overflow (value, 28))
7563	    as_bad_where (fixP->fx_file, fixP->fx_line,
7564			  _("branch out of range"));
7565	  insn = get_aarch64_insn (buf);
7566	  insn |= encode_branch_ofs_26 (value >> 2);
7567	  put_aarch64_insn (buf, insn);
7568	}
7569      break;
7570
7571    case BFD_RELOC_AARCH64_MOVW_G0:
7572    case BFD_RELOC_AARCH64_MOVW_G0_NC:
7573    case BFD_RELOC_AARCH64_MOVW_G0_S:
7574    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G0_NC:
7575      scale = 0;
7576      goto movw_common;
7577    case BFD_RELOC_AARCH64_MOVW_G1:
7578    case BFD_RELOC_AARCH64_MOVW_G1_NC:
7579    case BFD_RELOC_AARCH64_MOVW_G1_S:
7580    case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7581      scale = 16;
7582      goto movw_common;
7583    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7584      scale = 0;
7585      S_SET_THREAD_LOCAL (fixP->fx_addsy);
7586      /* Should always be exported to object file, see
7587	 aarch64_force_relocation().  */
7588      gas_assert (!fixP->fx_done);
7589      gas_assert (seg->use_rela_p);
7590      goto movw_common;
7591    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7592      scale = 16;
7593      S_SET_THREAD_LOCAL (fixP->fx_addsy);
7594      /* Should always be exported to object file, see
7595	 aarch64_force_relocation().  */
7596      gas_assert (!fixP->fx_done);
7597      gas_assert (seg->use_rela_p);
7598      goto movw_common;
7599    case BFD_RELOC_AARCH64_MOVW_G2:
7600    case BFD_RELOC_AARCH64_MOVW_G2_NC:
7601    case BFD_RELOC_AARCH64_MOVW_G2_S:
7602      scale = 32;
7603      goto movw_common;
7604    case BFD_RELOC_AARCH64_MOVW_G3:
7605      scale = 48;
7606    movw_common:
7607      if (fixP->fx_done || !seg->use_rela_p)
7608	{
7609	  insn = get_aarch64_insn (buf);
7610
7611	  if (!fixP->fx_done)
7612	    {
7613	      /* REL signed addend must fit in 16 bits */
7614	      if (signed_overflow (value, 16))
7615		as_bad_where (fixP->fx_file, fixP->fx_line,
7616			      _("offset out of range"));
7617	    }
7618	  else
7619	    {
7620	      /* Check for overflow and scale. */
7621	      switch (fixP->fx_r_type)
7622		{
7623		case BFD_RELOC_AARCH64_MOVW_G0:
7624		case BFD_RELOC_AARCH64_MOVW_G1:
7625		case BFD_RELOC_AARCH64_MOVW_G2:
7626		case BFD_RELOC_AARCH64_MOVW_G3:
7627		case BFD_RELOC_AARCH64_MOVW_GOTOFF_G1:
7628		case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7629		  if (unsigned_overflow (value, scale + 16))
7630		    as_bad_where (fixP->fx_file, fixP->fx_line,
7631				  _("unsigned value out of range"));
7632		  break;
7633		case BFD_RELOC_AARCH64_MOVW_G0_S:
7634		case BFD_RELOC_AARCH64_MOVW_G1_S:
7635		case BFD_RELOC_AARCH64_MOVW_G2_S:
7636		  /* NOTE: We can only come here with movz or movn. */
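		  /* A negative value is materialised as MOVN with the
		     bit-inverted immediate and a non-negative one as MOVZ;
		     e.g. a resolved value of -1 in the G0_S group becomes
		     MOVN #0.  */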
7637		  if (signed_overflow (value, scale + 16))
7638		    as_bad_where (fixP->fx_file, fixP->fx_line,
7639				  _("signed value out of range"));
7640		  if (value < 0)
7641		    {
7642		      /* Force use of MOVN.  */
7643		      value = ~value;
7644		      insn = reencode_movzn_to_movn (insn);
7645		    }
7646		  else
7647		    {
7648		      /* Force use of MOVZ.  */
7649		      insn = reencode_movzn_to_movz (insn);
7650		    }
7651		  break;
7652		default:
7653		  /* Unchecked relocations.  */
7654		  break;
7655		}
7656	      value >>= scale;
7657	    }
7658
7659	  /* Insert value into MOVN/MOVZ/MOVK instruction. */
7660	  insn |= encode_movw_imm (value & 0xffff);
7661
7662	  put_aarch64_insn (buf, insn);
7663	}
7664      break;
7665
7666    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7667      fixP->fx_r_type = (ilp32_p
7668			 ? BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC
7669			 : BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC);
7670      S_SET_THREAD_LOCAL (fixP->fx_addsy);
7671      /* Should always be exported to object file, see
7672	 aarch64_force_relocation().  */
7673      gas_assert (!fixP->fx_done);
7674      gas_assert (seg->use_rela_p);
7675      break;
7676
7677    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7678      fixP->fx_r_type = (ilp32_p
7679			 ? BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC
7680			 : BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC);
7681      S_SET_THREAD_LOCAL (fixP->fx_addsy);
7682      /* Should always be exported to object file, see
7683	 aarch64_force_relocation().  */
7684      gas_assert (!fixP->fx_done);
7685      gas_assert (seg->use_rela_p);
7686      break;
7687
7688    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7689    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7690    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7691    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7692    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7693    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7694    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7695    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7696    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7697    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7698    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7699    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7700    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7701    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7702    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7703    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7704    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7705    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7706    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7707    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7708    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7709    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7710    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7711    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7712    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7713    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7714    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7715    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7716    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7717    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7718    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7719    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7720    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7721    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7722    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7723    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7724    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7725    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7726    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7727    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7728    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7729    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7730    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7731    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7732      S_SET_THREAD_LOCAL (fixP->fx_addsy);
7733      /* Should always be exported to object file, see
7734	 aarch64_force_relocation().  */
7735      gas_assert (!fixP->fx_done);
7736      gas_assert (seg->use_rela_p);
7737      break;
7738
7739    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7740      /* Should always be exported to object file, see
7741	 aarch64_force_relocation().  */
7742      fixP->fx_r_type = (ilp32_p
7743			 ? BFD_RELOC_AARCH64_LD32_GOT_LO12_NC
7744			 : BFD_RELOC_AARCH64_LD64_GOT_LO12_NC);
7745      gas_assert (!fixP->fx_done);
7746      gas_assert (seg->use_rela_p);
7747      break;
7748
7749    case BFD_RELOC_AARCH64_ADD_LO12:
7750    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7751    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7752    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7753    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7754    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7755    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7756    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7757    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7758    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7759    case BFD_RELOC_AARCH64_LDST128_LO12:
7760    case BFD_RELOC_AARCH64_LDST16_LO12:
7761    case BFD_RELOC_AARCH64_LDST32_LO12:
7762    case BFD_RELOC_AARCH64_LDST64_LO12:
7763    case BFD_RELOC_AARCH64_LDST8_LO12:
7764      /* Should always be exported to object file, see
7765	 aarch64_force_relocation().  */
7766      gas_assert (!fixP->fx_done);
7767      gas_assert (seg->use_rela_p);
7768      break;
7769
7770    case BFD_RELOC_AARCH64_TLSDESC_ADD:
7771    case BFD_RELOC_AARCH64_TLSDESC_CALL:
7772    case BFD_RELOC_AARCH64_TLSDESC_LDR:
7773      break;
7774
7775    case BFD_RELOC_UNUSED:
7776      /* An error will already have been reported.  */
7777      break;
7778
7779    default:
7780      as_bad_where (fixP->fx_file, fixP->fx_line,
7781		    _("unexpected %s fixup"),
7782		    bfd_get_reloc_code_name (fixP->fx_r_type));
7783      break;
7784    }
7785
7786apply_fix_return:
  /* Free the allocated struct aarch64_inst, if any.
     N.B. currently only a limited number of fix-up types actually use this
     field, so the performance impact should be minimal.  */
7790  if (fixP->tc_fix_data.inst != NULL)
7791    free (fixP->tc_fix_data.inst);
7792
7793  return;
7794}
7795
7796/* Translate internal representation of relocation info to BFD target
7797   format.  */
7798
7799arelent *
7800tc_gen_reloc (asection * section, fixS * fixp)
7801{
7802  arelent *reloc;
7803  bfd_reloc_code_real_type code;
7804
7805  reloc = XNEW (arelent);
7806
7807  reloc->sym_ptr_ptr = XNEW (asymbol *);
7808  *reloc->sym_ptr_ptr = symbol_get_bfdsym (fixp->fx_addsy);
7809  reloc->address = fixp->fx_frag->fr_address + fixp->fx_where;
7810
7811  if (fixp->fx_pcrel)
7812    {
7813      if (section->use_rela_p)
7814	fixp->fx_offset -= md_pcrel_from_section (fixp, section);
7815      else
7816	fixp->fx_offset = reloc->address;
7817    }
7818  reloc->addend = fixp->fx_offset;
7819
7820  code = fixp->fx_r_type;
7821  switch (code)
7822    {
7823    case BFD_RELOC_16:
7824      if (fixp->fx_pcrel)
7825	code = BFD_RELOC_16_PCREL;
7826      break;
7827
7828    case BFD_RELOC_32:
7829      if (fixp->fx_pcrel)
7830	code = BFD_RELOC_32_PCREL;
7831      break;
7832
7833    case BFD_RELOC_64:
7834      if (fixp->fx_pcrel)
7835	code = BFD_RELOC_64_PCREL;
7836      break;
7837
7838    default:
7839      break;
7840    }
7841
7842  reloc->howto = bfd_reloc_type_lookup (stdoutput, code);
7843  if (reloc->howto == NULL)
7844    {
7845      as_bad_where (fixp->fx_file, fixp->fx_line,
7846		    _
7847		    ("cannot represent %s relocation in this object file format"),
7848		    bfd_get_reloc_code_name (code));
7849      return NULL;
7850    }
7851
7852  return reloc;
7853}
7854
7855/* This fix_new is called by cons via TC_CONS_FIX_NEW.	*/
7856
7857void
7858cons_fix_new_aarch64 (fragS * frag, int where, int size, expressionS * exp)
7859{
7860  bfd_reloc_code_real_type type;
7861  int pcrel = 0;
7862
7863  /* Pick a reloc.
7864     FIXME: @@ Should look at CPU word size.  */
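  /* For example, a ".word expr" directive (size 4) produces a BFD_RELOC_32
     fixup, while ".xword"/".8byte" (size 8) produce BFD_RELOC_64.  */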
7865  switch (size)
7866    {
7867    case 1:
7868      type = BFD_RELOC_8;
7869      break;
7870    case 2:
7871      type = BFD_RELOC_16;
7872      break;
7873    case 4:
7874      type = BFD_RELOC_32;
7875      break;
7876    case 8:
7877      type = BFD_RELOC_64;
7878      break;
7879    default:
7880      as_bad (_("cannot do %u-byte relocation"), size);
7881      type = BFD_RELOC_UNUSED;
7882      break;
7883    }
7884
7885  fix_new_exp (frag, where, (int) size, exp, pcrel, type);
7886}
7887
7888int
7889aarch64_force_relocation (struct fix *fixp)
7890{
7891  switch (fixp->fx_r_type)
7892    {
7893    case BFD_RELOC_AARCH64_GAS_INTERNAL_FIXUP:
7894      /* Perform these "immediate" internal relocations
7895         even if the symbol is extern or weak.  */
7896      return 0;
7897
7898    case BFD_RELOC_AARCH64_LD_GOT_LO12_NC:
7899    case BFD_RELOC_AARCH64_TLSDESC_LD_LO12_NC:
7900    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_LO12_NC:
7901      /* Pseudo relocs that need to be fixed up according to
7902	 ilp32_p.  */
7903      return 0;
7904
7905    case BFD_RELOC_AARCH64_ADD_LO12:
7906    case BFD_RELOC_AARCH64_ADR_GOT_PAGE:
7907    case BFD_RELOC_AARCH64_ADR_HI21_NC_PCREL:
7908    case BFD_RELOC_AARCH64_ADR_HI21_PCREL:
7909    case BFD_RELOC_AARCH64_GOT_LD_PREL19:
7910    case BFD_RELOC_AARCH64_LD32_GOT_LO12_NC:
7911    case BFD_RELOC_AARCH64_LD32_GOTPAGE_LO14:
7912    case BFD_RELOC_AARCH64_LD64_GOTOFF_LO15:
7913    case BFD_RELOC_AARCH64_LD64_GOTPAGE_LO15:
7914    case BFD_RELOC_AARCH64_LD64_GOT_LO12_NC:
7915    case BFD_RELOC_AARCH64_LDST128_LO12:
7916    case BFD_RELOC_AARCH64_LDST16_LO12:
7917    case BFD_RELOC_AARCH64_LDST32_LO12:
7918    case BFD_RELOC_AARCH64_LDST64_LO12:
7919    case BFD_RELOC_AARCH64_LDST8_LO12:
7920    case BFD_RELOC_AARCH64_TLSDESC_ADD_LO12_NC:
7921    case BFD_RELOC_AARCH64_TLSDESC_ADR_PAGE21:
7922    case BFD_RELOC_AARCH64_TLSDESC_ADR_PREL21:
7923    case BFD_RELOC_AARCH64_TLSDESC_LD32_LO12_NC:
7924    case BFD_RELOC_AARCH64_TLSDESC_LD64_LO12_NC:
7925    case BFD_RELOC_AARCH64_TLSDESC_LD_PREL19:
7926    case BFD_RELOC_AARCH64_TLSDESC_OFF_G0_NC:
7927    case BFD_RELOC_AARCH64_TLSDESC_OFF_G1:
7928    case BFD_RELOC_AARCH64_TLSGD_ADD_LO12_NC:
7929    case BFD_RELOC_AARCH64_TLSGD_ADR_PAGE21:
7930    case BFD_RELOC_AARCH64_TLSGD_ADR_PREL21:
7931    case BFD_RELOC_AARCH64_TLSGD_MOVW_G0_NC:
7932    case BFD_RELOC_AARCH64_TLSGD_MOVW_G1:
7933    case BFD_RELOC_AARCH64_TLSIE_ADR_GOTTPREL_PAGE21:
7934    case BFD_RELOC_AARCH64_TLSIE_LD32_GOTTPREL_LO12_NC:
7935    case BFD_RELOC_AARCH64_TLSIE_LD64_GOTTPREL_LO12_NC:
7936    case BFD_RELOC_AARCH64_TLSIE_LD_GOTTPREL_PREL19:
7937    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G0_NC:
7938    case BFD_RELOC_AARCH64_TLSIE_MOVW_GOTTPREL_G1:
7939   case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_HI12:
7940    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12:
7941    case BFD_RELOC_AARCH64_TLSLD_ADD_DTPREL_LO12_NC:
7942    case BFD_RELOC_AARCH64_TLSLD_ADD_LO12_NC:
7943    case BFD_RELOC_AARCH64_TLSLD_ADR_PAGE21:
7944    case BFD_RELOC_AARCH64_TLSLD_ADR_PREL21:
7945    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12:
7946    case BFD_RELOC_AARCH64_TLSLD_LDST16_DTPREL_LO12_NC:
7947    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12:
7948    case BFD_RELOC_AARCH64_TLSLD_LDST32_DTPREL_LO12_NC:
7949    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12:
7950    case BFD_RELOC_AARCH64_TLSLD_LDST64_DTPREL_LO12_NC:
7951    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12:
7952    case BFD_RELOC_AARCH64_TLSLD_LDST8_DTPREL_LO12_NC:
7953    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0:
7954    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G0_NC:
7955    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1:
7956    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G1_NC:
7957    case BFD_RELOC_AARCH64_TLSLD_MOVW_DTPREL_G2:
7958    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_HI12:
7959    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12:
7960    case BFD_RELOC_AARCH64_TLSLE_ADD_TPREL_LO12_NC:
7961    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0:
7962    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G0_NC:
7963    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1:
7964    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G1_NC:
7965    case BFD_RELOC_AARCH64_TLSLE_MOVW_TPREL_G2:
7966      /* Always leave these relocations for the linker.  */
7967      return 1;
7968
7969    default:
7970      break;
7971    }
7972
7973  return generic_force_reloc (fixp);
7974}
7975
7976#ifdef OBJ_ELF
7977
7978const char *
7979elf64_aarch64_target_format (void)
7980{
7981  if (strcmp (TARGET_OS, "cloudabi") == 0)
7982    {
7983      /* FIXME: What to do for ilp32_p ?  */
7984      return target_big_endian ? "elf64-bigaarch64-cloudabi" : "elf64-littleaarch64-cloudabi";
7985    }
7986  if (target_big_endian)
7987    return ilp32_p ? "elf32-bigaarch64" : "elf64-bigaarch64";
7988  else
7989    return ilp32_p ? "elf32-littleaarch64" : "elf64-littleaarch64";
7990}
7991
7992void
7993aarch64elf_frob_symbol (symbolS * symp, int *puntp)
7994{
7995  elf_frob_symbol (symp, puntp);
7996}
7997#endif
7998
7999/* MD interface: Finalization.	*/
8000
8001/* A good place to do this, although this was probably not intended
8002   for this kind of use.  We need to dump the literal pool before
8003   references are made to a null symbol pointer.  */
8004
8005void
8006aarch64_cleanup (void)
8007{
8008  literal_pool *pool;
8009
8010  for (pool = list_of_pools; pool; pool = pool->next)
8011    {
8012      /* Put it at the end of the relevant section.  */
8013      subseg_set (pool->section, pool->sub_section);
8014      s_ltorg (0);
8015    }
8016}
8017
8018#ifdef OBJ_ELF
8019/* Remove any excess mapping symbols generated for alignment frags in
8020   SEC.  We may have created a mapping symbol before a zero byte
8021   alignment; remove it if there's a mapping symbol after the
8022   alignment.  */
8023static void
8024check_mapping_symbols (bfd * abfd ATTRIBUTE_UNUSED, asection * sec,
8025		       void *dummy ATTRIBUTE_UNUSED)
8026{
8027  segment_info_type *seginfo = seg_info (sec);
8028  fragS *fragp;
8029
8030  if (seginfo == NULL || seginfo->frchainP == NULL)
8031    return;
8032
8033  for (fragp = seginfo->frchainP->frch_root;
8034       fragp != NULL; fragp = fragp->fr_next)
8035    {
8036      symbolS *sym = fragp->tc_frag_data.last_map;
8037      fragS *next = fragp->fr_next;
8038
8039      /* Variable-sized frags have been converted to fixed size by
8040         this point.  But if this was variable-sized to start with,
8041         there will be a fixed-size frag after it.  So don't handle
8042         next == NULL.  */
8043      if (sym == NULL || next == NULL)
8044	continue;
8045
8046      if (S_GET_VALUE (sym) < next->fr_address)
8047	/* Not at the end of this frag.  */
8048	continue;
8049      know (S_GET_VALUE (sym) == next->fr_address);
8050
8051      do
8052	{
8053	  if (next->tc_frag_data.first_map != NULL)
8054	    {
8055	      /* Next frag starts with a mapping symbol.  Discard this
8056	         one.  */
8057	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8058	      break;
8059	    }
8060
8061	  if (next->fr_next == NULL)
8062	    {
8063	      /* This mapping symbol is at the end of the section.  Discard
8064	         it.  */
8065	      know (next->fr_fix == 0 && next->fr_var == 0);
8066	      symbol_remove (sym, &symbol_rootP, &symbol_lastP);
8067	      break;
8068	    }
8069
8070	  /* As long as we have empty frags without any mapping symbols,
8071	     keep looking.  */
8072	  /* If the next frag is non-empty and does not start with a
8073	     mapping symbol, then this mapping symbol is required.  */
8074	  if (next->fr_address != next->fr_next->fr_address)
8075	    break;
8076
8077	  next = next->fr_next;
8078	}
8079      while (next != NULL);
8080    }
8081}
8082#endif
8083
8084/* Adjust the symbol table.  */
8085
8086void
8087aarch64_adjust_symtab (void)
8088{
8089#ifdef OBJ_ELF
8090  /* Remove any overlapping mapping symbols generated by alignment frags.  */
8091  bfd_map_over_sections (stdoutput, check_mapping_symbols, (char *) 0);
8092  /* Now do generic ELF adjustments.  */
8093  elf_adjust_symtab ();
8094#endif
8095}
8096
8097static void
8098checked_hash_insert (struct hash_control *table, const char *key, void *value)
8099{
8100  const char *hash_err;
8101
8102  hash_err = hash_insert (table, key, value);
8103  if (hash_err)
8104    printf ("Internal Error:  Can't hash %s\n", key);
8105}
8106
8107static void
8108fill_instruction_hash_table (void)
8109{
8110  aarch64_opcode *opcode = aarch64_opcode_table;
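  /* Opcode entries that share a mnemonic are chained into a single
     'templates' list; the list head is stored in aarch64_ops_hsh and each
     subsequent entry is linked in immediately after the head.  */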
8111
8112  while (opcode->name != NULL)
8113    {
8114      templates *templ, *new_templ;
8115      templ = hash_find (aarch64_ops_hsh, opcode->name);
8116
8117      new_templ = XNEW (templates);
8118      new_templ->opcode = opcode;
8119      new_templ->next = NULL;
8120
8121      if (!templ)
8122	checked_hash_insert (aarch64_ops_hsh, opcode->name, (void *) new_templ);
8123      else
8124	{
8125	  new_templ->next = templ->next;
8126	  templ->next = new_templ;
8127	}
8128      ++opcode;
8129    }
8130}
8131
8132static inline void
8133convert_to_upper (char *dst, const char *src, size_t num)
8134{
8135  unsigned int i;
8136  for (i = 0; i < num && *src != '\0'; ++i, ++dst, ++src)
8137    *dst = TOUPPER (*src);
8138  *dst = '\0';
8139}
8140
/* Assume STR points to a lower-case string; allocate, convert and return
   the corresponding upper-case string.  */
8143static inline const char*
8144get_upper_str (const char *str)
8145{
8146  char *ret;
8147  size_t len = strlen (str);
8148  ret = XNEWVEC (char, len + 1);
8149  convert_to_upper (ret, str, len);
8150  return ret;
8151}
8152
8153/* MD interface: Initialization.  */
8154
8155void
8156md_begin (void)
8157{
8158  unsigned mach;
8159  unsigned int i;
8160
8161  if ((aarch64_ops_hsh = hash_new ()) == NULL
8162      || (aarch64_cond_hsh = hash_new ()) == NULL
8163      || (aarch64_shift_hsh = hash_new ()) == NULL
8164      || (aarch64_sys_regs_hsh = hash_new ()) == NULL
8165      || (aarch64_pstatefield_hsh = hash_new ()) == NULL
8166      || (aarch64_sys_regs_ic_hsh = hash_new ()) == NULL
8167      || (aarch64_sys_regs_dc_hsh = hash_new ()) == NULL
8168      || (aarch64_sys_regs_at_hsh = hash_new ()) == NULL
8169      || (aarch64_sys_regs_tlbi_hsh = hash_new ()) == NULL
8170      || (aarch64_reg_hsh = hash_new ()) == NULL
8171      || (aarch64_barrier_opt_hsh = hash_new ()) == NULL
8172      || (aarch64_nzcv_hsh = hash_new ()) == NULL
8173      || (aarch64_pldop_hsh = hash_new ()) == NULL
8174      || (aarch64_hint_opt_hsh = hash_new ()) == NULL)
8175    as_fatal (_("virtual memory exhausted"));
8176
8177  fill_instruction_hash_table ();
8178
8179  for (i = 0; aarch64_sys_regs[i].name != NULL; ++i)
8180    checked_hash_insert (aarch64_sys_regs_hsh, aarch64_sys_regs[i].name,
8181			 (void *) (aarch64_sys_regs + i));
8182
8183  for (i = 0; aarch64_pstatefields[i].name != NULL; ++i)
8184    checked_hash_insert (aarch64_pstatefield_hsh,
8185			 aarch64_pstatefields[i].name,
8186			 (void *) (aarch64_pstatefields + i));
8187
8188  for (i = 0; aarch64_sys_regs_ic[i].name != NULL; i++)
8189    checked_hash_insert (aarch64_sys_regs_ic_hsh,
8190			 aarch64_sys_regs_ic[i].name,
8191			 (void *) (aarch64_sys_regs_ic + i));
8192
8193  for (i = 0; aarch64_sys_regs_dc[i].name != NULL; i++)
8194    checked_hash_insert (aarch64_sys_regs_dc_hsh,
8195			 aarch64_sys_regs_dc[i].name,
8196			 (void *) (aarch64_sys_regs_dc + i));
8197
8198  for (i = 0; aarch64_sys_regs_at[i].name != NULL; i++)
8199    checked_hash_insert (aarch64_sys_regs_at_hsh,
8200			 aarch64_sys_regs_at[i].name,
8201			 (void *) (aarch64_sys_regs_at + i));
8202
8203  for (i = 0; aarch64_sys_regs_tlbi[i].name != NULL; i++)
8204    checked_hash_insert (aarch64_sys_regs_tlbi_hsh,
8205			 aarch64_sys_regs_tlbi[i].name,
8206			 (void *) (aarch64_sys_regs_tlbi + i));
8207
8208  for (i = 0; i < ARRAY_SIZE (reg_names); i++)
8209    checked_hash_insert (aarch64_reg_hsh, reg_names[i].name,
8210			 (void *) (reg_names + i));
8211
8212  for (i = 0; i < ARRAY_SIZE (nzcv_names); i++)
8213    checked_hash_insert (aarch64_nzcv_hsh, nzcv_names[i].template,
8214			 (void *) (nzcv_names + i));
8215
8216  for (i = 0; aarch64_operand_modifiers[i].name != NULL; i++)
8217    {
8218      const char *name = aarch64_operand_modifiers[i].name;
8219      checked_hash_insert (aarch64_shift_hsh, name,
8220			   (void *) (aarch64_operand_modifiers + i));
8221      /* Also hash the name in the upper case.  */
8222      checked_hash_insert (aarch64_shift_hsh, get_upper_str (name),
8223			   (void *) (aarch64_operand_modifiers + i));
8224    }
8225
8226  for (i = 0; i < ARRAY_SIZE (aarch64_conds); i++)
8227    {
8228      unsigned int j;
8229      /* A condition code may have alias(es), e.g. "cc", "lo" and "ul" are
8230	 the same condition code.  */
8231      for (j = 0; j < ARRAY_SIZE (aarch64_conds[i].names); ++j)
8232	{
8233	  const char *name = aarch64_conds[i].names[j];
8234	  if (name == NULL)
8235	    break;
8236	  checked_hash_insert (aarch64_cond_hsh, name,
8237			       (void *) (aarch64_conds + i));
8238	  /* Also hash the name in the upper case.  */
8239	  checked_hash_insert (aarch64_cond_hsh, get_upper_str (name),
8240			       (void *) (aarch64_conds + i));
8241	}
8242    }
8243
8244  for (i = 0; i < ARRAY_SIZE (aarch64_barrier_options); i++)
8245    {
8246      const char *name = aarch64_barrier_options[i].name;
      /* Skip the xx00 entries - these barrier option values are unallocated.  */
8248      if ((i & 0x3) == 0)
8249	continue;
8250      checked_hash_insert (aarch64_barrier_opt_hsh, name,
8251			   (void *) (aarch64_barrier_options + i));
8252      /* Also hash the name in the upper case.  */
8253      checked_hash_insert (aarch64_barrier_opt_hsh, get_upper_str (name),
8254			   (void *) (aarch64_barrier_options + i));
8255    }
8256
8257  for (i = 0; i < ARRAY_SIZE (aarch64_prfops); i++)
8258    {
8259      const char* name = aarch64_prfops[i].name;
8260      /* Skip the unallocated hint encodings.  */
8261      if (name == NULL)
8262	continue;
8263      checked_hash_insert (aarch64_pldop_hsh, name,
8264			   (void *) (aarch64_prfops + i));
8265      /* Also hash the name in the upper case.  */
8266      checked_hash_insert (aarch64_pldop_hsh, get_upper_str (name),
8267			   (void *) (aarch64_prfops + i));
8268    }
8269
8270  for (i = 0; aarch64_hint_options[i].name != NULL; i++)
8271    {
8272      const char* name = aarch64_hint_options[i].name;
8273
8274      checked_hash_insert (aarch64_hint_opt_hsh, name,
8275			   (void *) (aarch64_hint_options + i));
8276      /* Also hash the name in the upper case.  */
      checked_hash_insert (aarch64_hint_opt_hsh, get_upper_str (name),
			   (void *) (aarch64_hint_options + i));
8279    }
8280
8281  /* Set the cpu variant based on the command-line options.  */
8282  if (!mcpu_cpu_opt)
8283    mcpu_cpu_opt = march_cpu_opt;
8284
8285  if (!mcpu_cpu_opt)
8286    mcpu_cpu_opt = &cpu_default;
8287
8288  cpu_variant = *mcpu_cpu_opt;
8289
8290  /* Record the CPU type.  */
8291  mach = ilp32_p ? bfd_mach_aarch64_ilp32 : bfd_mach_aarch64;
8292
8293  bfd_set_arch_mach (stdoutput, TARGET_ARCH, mach);
8294}
8295
8296/* Command line processing.  */
8297
8298const char *md_shortopts = "m:";
8299
8300#ifdef AARCH64_BI_ENDIAN
8301#define OPTION_EB (OPTION_MD_BASE + 0)
8302#define OPTION_EL (OPTION_MD_BASE + 1)
8303#else
8304#if TARGET_BYTES_BIG_ENDIAN
8305#define OPTION_EB (OPTION_MD_BASE + 0)
8306#else
8307#define OPTION_EL (OPTION_MD_BASE + 1)
8308#endif
8309#endif
8310
8311struct option md_longopts[] = {
8312#ifdef OPTION_EB
8313  {"EB", no_argument, NULL, OPTION_EB},
8314#endif
8315#ifdef OPTION_EL
8316  {"EL", no_argument, NULL, OPTION_EL},
8317#endif
8318  {NULL, no_argument, NULL, 0}
8319};
8320
8321size_t md_longopts_size = sizeof (md_longopts);
8322
8323struct aarch64_option_table
8324{
8325  const char *option;			/* Option name to match.  */
8326  const char *help;			/* Help information.  */
8327  int *var;			/* Variable to change.  */
8328  int value;			/* What to change it to.  */
8329  char *deprecated;		/* If non-null, print this message.  */
8330};
8331
8332static struct aarch64_option_table aarch64_opts[] = {
8333  {"mbig-endian", N_("assemble for big-endian"), &target_big_endian, 1, NULL},
8334  {"mlittle-endian", N_("assemble for little-endian"), &target_big_endian, 0,
8335   NULL},
8336#ifdef DEBUG_AARCH64
8337  {"mdebug-dump", N_("temporary switch for dumping"), &debug_dump, 1, NULL},
8338#endif /* DEBUG_AARCH64 */
8339  {"mverbose-error", N_("output verbose error messages"), &verbose_error_p, 1,
8340   NULL},
8341  {"mno-verbose-error", N_("do not output verbose error messages"),
8342   &verbose_error_p, 0, NULL},
8343  {NULL, NULL, NULL, 0, NULL}
8344};
8345
8346struct aarch64_cpu_option_table
8347{
8348  const char *name;
8349  const aarch64_feature_set value;
8350  /* The canonical name of the CPU, or NULL to use NAME converted to upper
8351     case.  */
8352  const char *canonical_name;
8353};
8354
8355/* This list should, at a minimum, contain all the cpu names
8356   recognized by GCC.  */
8357static const struct aarch64_cpu_option_table aarch64_cpus[] = {
8358  {"all", AARCH64_ANY, NULL},
8359  {"cortex-a35", AARCH64_FEATURE (AARCH64_ARCH_V8,
8360				  AARCH64_FEATURE_CRC), "Cortex-A35"},
8361  {"cortex-a53", AARCH64_FEATURE (AARCH64_ARCH_V8,
8362				  AARCH64_FEATURE_CRC), "Cortex-A53"},
8363  {"cortex-a57", AARCH64_FEATURE (AARCH64_ARCH_V8,
8364				  AARCH64_FEATURE_CRC), "Cortex-A57"},
8365  {"cortex-a72", AARCH64_FEATURE (AARCH64_ARCH_V8,
8366				  AARCH64_FEATURE_CRC), "Cortex-A72"},
8367  {"cortex-a73", AARCH64_FEATURE (AARCH64_ARCH_V8,
8368				  AARCH64_FEATURE_CRC), "Cortex-A73"},
8369  {"exynos-m1", AARCH64_FEATURE (AARCH64_ARCH_V8,
8370				 AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8371				"Samsung Exynos M1"},
8372  {"falkor", AARCH64_FEATURE (AARCH64_ARCH_V8,
8373			      AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8374   "Qualcomm Falkor"},
8375  {"qdf24xx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8376			       AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8377   "Qualcomm QDF24XX"},
8378  {"thunderx", AARCH64_FEATURE (AARCH64_ARCH_V8,
8379				AARCH64_FEATURE_CRC | AARCH64_FEATURE_CRYPTO),
8380   "Cavium ThunderX"},
8381  {"vulcan", AARCH64_FEATURE (AARCH64_ARCH_V8_1,
8382			      AARCH64_FEATURE_CRYPTO),
8383  "Broadcom Vulcan"},
  /* The 'xgene-1' name is an older spelling of 'xgene1'; it was used in
     earlier releases and is superseded by 'xgene1' in all tools.  */
8387  {"xgene-1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8388  {"xgene1", AARCH64_ARCH_V8, "APM X-Gene 1"},
8389  {"xgene2", AARCH64_FEATURE (AARCH64_ARCH_V8,
8390			      AARCH64_FEATURE_CRC), "APM X-Gene 2"},
8391  {"generic", AARCH64_ARCH_V8, NULL},
8392
8393  {NULL, AARCH64_ARCH_NONE, NULL}
8394};
8395
8396struct aarch64_arch_option_table
8397{
8398  const char *name;
8399  const aarch64_feature_set value;
8400};
8401
8402/* This list should, at a minimum, contain all the architecture names
8403   recognized by GCC.  */
8404static const struct aarch64_arch_option_table aarch64_archs[] = {
8405  {"all", AARCH64_ANY},
8406  {"armv8-a", AARCH64_ARCH_V8},
8407  {"armv8.1-a", AARCH64_ARCH_V8_1},
8408  {"armv8.2-a", AARCH64_ARCH_V8_2},
8409  {"armv8.3-a", AARCH64_ARCH_V8_3},
8410  {NULL, AARCH64_ARCH_NONE}
8411};
8412
8413/* ISA extensions.  */
8414struct aarch64_option_cpu_value_table
8415{
8416  const char *name;
8417  const aarch64_feature_set value;
8418  const aarch64_feature_set require; /* Feature dependencies.  */
8419};
8420
8421static const struct aarch64_option_cpu_value_table aarch64_features[] = {
8422  {"crc",		AARCH64_FEATURE (AARCH64_FEATURE_CRC, 0),
8423			AARCH64_ARCH_NONE},
8424  {"crypto",		AARCH64_FEATURE (AARCH64_FEATURE_CRYPTO, 0),
8425			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8426  {"fp",		AARCH64_FEATURE (AARCH64_FEATURE_FP, 0),
8427			AARCH64_ARCH_NONE},
8428  {"lse",		AARCH64_FEATURE (AARCH64_FEATURE_LSE, 0),
8429			AARCH64_ARCH_NONE},
8430  {"simd",		AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0),
8431			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8432  {"pan",		AARCH64_FEATURE (AARCH64_FEATURE_PAN, 0),
8433			AARCH64_ARCH_NONE},
8434  {"lor",		AARCH64_FEATURE (AARCH64_FEATURE_LOR, 0),
8435			AARCH64_ARCH_NONE},
8436  {"ras",		AARCH64_FEATURE (AARCH64_FEATURE_RAS, 0),
8437			AARCH64_ARCH_NONE},
8438  {"rdma",		AARCH64_FEATURE (AARCH64_FEATURE_RDMA, 0),
8439			AARCH64_FEATURE (AARCH64_FEATURE_SIMD, 0)},
8440  {"fp16",		AARCH64_FEATURE (AARCH64_FEATURE_F16, 0),
8441			AARCH64_FEATURE (AARCH64_FEATURE_FP, 0)},
8442  {"profile",		AARCH64_FEATURE (AARCH64_FEATURE_PROFILE, 0),
8443			AARCH64_ARCH_NONE},
8444  {"sve",		AARCH64_FEATURE (AARCH64_FEATURE_SVE, 0),
8445			AARCH64_FEATURE (AARCH64_FEATURE_F16
8446					 | AARCH64_FEATURE_SIMD
8447					 | AARCH64_FEATURE_COMPNUM, 0)},
8448  {"compnum",		AARCH64_FEATURE (AARCH64_FEATURE_COMPNUM, 0),
8449			AARCH64_FEATURE (AARCH64_FEATURE_F16
8450					 | AARCH64_FEATURE_SIMD, 0)},
8451  {"rcpc",		AARCH64_FEATURE (AARCH64_FEATURE_RCPC, 0),
8452			AARCH64_ARCH_NONE},
8453  {NULL,		AARCH64_ARCH_NONE, AARCH64_ARCH_NONE},
8454};
8455
8456struct aarch64_long_option_table
8457{
8458  const char *option;			/* Substring to match.  */
8459  const char *help;			/* Help information.  */
8460  int (*func) (const char *subopt);	/* Function to decode sub-option.  */
8461  char *deprecated;		/* If non-null, print this message.  */
8462};
8463
8464/* Transitive closure of features depending on set.  */
8465static aarch64_feature_set
8466aarch64_feature_disable_set (aarch64_feature_set set)
8467{
8468  const struct aarch64_option_cpu_value_table *opt;
8469  aarch64_feature_set prev = 0;
8470
8471  while (prev != set) {
8472    prev = set;
8473    for (opt = aarch64_features; opt->name != NULL; opt++)
8474      if (AARCH64_CPU_HAS_ANY_FEATURES (opt->require, set))
8475        AARCH64_MERGE_FEATURE_SETS (set, set, opt->value);
8476  }
8477  return set;
8478}
8479
8480/* Transitive closure of dependencies of set.  */
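/* For example, given the table above, enabling "sve" also enables fp16,
   simd, compnum and (transitively) fp, while disabling "fp" (via
   aarch64_feature_disable_set above) also disables simd, fp16, crypto,
   rdma, compnum and sve, since they all depend on fp directly or
   indirectly.  */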
8481static aarch64_feature_set
8482aarch64_feature_enable_set (aarch64_feature_set set)
8483{
8484  const struct aarch64_option_cpu_value_table *opt;
8485  aarch64_feature_set prev = 0;
8486
8487  while (prev != set) {
8488    prev = set;
8489    for (opt = aarch64_features; opt->name != NULL; opt++)
8490      if (AARCH64_CPU_HAS_FEATURE (set, opt->value))
8491        AARCH64_MERGE_FEATURE_SETS (set, set, opt->require);
8492  }
8493  return set;
8494}
8495
8496static int
8497aarch64_parse_features (const char *str, const aarch64_feature_set **opt_p,
8498			bfd_boolean ext_only)
8499{
8500  /* We insist on extensions being added before being removed.  We achieve
8501     this by using the ADDING_VALUE variable to indicate whether we are
8502     adding an extension (1) or removing it (0) and only allowing it to
8503     change in the order -1 -> 1 -> 0.  */
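  /* For example, "+crc+nofp" is accepted, whereas "+nofp+crc" is rejected
     because the removal of fp would precede the addition of crc.  */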
8504  int adding_value = -1;
8505  aarch64_feature_set *ext_set = XNEW (aarch64_feature_set);
8506
8507  /* Copy the feature set, so that we can modify it.  */
8508  *ext_set = **opt_p;
8509  *opt_p = ext_set;
8510
8511  while (str != NULL && *str != 0)
8512    {
8513      const struct aarch64_option_cpu_value_table *opt;
8514      const char *ext = NULL;
8515      int optlen;
8516
8517      if (!ext_only)
8518	{
8519	  if (*str != '+')
8520	    {
8521	      as_bad (_("invalid architectural extension"));
8522	      return 0;
8523	    }
8524
8525	  ext = strchr (++str, '+');
8526	}
8527
8528      if (ext != NULL)
8529	optlen = ext - str;
8530      else
8531	optlen = strlen (str);
8532
8533      if (optlen >= 2 && strncmp (str, "no", 2) == 0)
8534	{
8535	  if (adding_value != 0)
8536	    adding_value = 0;
8537	  optlen -= 2;
8538	  str += 2;
8539	}
8540      else if (optlen > 0)
8541	{
8542	  if (adding_value == -1)
8543	    adding_value = 1;
8544	  else if (adding_value != 1)
8545	    {
8546	      as_bad (_("must specify extensions to add before specifying "
8547			"those to remove"));
	      return 0;
8549	    }
8550	}
8551
8552      if (optlen == 0)
8553	{
8554	  as_bad (_("missing architectural extension"));
8555	  return 0;
8556	}
8557
8558      gas_assert (adding_value != -1);
8559
8560      for (opt = aarch64_features; opt->name != NULL; opt++)
8561	if (strncmp (opt->name, str, optlen) == 0)
8562	  {
8563	    aarch64_feature_set set;
8564
8565	    /* Add or remove the extension.  */
8566	    if (adding_value)
8567	      {
8568		set = aarch64_feature_enable_set (opt->value);
8569		AARCH64_MERGE_FEATURE_SETS (*ext_set, *ext_set, set);
8570	      }
8571	    else
8572	      {
8573		set = aarch64_feature_disable_set (opt->value);
8574		AARCH64_CLEAR_FEATURE (*ext_set, *ext_set, set);
8575	      }
8576	    break;
8577	  }
8578
8579      if (opt->name == NULL)
8580	{
8581	  as_bad (_("unknown architectural extension `%s'"), str);
8582	  return 0;
8583	}
8584
8585      str = ext;
8586    };
8587
8588  return 1;
8589}
8590
8591static int
8592aarch64_parse_cpu (const char *str)
8593{
8594  const struct aarch64_cpu_option_table *opt;
8595  const char *ext = strchr (str, '+');
8596  size_t optlen;
8597
8598  if (ext != NULL)
8599    optlen = ext - str;
8600  else
8601    optlen = strlen (str);
8602
8603  if (optlen == 0)
8604    {
8605      as_bad (_("missing cpu name `%s'"), str);
8606      return 0;
8607    }
8608
8609  for (opt = aarch64_cpus; opt->name != NULL; opt++)
8610    if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8611      {
8612	mcpu_cpu_opt = &opt->value;
8613	if (ext != NULL)
8614	  return aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE);
8615
8616	return 1;
8617      }
8618
8619  as_bad (_("unknown cpu `%s'"), str);
8620  return 0;
8621}
8622
8623static int
8624aarch64_parse_arch (const char *str)
8625{
8626  const struct aarch64_arch_option_table *opt;
8627  const char *ext = strchr (str, '+');
8628  size_t optlen;
8629
8630  if (ext != NULL)
8631    optlen = ext - str;
8632  else
8633    optlen = strlen (str);
8634
8635  if (optlen == 0)
8636    {
8637      as_bad (_("missing architecture name `%s'"), str);
8638      return 0;
8639    }
8640
8641  for (opt = aarch64_archs; opt->name != NULL; opt++)
8642    if (strlen (opt->name) == optlen && strncmp (str, opt->name, optlen) == 0)
8643      {
8644	march_cpu_opt = &opt->value;
8645	if (ext != NULL)
8646	  return aarch64_parse_features (ext, &march_cpu_opt, FALSE);
8647
8648	return 1;
8649      }
8650
8651  as_bad (_("unknown architecture `%s'\n"), str);
8652  return 0;
8653}
8654
8655/* ABIs.  */
8656struct aarch64_option_abi_value_table
8657{
8658  const char *name;
8659  enum aarch64_abi_type value;
8660};
8661
8662static const struct aarch64_option_abi_value_table aarch64_abis[] = {
8663  {"ilp32",		AARCH64_ABI_ILP32},
8664  {"lp64",		AARCH64_ABI_LP64},
8665};
8666
8667static int
8668aarch64_parse_abi (const char *str)
8669{
8670  unsigned int i;
8671
8672  if (str[0] == '\0')
8673    {
8674      as_bad (_("missing abi name `%s'"), str);
8675      return 0;
8676    }
8677
8678  for (i = 0; i < ARRAY_SIZE (aarch64_abis); i++)
8679    if (strcmp (str, aarch64_abis[i].name) == 0)
8680      {
8681	aarch64_abi = aarch64_abis[i].value;
8682	return 1;
8683      }
8684
8685  as_bad (_("unknown abi `%s'\n"), str);
8686  return 0;
8687}
8688
8689static struct aarch64_long_option_table aarch64_long_opts[] = {
8690#ifdef OBJ_ELF
8691  {"mabi=", N_("<abi name>\t  specify for ABI <abi name>"),
8692   aarch64_parse_abi, NULL},
8693#endif /* OBJ_ELF */
8694  {"mcpu=", N_("<cpu name>\t  assemble for CPU <cpu name>"),
8695   aarch64_parse_cpu, NULL},
8696  {"march=", N_("<arch name>\t  assemble for architecture <arch name>"),
8697   aarch64_parse_arch, NULL},
8698  {NULL, NULL, 0, NULL}
8699};
8700
8701int
8702md_parse_option (int c, const char *arg)
8703{
8704  struct aarch64_option_table *opt;
8705  struct aarch64_long_option_table *lopt;
8706
8707  switch (c)
8708    {
8709#ifdef OPTION_EB
8710    case OPTION_EB:
8711      target_big_endian = 1;
8712      break;
8713#endif
8714
8715#ifdef OPTION_EL
8716    case OPTION_EL:
8717      target_big_endian = 0;
8718      break;
8719#endif
8720
8721    case 'a':
8722      /* Listing option.  Just ignore these, we don't support additional
8723         ones.  */
8724      return 0;
8725
8726    default:
8727      for (opt = aarch64_opts; opt->option != NULL; opt++)
8728	{
8729	  if (c == opt->option[0]
8730	      && ((arg == NULL && opt->option[1] == 0)
8731		  || streq (arg, opt->option + 1)))
8732	    {
8733	      /* If the option is deprecated, tell the user.  */
8734	      if (opt->deprecated != NULL)
8735		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c,
8736			   arg ? arg : "", _(opt->deprecated));
8737
8738	      if (opt->var != NULL)
8739		*opt->var = opt->value;
8740
8741	      return 1;
8742	    }
8743	}
8744
8745      for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8746	{
8747	  /* These options are expected to have an argument.  */
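	  /* E.g. for "-mcpu=cortex-a53", C is 'm' and ARG is
	     "cpu=cortex-a53"; the matching entry is "mcpu=" and its parser
	     receives "cortex-a53".  */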
8748	  if (c == lopt->option[0]
8749	      && arg != NULL
8750	      && strncmp (arg, lopt->option + 1,
8751			  strlen (lopt->option + 1)) == 0)
8752	    {
8753	      /* If the option is deprecated, tell the user.  */
8754	      if (lopt->deprecated != NULL)
8755		as_tsktsk (_("option `-%c%s' is deprecated: %s"), c, arg,
8756			   _(lopt->deprecated));
8757
	      /* Call the sub-option parser.  */
8759	      return lopt->func (arg + strlen (lopt->option) - 1);
8760	    }
8761	}
8762
8763      return 0;
8764    }
8765
8766  return 1;
8767}
8768
8769void
8770md_show_usage (FILE * fp)
8771{
8772  struct aarch64_option_table *opt;
8773  struct aarch64_long_option_table *lopt;
8774
8775  fprintf (fp, _(" AArch64-specific assembler options:\n"));
8776
8777  for (opt = aarch64_opts; opt->option != NULL; opt++)
8778    if (opt->help != NULL)
8779      fprintf (fp, "  -%-23s%s\n", opt->option, _(opt->help));
8780
8781  for (lopt = aarch64_long_opts; lopt->option != NULL; lopt++)
8782    if (lopt->help != NULL)
8783      fprintf (fp, "  -%s%s\n", lopt->option, _(lopt->help));
8784
8785#ifdef OPTION_EB
8786  fprintf (fp, _("\
8787  -EB                     assemble code for a big-endian cpu\n"));
8788#endif
8789
8790#ifdef OPTION_EL
8791  fprintf (fp, _("\
8792  -EL                     assemble code for a little-endian cpu\n"));
8793#endif
8794}
8795
8796/* Parse a .cpu directive.  */
8797
8798static void
8799s_aarch64_cpu (int ignored ATTRIBUTE_UNUSED)
8800{
8801  const struct aarch64_cpu_option_table *opt;
8802  char saved_char;
8803  char *name;
8804  char *ext;
8805  size_t optlen;
8806
8807  name = input_line_pointer;
8808  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8809    input_line_pointer++;
8810  saved_char = *input_line_pointer;
8811  *input_line_pointer = 0;
8812
8813  ext = strchr (name, '+');
8814
8815  if (ext != NULL)
8816    optlen = ext - name;
8817  else
8818    optlen = strlen (name);
8819
8820  /* Skip the first "all" entry.  */
8821  for (opt = aarch64_cpus + 1; opt->name != NULL; opt++)
8822    if (strlen (opt->name) == optlen
8823	&& strncmp (name, opt->name, optlen) == 0)
8824      {
8825	mcpu_cpu_opt = &opt->value;
8826	if (ext != NULL)
8827	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8828	    return;
8829
8830	cpu_variant = *mcpu_cpu_opt;
8831
8832	*input_line_pointer = saved_char;
8833	demand_empty_rest_of_line ();
8834	return;
8835      }
8836  as_bad (_("unknown cpu `%s'"), name);
8837  *input_line_pointer = saved_char;
8838  ignore_rest_of_line ();
8839}
8840
8841
8842/* Parse a .arch directive.  */
8843
8844static void
8845s_aarch64_arch (int ignored ATTRIBUTE_UNUSED)
8846{
8847  const struct aarch64_arch_option_table *opt;
8848  char saved_char;
8849  char *name;
8850  char *ext;
8851  size_t optlen;
8852
8853  name = input_line_pointer;
8854  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8855    input_line_pointer++;
8856  saved_char = *input_line_pointer;
8857  *input_line_pointer = 0;
8858
8859  ext = strchr (name, '+');
8860
8861  if (ext != NULL)
8862    optlen = ext - name;
8863  else
8864    optlen = strlen (name);
8865
8866  /* Skip the first "all" entry.  */
8867  for (opt = aarch64_archs + 1; opt->name != NULL; opt++)
8868    if (strlen (opt->name) == optlen
8869	&& strncmp (name, opt->name, optlen) == 0)
8870      {
8871	mcpu_cpu_opt = &opt->value;
8872	if (ext != NULL)
8873	  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, FALSE))
8874	    return;
8875
8876	cpu_variant = *mcpu_cpu_opt;
8877
8878	*input_line_pointer = saved_char;
8879	demand_empty_rest_of_line ();
8880	return;
8881      }
8882
8883  as_bad (_("unknown architecture `%s'\n"), name);
8884  *input_line_pointer = saved_char;
8885  ignore_rest_of_line ();
8886}
8887
8888/* Parse a .arch_extension directive.  */
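/* Unlike a command-line "+ext" suffix, the directive takes a single
   extension name on its own, optionally prefixed with "no" to remove it,
   e.g. ".arch_extension crc" or ".arch_extension nocrc" (hence the
   EXT_ONLY argument to aarch64_parse_features below).  */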
8889
8890static void
8891s_aarch64_arch_extension (int ignored ATTRIBUTE_UNUSED)
8892{
8893  char saved_char;
  char *ext = input_line_pointer;
8895
8896  while (*input_line_pointer && !ISSPACE (*input_line_pointer))
8897    input_line_pointer++;
8898  saved_char = *input_line_pointer;
8899  *input_line_pointer = 0;
8900
8901  if (!aarch64_parse_features (ext, &mcpu_cpu_opt, TRUE))
8902    return;
8903
8904  cpu_variant = *mcpu_cpu_opt;
8905
8906  *input_line_pointer = saved_char;
8907  demand_empty_rest_of_line ();
8908}
8909
8910/* Copy symbol information.  */
8911
8912void
8913aarch64_copy_symbol_attributes (symbolS * dest, symbolS * src)
8914{
8915  AARCH64_GET_FLAG (dest) = AARCH64_GET_FLAG (src);
8916}
8917