1/* aarch64-opc.c -- AArch64 opcode support.
2   Copyright (C) 2009-2015 Free Software Foundation, Inc.
3   Contributed by ARM Ltd.
4
5   This file is part of the GNU opcodes library.
6
7   This library is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3, or (at your option)
10   any later version.
11
12   It is distributed in the hope that it will be useful, but WITHOUT
13   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15   License for more details.
16
17   You should have received a copy of the GNU General Public License
18   along with this program; see the file COPYING3. If not,
19   see <http://www.gnu.org/licenses/>.  */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30
31#include "aarch64-opc.h"
32
33#ifdef DEBUG_AARCH64
34int debug_dump = FALSE;
35#endif /* DEBUG_AARCH64 */
36
37/* Helper functions to determine which operand is used to encode/decode
38   the size:Q fields for AdvSIMD instructions.  */
39
40static inline bfd_boolean
41vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
42{
43  return ((qualifier >= AARCH64_OPND_QLF_V_8B
44	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
45	  : FALSE);
46}
47
48static inline bfd_boolean
49fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
50{
51  return ((qualifier >= AARCH64_OPND_QLF_S_B
52	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
53	  : FALSE);
54}
55
56enum data_pattern
57{
58  DP_UNKNOWN,
59  DP_VECTOR_3SAME,
60  DP_VECTOR_LONG,
61  DP_VECTOR_WIDE,
62  DP_VECTOR_ACROSS_LANES,
63};
64
65static const char significant_operand_index [] =
66{
67  0,	/* DP_UNKNOWN, by default using operand 0.  */
68  0,	/* DP_VECTOR_3SAME */
69  1,	/* DP_VECTOR_LONG */
70  2,	/* DP_VECTOR_WIDE */
71  1,	/* DP_VECTOR_ACROSS_LANES */
72};
73
74/* Given a sequence of qualifiers in QUALIFIERS, determine and return
75   the data pattern.
76   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
77   corresponds to one of a sequence of operands.  */
78
79static enum data_pattern
80get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
81{
82  if (vector_qualifier_p (qualifiers[0]) == TRUE)
83    {
84      /* e.g. v.4s, v.4s, v.4s
85	   or v.4h, v.4h, v.h[3].  */
86      if (qualifiers[0] == qualifiers[1]
87	  && vector_qualifier_p (qualifiers[2]) == TRUE
88	  && (aarch64_get_qualifier_esize (qualifiers[0])
89	      == aarch64_get_qualifier_esize (qualifiers[1]))
90	  && (aarch64_get_qualifier_esize (qualifiers[0])
91	      == aarch64_get_qualifier_esize (qualifiers[2])))
92	return DP_VECTOR_3SAME;
93      /* e.g. v.8h, v.8b, v.8b.
94           or v.4s, v.4h, v.h[2].
95	   or v.8h, v.16b.  */
96      if (vector_qualifier_p (qualifiers[1]) == TRUE
97	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
98	  && (aarch64_get_qualifier_esize (qualifiers[0])
99	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
100	return DP_VECTOR_LONG;
101      /* e.g. v.8h, v.8h, v.8b.  */
102      if (qualifiers[0] == qualifiers[1]
103	  && vector_qualifier_p (qualifiers[2]) == TRUE
104	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
105	  && (aarch64_get_qualifier_esize (qualifiers[0])
106	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
107	  && (aarch64_get_qualifier_esize (qualifiers[0])
108	      == aarch64_get_qualifier_esize (qualifiers[1])))
109	return DP_VECTOR_WIDE;
110    }
111  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
112    {
113      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
114      if (vector_qualifier_p (qualifiers[1]) == TRUE
115	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
116	return DP_VECTOR_ACROSS_LANES;
117    }
118
119  return DP_UNKNOWN;
120}
121
122/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
123   the AdvSIMD instructions.  */
124/* N.B. it is possible to do some optimization that doesn't call
125   get_data_pattern each time we need to select an operand.  We could
126   either cache the calculated result or statically generate the data;
127   however, it is not obvious that the optimization would bring significant
128   benefit.  */
129
130int
131aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
132{
133  return
134    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
135}
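
/* Editor's note: the block below is an illustrative sketch added for
   exposition; it is not part of the original source.  It shows how a
   qualifier sequence maps to a data pattern, and hence to the operand
   whose qualifier drives the size:Q encoding, e.g. for a widening (long)
   operation such as SADDL <Vd>.8H, <Vn>.8B, <Vm>.8B.  */
#if 0
static void
example_sizeq_operand_selection (void)
{
  /* Destination 8H, sources 8B; unused entries default to NIL (0).  */
  aarch64_opnd_qualifier_seq_t qseq
    = { AARCH64_OPND_QLF_V_8H, AARCH64_OPND_QLF_V_8B, AARCH64_OPND_QLF_V_8B };

  /* The destination element size (2) is twice the source element size (1),
     so the pattern is DP_VECTOR_LONG and operand 1 carries size:Q.  */
  assert (get_data_pattern (qseq) == DP_VECTOR_LONG);
  assert (significant_operand_index[DP_VECTOR_LONG] == 1);
}
#endif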
136
137const aarch64_field fields[] =
138{
139    {  0,  0 },	/* NIL.  */
140    {  0,  4 },	/* cond2: condition in truly conditionally-executed inst.  */
141    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
142    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
143    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
144    {  5, 19 },	/* imm19: e.g. in CBZ.  */
145    {  5, 19 },	/* immhi: e.g. in ADRP.  */
146    { 29,  2 },	/* immlo: e.g. in ADRP.  */
147    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
148    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
149    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
150    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
151    {  0,  5 },	/* Rt: in load/store instructions.  */
152    {  0,  5 },	/* Rd: in many integer instructions.  */
153    {  5,  5 },	/* Rn: in many integer instructions.  */
154    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
155    { 10,  5 },	/* Ra: in fp instructions.  */
156    {  5,  3 },	/* op2: in the system instructions.  */
157    {  8,  4 },	/* CRm: in the system instructions.  */
158    { 12,  4 },	/* CRn: in the system instructions.  */
159    { 16,  3 },	/* op1: in the system instructions.  */
160    { 19,  2 },	/* op0: in the system instructions.  */
161    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
162    { 12,  4 },	/* cond: condition flags as a source operand.  */
163    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
164    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
165    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
166    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
167    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
168    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
169    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
170    { 12,  1 },	/* S: in load/store reg offset instructions.  */
171    { 21,  2 },	/* hw: in move wide constant instructions.  */
172    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
173    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
174    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
175    { 22,  2 },	/* type: floating point type field in fp data inst.  */
176    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
177    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
178    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
179    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
180    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
181    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
182    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
183    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
184    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
185    {  5, 16 },	/* imm16: in exception instructions.  */
186    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
187    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
188    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
189    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
190    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
191    { 22,  1 },	/* N: in logical (immediate) instructions.  */
192    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
193    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
194    { 31,  1 },	/* sf: in integer data processing instructions.  */
195    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
196    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
197    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
198    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
199    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
200    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
201    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
202};
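
/* Editor's note: illustrative sketch, not part of the original source.
   Each entry above is an (lsb, width) pair; a field value is extracted by
   shifting and masking.  The enumerator FLD_Rd and the member names
   lsb/width are assumed to match aarch64-opc.h, and 0x8b020020 is assumed
   to be the encoding of "add x0, x1, x2".  */
#if 0
static unsigned int
example_extract_rd (aarch64_insn code)
{
  const aarch64_field *fld = &fields[FLD_Rd];	/* { 0, 5 }  */
  return (unsigned int) ((code >> fld->lsb) & ((1u << fld->width) - 1));
}

/* example_extract_rd (0x8b020020) yields 0, i.e. register x0.  */
#endif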
203
204enum aarch64_operand_class
205aarch64_get_operand_class (enum aarch64_opnd type)
206{
207  return aarch64_operands[type].op_class;
208}
209
210const char *
211aarch64_get_operand_name (enum aarch64_opnd type)
212{
213  return aarch64_operands[type].name;
214}
215
216/* Get operand description string.
217   This is usually for diagnostic purposes.  */
218const char *
219aarch64_get_operand_desc (enum aarch64_opnd type)
220{
221  return aarch64_operands[type].desc;
222}
223
224/* Table of all conditional affixes.  */
225const aarch64_cond aarch64_conds[16] =
226{
227  {{"eq"}, 0x0},
228  {{"ne"}, 0x1},
229  {{"cs", "hs"}, 0x2},
230  {{"cc", "lo", "ul"}, 0x3},
231  {{"mi"}, 0x4},
232  {{"pl"}, 0x5},
233  {{"vs"}, 0x6},
234  {{"vc"}, 0x7},
235  {{"hi"}, 0x8},
236  {{"ls"}, 0x9},
237  {{"ge"}, 0xa},
238  {{"lt"}, 0xb},
239  {{"gt"}, 0xc},
240  {{"le"}, 0xd},
241  {{"al"}, 0xe},
242  {{"nv"}, 0xf},
243};
244
245const aarch64_cond *
246get_cond_from_value (aarch64_insn value)
247{
248  assert (value < 16);
249  return &aarch64_conds[(unsigned int) value];
250}
251
252const aarch64_cond *
253get_inverted_cond (const aarch64_cond *cond)
254{
255  return &aarch64_conds[cond->value ^ 0x1];
256}
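
/* Editor's note: illustrative sketch, not part of the original source.
   It shows the two helpers above in action: condition value 0x0 is "eq",
   and flipping the low bit yields its inverse "ne" (value 0x1).  */
#if 0
static void
example_condition_lookup (void)
{
  const aarch64_cond *eq = get_cond_from_value (0x0);	/* "eq" */
  const aarch64_cond *ne = get_inverted_cond (eq);	/* "ne" */

  assert (ne->value == 0x1);
}
#endif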
257
258/* Table describing the operand extension/shifting operators; indexed by
259   enum aarch64_modifier_kind.
260
261   The value column provides the most common values for encoding modifiers,
262   which enables table-driven encoding/decoding for the modifiers.  */
263const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
264{
265    {"none", 0x0},
266    {"msl",  0x0},
267    {"ror",  0x3},
268    {"asr",  0x2},
269    {"lsr",  0x1},
270    {"lsl",  0x0},
271    {"uxtb", 0x0},
272    {"uxth", 0x1},
273    {"uxtw", 0x2},
274    {"uxtx", 0x3},
275    {"sxtb", 0x4},
276    {"sxth", 0x5},
277    {"sxtw", 0x6},
278    {"sxtx", 0x7},
279    {NULL, 0},
280};
281
282enum aarch64_modifier_kind
283aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
284{
285  return desc - aarch64_operand_modifiers;
286}
287
288aarch64_insn
289aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
290{
291  return aarch64_operand_modifiers[kind].value;
292}
293
294enum aarch64_modifier_kind
295aarch64_get_operand_modifier_from_value (aarch64_insn value,
296					 bfd_boolean extend_p)
297{
298  if (extend_p == TRUE)
299    return AARCH64_MOD_UXTB + value;
300  else
301    return AARCH64_MOD_LSL - value;
302}
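
/* Editor's note: illustrative sketch, not part of the original source.
   The table above gives each modifier's encoding value; the helpers map
   between the two directions.  AARCH64_MOD_UXTW and AARCH64_MOD_ASR are
   assumed to be the enumerators matching the "uxtw" and "asr" entries.  */
#if 0
static void
example_modifier_mapping (void)
{
  /* Extend operators: value 0x2 with the extend flag set is UXTW.  */
  assert (aarch64_get_operand_modifier_from_value (0x2, TRUE)
	  == AARCH64_MOD_UXTW);
  /* Shift operators: value 0x2 without the extend flag is ASR.  */
  assert (aarch64_get_operand_modifier_from_value (0x2, FALSE)
	  == AARCH64_MOD_ASR);
  /* The reverse mapping recovers the encoding value.  */
  assert (aarch64_get_operand_modifier_value (AARCH64_MOD_UXTW) == 0x2);
}
#endif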
303
304bfd_boolean
305aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
306{
307  return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
308    ? TRUE : FALSE;
309}
310
311static inline bfd_boolean
312aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
313{
314  return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
315    ? TRUE : FALSE;
316}
317
318const struct aarch64_name_value_pair aarch64_barrier_options[16] =
319{
320    { "#0x00", 0x0 },
321    { "oshld", 0x1 },
322    { "oshst", 0x2 },
323    { "osh",   0x3 },
324    { "#0x04", 0x4 },
325    { "nshld", 0x5 },
326    { "nshst", 0x6 },
327    { "nsh",   0x7 },
328    { "#0x08", 0x8 },
329    { "ishld", 0x9 },
330    { "ishst", 0xa },
331    { "ish",   0xb },
332    { "#0x0c", 0xc },
333    { "ld",    0xd },
334    { "st",    0xe },
335    { "sy",    0xf },
336};
337
338/* op -> op:       load = 0 instruction = 1 store = 2
339   l  -> level:    1-3
340   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
341#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
342const struct aarch64_name_value_pair aarch64_prfops[32] =
343{
344  { "pldl1keep", B(0, 1, 0) },
345  { "pldl1strm", B(0, 1, 1) },
346  { "pldl2keep", B(0, 2, 0) },
347  { "pldl2strm", B(0, 2, 1) },
348  { "pldl3keep", B(0, 3, 0) },
349  { "pldl3strm", B(0, 3, 1) },
350  { NULL, 0x06 },
351  { NULL, 0x07 },
352  { "plil1keep", B(1, 1, 0) },
353  { "plil1strm", B(1, 1, 1) },
354  { "plil2keep", B(1, 2, 0) },
355  { "plil2strm", B(1, 2, 1) },
356  { "plil3keep", B(1, 3, 0) },
357  { "plil3strm", B(1, 3, 1) },
358  { NULL, 0x0e },
359  { NULL, 0x0f },
360  { "pstl1keep", B(2, 1, 0) },
361  { "pstl1strm", B(2, 1, 1) },
362  { "pstl2keep", B(2, 2, 0) },
363  { "pstl2strm", B(2, 2, 1) },
364  { "pstl3keep", B(2, 3, 0) },
365  { "pstl3strm", B(2, 3, 1) },
366  { NULL, 0x16 },
367  { NULL, 0x17 },
368  { NULL, 0x18 },
369  { NULL, 0x19 },
370  { NULL, 0x1a },
371  { NULL, 0x1b },
372  { NULL, 0x1c },
373  { NULL, 0x1d },
374  { NULL, 0x1e },
375  { NULL, 0x1f },
376};
377#undef B
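
/* Editor's note: illustrative sketch, not part of the original source.
   It spells out the B() packing used to build the table above:
   "pstl3strm" is a store (op = 2) targeting level 3, streaming (t = 1),
   so its value is (2 << 3) | ((3 - 1) << 1) | 1 = 0x15, and it therefore
   sits at index 0x15 of aarch64_prfops.  */
#if 0
static void
example_prfop_encoding (void)
{
  unsigned int value = (2u << 3) | ((3u - 1) << 1) | 1u;	/* 0x15 */

  assert (value == 0x15);
  assert (aarch64_prfops[value].value == value);
  /* aarch64_prfops[0x15].name is "pstl3strm".  */
}
#endif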
378
379/* Utilities on value constraint.  */
380
381static inline int
382value_in_range_p (int64_t value, int low, int high)
383{
384  return (value >= low && value <= high) ? 1 : 0;
385}
386
387static inline int
388value_aligned_p (int64_t value, int align)
389{
390  return ((value & (align - 1)) == 0) ? 1 : 0;
391}
392
393/* Return non-zero if the signed VALUE fits in a signed field of WIDTH bits.  */
394static inline int
395value_fit_signed_field_p (int64_t value, unsigned width)
396{
397  assert (width < 32);
398  if (width < sizeof (value) * 8)
399    {
400      int64_t lim = (int64_t)1 << (width - 1);
401      if (value >= -lim && value < lim)
402	return 1;
403    }
404  return 0;
405}
406
407/* Return non-zero if VALUE fits in an unsigned field of WIDTH bits.  */
408static inline int
409value_fit_unsigned_field_p (int64_t value, unsigned width)
410{
411  assert (width < 32);
412  if (width < sizeof (value) * 8)
413    {
414      int64_t lim = (int64_t)1 << width;
415      if (value >= 0 && value < lim)
416	return 1;
417    }
418  return 0;
419}
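
/* Editor's note: illustrative sketch, not part of the original source.
   For example, the 19-bit signed imm19 field of CBZ can hold values in
   [-2^18, 2^18 - 1], i.e. [-262144, 262143] (before the 4-byte scaling
   applied to branch offsets).  */
#if 0
static void
example_field_fit_checks (void)
{
  assert (value_fit_signed_field_p (262143, 19));
  assert (!value_fit_signed_field_p (262144, 19));
  assert (value_fit_unsigned_field_p (4095, 12));	/* imm12 */
  assert (!value_fit_unsigned_field_p (4096, 12));
}
#endif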
420
421/* Return 1 if OPERAND is SP or WSP.  */
422int
423aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
424{
425  return ((aarch64_get_operand_class (operand->type)
426	   == AARCH64_OPND_CLASS_INT_REG)
427	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
428	  && operand->reg.regno == 31);
429}
430
431/* Return 1 if OPERAND is XZR or WZR.  */
432int
433aarch64_zero_register_p (const aarch64_opnd_info *operand)
434{
435  return ((aarch64_get_operand_class (operand->type)
436	   == AARCH64_OPND_CLASS_INT_REG)
437	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
438	  && operand->reg.regno == 31);
439}
440
441/* Return true if the operand *OPERAND, which has the operand code
442   OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
443   qualified by the qualifier TARGET.  */
444
445static inline int
446operand_also_qualified_p (const struct aarch64_opnd_info *operand,
447			  aarch64_opnd_qualifier_t target)
448{
449  switch (operand->qualifier)
450    {
451    case AARCH64_OPND_QLF_W:
452      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
453	return 1;
454      break;
455    case AARCH64_OPND_QLF_X:
456      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
457	return 1;
458      break;
459    case AARCH64_OPND_QLF_WSP:
460      if (target == AARCH64_OPND_QLF_W
461	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
462	return 1;
463      break;
464    case AARCH64_OPND_QLF_SP:
465      if (target == AARCH64_OPND_QLF_X
466	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
467	return 1;
468      break;
469    default:
470      break;
471    }
472
473  return 0;
474}
475
476/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
477   for operand KNOWN_IDX, return the expected qualifier for operand IDX.
478
479   Return NIL if more than one expected qualifier is found.  */
480
481aarch64_opnd_qualifier_t
482aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
483				int idx,
484				const aarch64_opnd_qualifier_t known_qlf,
485				int known_idx)
486{
487  int i, saved_i;
488
489  /* Special case.
490
491     When the known qualifier is NIL, we have to assume that there is only
492     one qualifier sequence in the *QSEQ_LIST and return the corresponding
493     qualifier directly.  One scenario is that for instruction
494	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
495     which has only one possible valid qualifier sequence
496	NIL, S_D
497     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
498     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
499
500     Because the qualifier NIL has dual roles in the qualifier sequence:
501     it can mean no qualifier for the operand, or the qualifier sequence is
502     not in use (when all qualifiers in the sequence are NILs), we have to
503     handle this special case here.  */
504  if (known_qlf == AARCH64_OPND_NIL)
505    {
506      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
507      return qseq_list[0][idx];
508    }
509
510  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
511    {
512      if (qseq_list[i][known_idx] == known_qlf)
513	{
514	  if (saved_i != -1)
515	    /* More than one sequence is found to have KNOWN_QLF at
516	       KNOWN_IDX.  */
517	    return AARCH64_OPND_NIL;
518	  saved_i = i;
519	}
520    }
521
522  return qseq_list[saved_i][idx];
523}
524
525enum operand_qualifier_kind
526{
527  OQK_NIL,
528  OQK_OPD_VARIANT,
529  OQK_VALUE_IN_RANGE,
530  OQK_MISC,
531};
532
533/* Operand qualifier description.  */
534struct operand_qualifier_data
535{
536  /* The usage of the three data fields depends on the qualifier kind.  */
537  int data0;
538  int data1;
539  int data2;
540  /* Description.  */
541  const char *desc;
542  /* Kind.  */
543  enum operand_qualifier_kind kind;
544};
545
546/* Indexed by the operand qualifier enumerators.  */
547struct operand_qualifier_data aarch64_opnd_qualifiers[] =
548{
549  {0, 0, 0, "NIL", OQK_NIL},
550
551  /* Operand variant qualifiers.
552     First 3 fields:
553     element size, number of elements and common value for encoding.  */
554
555  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
556  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
557  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
558  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
559
560  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
561  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
562  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
563  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
564  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
565
566  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
567  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
568  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
569  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
570  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
571  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
572  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
573  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
574  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
575
576  /* Qualifiers constraining the value range.
577     First 3 fields:
578     Lower bound, higher bound, unused.  */
579
580  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
581  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
582  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
583  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
584  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
585  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
586
587  /* Qualifiers for miscellaneous purpose.
588     First 3 fields:
589     unused, unused and unused.  */
590
591  {0, 0, 0, "lsl", 0},
592  {0, 0, 0, "msl", 0},
593
594  {0, 0, 0, "retrieving", 0},
595};
596
597static inline bfd_boolean
598operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
599{
600  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
601    ? TRUE : FALSE;
602}
603
604static inline bfd_boolean
605qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
606{
607  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
608    ? TRUE : FALSE;
609}
610
611const char*
612aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
613{
614  return aarch64_opnd_qualifiers[qualifier].desc;
615}
616
617/* Given an operand qualifier, return the expected data element size
618   of a qualified operand.  */
619unsigned char
620aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
621{
622  assert (operand_variant_qualifier_p (qualifier) == TRUE);
623  return aarch64_opnd_qualifiers[qualifier].data0;
624}
625
626unsigned char
627aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
628{
629  assert (operand_variant_qualifier_p (qualifier) == TRUE);
630  return aarch64_opnd_qualifiers[qualifier].data1;
631}
632
633aarch64_insn
634aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
635{
636  assert (operand_variant_qualifier_p (qualifier) == TRUE);
637  return aarch64_opnd_qualifiers[qualifier].data2;
638}
639
640static int
641get_lower_bound (aarch64_opnd_qualifier_t qualifier)
642{
643  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
644  return aarch64_opnd_qualifiers[qualifier].data0;
645}
646
647static int
648get_upper_bound (aarch64_opnd_qualifier_t qualifier)
649{
650  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
651  return aarch64_opnd_qualifiers[qualifier].data1;
652}
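
/* Editor's note: illustrative sketch, not part of the original source.
   It reads back a few entries of the table above through the accessors:
   the "4s" arrangement has 4-byte elements, 4 of them, and the standard
   encoding value 0x5, while imm_0_31 constrains an immediate to [0, 31].
   The enumerator names are assumed to follow the table order.  */
#if 0
static void
example_qualifier_queries (void)
{
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_nelem (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_standard_value (AARCH64_OPND_QLF_V_4S) == 0x5);

  assert (get_lower_bound (AARCH64_OPND_QLF_imm_0_31) == 0);
  assert (get_upper_bound (AARCH64_OPND_QLF_imm_0_31) == 31);
}
#endif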
653
654#ifdef DEBUG_AARCH64
655void
656aarch64_verbose (const char *str, ...)
657{
658  va_list ap;
659  va_start (ap, str);
660  printf ("#### ");
661  vprintf (str, ap);
662  printf ("\n");
663  va_end (ap);
664}
665
666static inline void
667dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
668{
669  int i;
670  printf ("#### \t");
671  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
672    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
673  printf ("\n");
674}
675
676static void
677dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
678		       const aarch64_opnd_qualifier_t *qualifier)
679{
680  int i;
681  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
682
683  aarch64_verbose ("dump_match_qualifiers:");
684  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
685    curr[i] = opnd[i].qualifier;
686  dump_qualifier_sequence (curr);
687  aarch64_verbose ("against");
688  dump_qualifier_sequence (qualifier);
689}
690#endif /* DEBUG_AARCH64 */
691
692/* TODO: improve this; we could have an extra field at run time to
693   store the number of operands rather than calculating it every time.  */
694
695int
696aarch64_num_of_operands (const aarch64_opcode *opcode)
697{
698  int i = 0;
699  const enum aarch64_opnd *opnds = opcode->operands;
700  while (opnds[i++] != AARCH64_OPND_NIL)
701    ;
702  --i;
703  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
704  return i;
705}
706
707/* Find the best-matched qualifier sequence in *QUALIFIERS_LIST for INST.
708   If it succeeds, fill the found sequence in *RET and return 1; otherwise return 0.
709
710   N.B. on entry, it is very likely that only some operands in *INST
711   have had their qualifiers established.
712
713   If STOP_AT is not -1, the function will only try to match
714   the qualifier sequence for operands before and including the operand
715   of index STOP_AT; and on success *RET will only be filled with the first
716   (STOP_AT+1) qualifiers.
717
718   A couple of examples of the matching algorithm:
719
720   X,W,NIL should match
721   X,W,NIL
722
723   NIL,NIL should match
724   X  ,NIL
725
726   Apart from serving the main encoding routine, this can also be called
727   during or after the operand decoding.  */
728
729int
730aarch64_find_best_match (const aarch64_inst *inst,
731			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
732			 int stop_at, aarch64_opnd_qualifier_t *ret)
733{
734  int found = 0;
735  int i, num_opnds;
736  const aarch64_opnd_qualifier_t *qualifiers;
737
738  num_opnds = aarch64_num_of_operands (inst->opcode);
739  if (num_opnds == 0)
740    {
741      DEBUG_TRACE ("SUCCEED: no operand");
742      return 1;
743    }
744
745  if (stop_at < 0 || stop_at >= num_opnds)
746    stop_at = num_opnds - 1;
747
748  /* For each pattern.  */
749  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
750    {
751      int j;
752      qualifiers = *qualifiers_list;
753
754      /* Start as positive.  */
755      found = 1;
756
757      DEBUG_TRACE ("%d", i);
758#ifdef DEBUG_AARCH64
759      if (debug_dump)
760	dump_match_qualifiers (inst->operands, qualifiers);
761#endif
762
763      /* Most opcodes have far fewer patterns in the list.
764	 The first NIL qualifier indicates the end of the list.  */
765      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
766	{
767	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
768	  if (i)
769	    found = 0;
770	  break;
771	}
772
773      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
774	{
775	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
776	    {
777	      /* Either the operand does not have a qualifier, or the qualifier
778		 for the operand needs to be deduced from the qualifier
779		 sequence.
780		 In the latter case, any constraint checking related with
781		 the obtained qualifier should be done later in
782		 operand_general_constraint_met_p.  */
783	      continue;
784	    }
785	  else if (*qualifiers != inst->operands[j].qualifier)
786	    {
787	      /* Unless the target qualifier can also qualify the operand
788		 (which already has a non-nil qualifier), non-equal
789		 qualifiers generally do not match.  */
790	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
791		continue;
792	      else
793		{
794		  found = 0;
795		  break;
796		}
797	    }
798	  else
799	    continue;	/* Equal qualifiers are certainly matched.  */
800	}
801
802      /* Qualifiers established.  */
803      if (found == 1)
804	break;
805    }
806
807  if (found == 1)
808    {
809      /* Fill the result in *RET.  */
810      int j;
811      qualifiers = *qualifiers_list;
812
813      DEBUG_TRACE ("complete qualifiers using list %d", i);
814#ifdef DEBUG_AARCH64
815      if (debug_dump)
816	dump_qualifier_sequence (qualifiers);
817#endif
818
819      for (j = 0; j <= stop_at; ++j, ++qualifiers)
820	ret[j] = *qualifiers;
821      for (; j < AARCH64_MAX_OPND_NUM; ++j)
822	ret[j] = AARCH64_OPND_QLF_NIL;
823
824      DEBUG_TRACE ("SUCCESS");
825      return 1;
826    }
827
828  DEBUG_TRACE ("FAIL");
829  return 0;
830}
831
832/* Operand qualifier matching and resolving.
833
834   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
835   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
836
837   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
838   succeeds.  */
839
840static int
841match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
842{
843  int i;
844  aarch64_opnd_qualifier_seq_t qualifiers;
845
846  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
847			       qualifiers))
848    {
849      DEBUG_TRACE ("matching FAIL");
850      return 0;
851    }
852
853  /* Update the qualifiers.  */
854  if (update_p == TRUE)
855    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
856      {
857	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
858	  break;
859	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
860			"update %s with %s for operand %d",
861			aarch64_get_qualifier_name (inst->operands[i].qualifier),
862			aarch64_get_qualifier_name (qualifiers[i]), i);
863	inst->operands[i].qualifier = qualifiers[i];
864      }
865
866  DEBUG_TRACE ("matching SUCCESS");
867  return 1;
868}
869
870/* Return TRUE if VALUE is a wide constant that can be moved into a general
871   register by MOVZ.
872
873   IS32 indicates whether VALUE is a 32-bit immediate or not.
874   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
875   amount will be returned in *SHIFT_AMOUNT.  */
876
877bfd_boolean
878aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
879{
880  int amount;
881
882  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
883
884  if (is32)
885    {
886      /* Allow all zeros or all ones in top 32-bits, so that
887	 32-bit constant expressions like ~0x80000000 are
888	 permitted.  */
889      uint64_t ext = value;
890      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
891	/* Immediate out of range.  */
892	return FALSE;
893      value &= (int64_t) 0xffffffff;
894    }
895
896  /* first, try movz then movn */
897  amount = -1;
898  if ((value & ((int64_t) 0xffff << 0)) == value)
899    amount = 0;
900  else if ((value & ((int64_t) 0xffff << 16)) == value)
901    amount = 16;
902  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
903    amount = 32;
904  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
905    amount = 48;
906
907  if (amount == -1)
908    {
909      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
910      return FALSE;
911    }
912
913  if (shift_amount != NULL)
914    *shift_amount = amount;
915
916  DEBUG_TRACE ("exit TRUE with amount %d", amount);
917
918  return TRUE;
919}
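
/* Editor's note: illustrative sketch, not part of the original source.
   A MOVZ-able constant is a single 16-bit chunk at a 16-bit-aligned
   position; 0xabcd0000 qualifies (chunk 0xabcd shifted left by 16),
   whereas 0x0001ffff spans two chunks and does not.  */
#if 0
static void
example_wide_constant_check (void)
{
  unsigned int shift;

  assert (aarch64_wide_constant_p (0xabcd0000, /*is32=*/1, &shift) == TRUE);
  assert (shift == 16);
  assert (aarch64_wide_constant_p (0x0001ffff, /*is32=*/1, NULL) == FALSE);
}
#endif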
920
921/* Build the accepted values for immediate logical SIMD instructions.
922
923   The standard encodings of the immediate value are:
924     N      imms     immr         SIMD size  R             S
925     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
926     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
927     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
928     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
929     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
930     0      11110s   00000r       2       UInt(r)       UInt(s)
931   where the all-ones value of S is reserved.
932
933   Let's call E the SIMD size.
934
935   The immediate value is: S+1 bits '1' rotated to the right by R.
936
937   The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
938   (remember S != E - 1).  */
939
940#define TOTAL_IMM_NB  5334
941
942typedef struct
943{
944  uint64_t imm;
945  aarch64_insn encoding;
946} simd_imm_encoding;
947
948static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
949
950static int
951simd_imm_encoding_cmp(const void *i1, const void *i2)
952{
953  const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
954  const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
955
956  if (imm1->imm < imm2->imm)
957    return -1;
958  if (imm1->imm > imm2->imm)
959    return +1;
960  return 0;
961}
962
963/* immediate bitfield standard encoding
964   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
965   1         ssssss     rrrrrr      64        rrrrrr ssssss
966   0         0sssss     0rrrrr      32        rrrrr  sssss
967   0         10ssss     00rrrr      16        rrrr   ssss
968   0         110sss     000rrr      8         rrr    sss
969   0         1110ss     0000rr      4         rr     ss
970   0         11110s     00000r      2         r      s  */
971static inline int
972encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
973{
974  return (is64 << 12) | (r << 6) | s;
975}
976
977static void
978build_immediate_table (void)
979{
980  uint32_t log_e, e, s, r, s_mask;
981  uint64_t mask, imm;
982  int nb_imms;
983  int is64;
984
985  nb_imms = 0;
986  for (log_e = 1; log_e <= 6; log_e++)
987    {
988      /* Get element size.  */
989      e = 1u << log_e;
990      if (log_e == 6)
991	{
992	  is64 = 1;
993	  mask = 0xffffffffffffffffull;
994	  s_mask = 0;
995	}
996      else
997	{
998	  is64 = 0;
999	  mask = (1ull << e) - 1;
1000	  /* log_e  s_mask
1001	     1     ((1 << 4) - 1) << 2 = 111100
1002	     2     ((1 << 3) - 1) << 3 = 111000
1003	     3     ((1 << 2) - 1) << 4 = 110000
1004	     4     ((1 << 1) - 1) << 5 = 100000
1005	     5     ((1 << 0) - 1) << 6 = 000000  */
1006	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1007	}
1008      for (s = 0; s < e - 1; s++)
1009	for (r = 0; r < e; r++)
1010	  {
1011	    /* s+1 consecutive bits to 1 (s < 63) */
1012	    imm = (1ull << (s + 1)) - 1;
1013	    /* rotate right by r */
1014	    if (r != 0)
1015	      imm = (imm >> r) | ((imm << (e - r)) & mask);
1016	    /* replicate the constant depending on SIMD size */
1017	    switch (log_e)
1018	      {
1019	      case 1: imm = (imm <<  2) | imm;
1020	      case 2: imm = (imm <<  4) | imm;
1021	      case 3: imm = (imm <<  8) | imm;
1022	      case 4: imm = (imm << 16) | imm;
1023	      case 5: imm = (imm << 32) | imm;
1024	      case 6: break;
1025	      default: abort ();
1026	      }
1027	    simd_immediates[nb_imms].imm = imm;
1028	    simd_immediates[nb_imms].encoding =
1029	      encode_immediate_bitfield(is64, s | s_mask, r);
1030	    nb_imms++;
1031	  }
1032    }
1033  assert (nb_imms == TOTAL_IMM_NB);
1034  qsort(simd_immediates, nb_imms,
1035	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1036}
1037
1038/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1039   be accepted by logical (immediate) instructions
1040   e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1041
1042   IS32 indicates whether or not VALUE is a 32-bit immediate.
1043   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1044   VALUE will be returned in *ENCODING.  */
1045
1046bfd_boolean
1047aarch64_logical_immediate_p (uint64_t value, int is32, aarch64_insn *encoding)
1048{
1049  simd_imm_encoding imm_enc;
1050  const simd_imm_encoding *imm_encoding;
1051  static bfd_boolean initialized = FALSE;
1052
1053  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), is32: %d", value,
1054	       value, is32);
1055
1056  if (initialized == FALSE)
1057    {
1058      build_immediate_table ();
1059      initialized = TRUE;
1060    }
1061
1062  if (is32)
1063    {
1064      /* Allow all zeros or all ones in top 32-bits, so that
1065	 constant expressions like ~1 are permitted.  */
1066      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1067	return FALSE;
1068
1069      /* Replicate the 32 lower bits to the 32 upper bits.  */
1070      value &= 0xffffffff;
1071      value |= value << 32;
1072    }
1073
1074  imm_enc.imm = value;
1075  imm_encoding = (const simd_imm_encoding *)
1076    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1077            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1078  if (imm_encoding == NULL)
1079    {
1080      DEBUG_TRACE ("exit with FALSE");
1081      return FALSE;
1082    }
1083  if (encoding != NULL)
1084    *encoding = imm_encoding->encoding;
1085  DEBUG_TRACE ("exit with TRUE");
1086  return TRUE;
1087}
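
/* Editor's note: illustrative sketch, not part of the original source.
   0x5555555555555555 is the 2-bit pattern 01 replicated across the
   register (a run of one set bit, rotation 0, element size 2), so it is
   a valid logical immediate; 0x1234 has no such run-and-replicate
   structure and is rejected.  */
#if 0
static void
example_logical_immediate_check (void)
{
  aarch64_insn enc;

  assert (aarch64_logical_immediate_p (0x5555555555555555ull, /*is32=*/0,
				       &enc) == TRUE);
  assert (aarch64_logical_immediate_p (0x1234, /*is32=*/0, NULL) == FALSE);
}
#endif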
1088
1089/* If 64-bit immediate IMM is in the format of
1090   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1091   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1092   of value "abcdefgh".  Otherwise return -1.  */
1093int
1094aarch64_shrink_expanded_imm8 (uint64_t imm)
1095{
1096  int i, ret;
1097  uint32_t byte;
1098
1099  ret = 0;
1100  for (i = 0; i < 8; i++)
1101    {
1102      byte = (imm >> (8 * i)) & 0xff;
1103      if (byte == 0xff)
1104	ret |= 1 << i;
1105      else if (byte != 0x00)
1106	return -1;
1107    }
1108  return ret;
1109}
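
/* Editor's note: illustrative sketch, not part of the original source.
   Each byte of the input must be either 0x00 or 0xff; the result packs
   one bit per byte, least significant byte first.  */
#if 0
static void
example_shrink_expanded_imm8 (void)
{
  /* Bytes (LSB first): ff 00 ff 00 00 ff 00 ff -> bits 0,2,5,7 -> 0xa5.  */
  assert (aarch64_shrink_expanded_imm8 (0xff00ff0000ff00ffull) == 0xa5);
  /* 0xf0 is neither 0x00 nor 0xff, so the value is rejected.  */
  assert (aarch64_shrink_expanded_imm8 (0x00000000000000f0ull) == -1);
}
#endif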
1110
1111/* Utility inline functions for operand_general_constraint_met_p.  */
1112
1113static inline void
1114set_error (aarch64_operand_error *mismatch_detail,
1115	   enum aarch64_operand_error_kind kind, int idx,
1116	   const char* error)
1117{
1118  if (mismatch_detail == NULL)
1119    return;
1120  mismatch_detail->kind = kind;
1121  mismatch_detail->index = idx;
1122  mismatch_detail->error = error;
1123}
1124
1125static inline void
1126set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1127		  const char* error)
1128{
1129  if (mismatch_detail == NULL)
1130    return;
1131  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1132}
1133
1134static inline void
1135set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1136			int idx, int lower_bound, int upper_bound,
1137			const char* error)
1138{
1139  if (mismatch_detail == NULL)
1140    return;
1141  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1142  mismatch_detail->data[0] = lower_bound;
1143  mismatch_detail->data[1] = upper_bound;
1144}
1145
1146static inline void
1147set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1148			    int idx, int lower_bound, int upper_bound)
1149{
1150  if (mismatch_detail == NULL)
1151    return;
1152  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1153			  _("immediate value"));
1154}
1155
1156static inline void
1157set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1158			       int idx, int lower_bound, int upper_bound)
1159{
1160  if (mismatch_detail == NULL)
1161    return;
1162  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1163			  _("immediate offset"));
1164}
1165
1166static inline void
1167set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1168			      int idx, int lower_bound, int upper_bound)
1169{
1170  if (mismatch_detail == NULL)
1171    return;
1172  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1173			  _("register number"));
1174}
1175
1176static inline void
1177set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1178				 int idx, int lower_bound, int upper_bound)
1179{
1180  if (mismatch_detail == NULL)
1181    return;
1182  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1183			  _("register element index"));
1184}
1185
1186static inline void
1187set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1188				   int idx, int lower_bound, int upper_bound)
1189{
1190  if (mismatch_detail == NULL)
1191    return;
1192  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1193			  _("shift amount"));
1194}
1195
1196static inline void
1197set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1198		     int alignment)
1199{
1200  if (mismatch_detail == NULL)
1201    return;
1202  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1203  mismatch_detail->data[0] = alignment;
1204}
1205
1206static inline void
1207set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1208		    int expected_num)
1209{
1210  if (mismatch_detail == NULL)
1211    return;
1212  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1213  mismatch_detail->data[0] = expected_num;
1214}
1215
1216static inline void
1217set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1218		 const char* error)
1219{
1220  if (mismatch_detail == NULL)
1221    return;
1222  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1223}
1224
1225/* General constraint checking based on operand code.
1226
1227   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1228   as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1229
1230   This function has to be called after the qualifiers for all operands
1231   have been resolved.
1232
1233   Mismatching error message is returned in *MISMATCH_DETAIL upon request,
1234   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating an
1235   error message during disassembly, where an error message is not
1236   wanted.  We avoid the dynamic construction of error message strings
1237   here (i.e. in libopcodes), as it is costly and complicated; instead, we
1238   use a combination of error code, static string and some integer data to
1239   represent an error.  */
1240
1241static int
1242operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1243				  enum aarch64_opnd type,
1244				  const aarch64_opcode *opcode,
1245				  aarch64_operand_error *mismatch_detail)
1246{
1247  unsigned num;
1248  unsigned char size;
1249  int64_t imm;
1250  const aarch64_opnd_info *opnd = opnds + idx;
1251  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1252
1253  assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1254
1255  switch (aarch64_operands[type].op_class)
1256    {
1257    case AARCH64_OPND_CLASS_INT_REG:
1258      /* Check pair reg constraints for cas* instructions.  */
1259      if (type == AARCH64_OPND_PAIRREG)
1260	{
1261	  assert (idx == 1 || idx == 3);
1262	  if (opnds[idx - 1].reg.regno % 2 != 0)
1263	    {
1264	      set_syntax_error (mismatch_detail, idx - 1,
1265				_("reg pair must start from even reg"));
1266	      return 0;
1267	    }
1268	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1269	    {
1270	      set_syntax_error (mismatch_detail, idx,
1271				_("reg pair must be contiguous"));
1272	      return 0;
1273	    }
1274	  break;
1275	}
1276
1277      /* <Xt> may be optional in some IC and TLBI instructions.  */
1278      if (type == AARCH64_OPND_Rt_SYS)
1279	{
1280	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1281			       == AARCH64_OPND_CLASS_SYSTEM));
1282	  if (opnds[1].present && !opnds[0].sysins_op->has_xt)
1283	    {
1284	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1285	      return 0;
1286	    }
1287	  if (!opnds[1].present && opnds[0].sysins_op->has_xt)
1288	    {
1289	      set_other_error (mismatch_detail, idx, _("missing register"));
1290	      return 0;
1291	    }
1292	}
1293      switch (qualifier)
1294	{
1295	case AARCH64_OPND_QLF_WSP:
1296	case AARCH64_OPND_QLF_SP:
1297	  if (!aarch64_stack_pointer_p (opnd))
1298	    {
1299	      set_other_error (mismatch_detail, idx,
1300			       _("stack pointer register expected"));
1301	      return 0;
1302	    }
1303	  break;
1304	default:
1305	  break;
1306	}
1307      break;
1308
1309    case AARCH64_OPND_CLASS_COND:
1310      if (type == AARCH64_OPND_COND1
1311	  && (opnds[idx].cond->value & 0xe) == 0xe)
1312	{
1313	  /* Don't allow AL or NV.  */
1314	  set_syntax_error (mismatch_detail, idx, NULL);
1315	}
1316      break;
1317
1318    case AARCH64_OPND_CLASS_ADDRESS:
1319      /* Check writeback.  */
1320      switch (opcode->iclass)
1321	{
1322	case ldst_pos:
1323	case ldst_unscaled:
1324	case ldstnapair_offs:
1325	case ldstpair_off:
1326	case ldst_unpriv:
1327	  if (opnd->addr.writeback == 1)
1328	    {
1329	      set_syntax_error (mismatch_detail, idx,
1330				_("unexpected address writeback"));
1331	      return 0;
1332	    }
1333	  break;
1334	case ldst_imm9:
1335	case ldstpair_indexed:
1336	case asisdlsep:
1337	case asisdlsop:
1338	  if (opnd->addr.writeback == 0)
1339	    {
1340	      set_syntax_error (mismatch_detail, idx,
1341				_("address writeback expected"));
1342	      return 0;
1343	    }
1344	  break;
1345	default:
1346	  assert (opnd->addr.writeback == 0);
1347	  break;
1348	}
1349      switch (type)
1350	{
1351	case AARCH64_OPND_ADDR_SIMM7:
1352	  /* Scaled signed 7-bit immediate offset.  */
1353	  /* Get the size of the data element that is accessed, which may be
1354	     different from that of the source register size,
1355	     e.g. in strb/ldrb.  */
1356	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1357	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1358	    {
1359	      set_offset_out_of_range_error (mismatch_detail, idx,
1360					     -64 * size, 63 * size);
1361	      return 0;
1362	    }
1363	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1364	    {
1365	      set_unaligned_error (mismatch_detail, idx, size);
1366	      return 0;
1367	    }
1368	  break;
1369	case AARCH64_OPND_ADDR_SIMM9:
1370	  /* Unscaled signed 9-bit immediate offset.  */
1371	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1372	    {
1373	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1374	      return 0;
1375	    }
1376	  break;
1377
1378	case AARCH64_OPND_ADDR_SIMM9_2:
1379	  /* Unscaled signed 9-bit immediate offset, which has to be negative
1380	     or unaligned.  */
1381	  size = aarch64_get_qualifier_esize (qualifier);
1382	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1383	       && !value_aligned_p (opnd->addr.offset.imm, size))
1384	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1385	    return 1;
1386	  set_other_error (mismatch_detail, idx,
1387			   _("negative or unaligned offset expected"));
1388	  return 0;
1389
1390	case AARCH64_OPND_SIMD_ADDR_POST:
1391	  /* AdvSIMD load/store multiple structures, post-index.  */
1392	  assert (idx == 1);
1393	  if (opnd->addr.offset.is_reg)
1394	    {
1395	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1396		return 1;
1397	      else
1398		{
1399		  set_other_error (mismatch_detail, idx,
1400				   _("invalid register offset"));
1401		  return 0;
1402		}
1403	    }
1404	  else
1405	    {
1406	      const aarch64_opnd_info *prev = &opnds[idx-1];
1407	      unsigned num_bytes; /* total number of bytes transferred.  */
1408	      /* The opcode dependent area stores the number of elements in
1409		 each structure to be loaded/stored.  */
1410	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1411	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1412		/* Special handling of loading a single structure to all lanes.  */
1413		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1414		  * aarch64_get_qualifier_esize (prev->qualifier);
1415	      else
1416		num_bytes = prev->reglist.num_regs
1417		  * aarch64_get_qualifier_esize (prev->qualifier)
1418		  * aarch64_get_qualifier_nelem (prev->qualifier);
1419	      if ((int) num_bytes != opnd->addr.offset.imm)
1420		{
1421		  set_other_error (mismatch_detail, idx,
1422				   _("invalid post-increment amount"));
1423		  return 0;
1424		}
1425	    }
1426	  break;
1427
1428	case AARCH64_OPND_ADDR_REGOFF:
1429	  /* Get the size of the data element that is accessed, which may be
1430	     different from that of the source register size,
1431	     e.g. in strb/ldrb.  */
1432	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1433	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1434	  if (opnd->shifter.amount != 0
1435	      && opnd->shifter.amount != (int)get_logsz (size))
1436	    {
1437	      set_other_error (mismatch_detail, idx,
1438			       _("invalid shift amount"));
1439	      return 0;
1440	    }
1441	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1442	     operators.  */
1443	  switch (opnd->shifter.kind)
1444	    {
1445	    case AARCH64_MOD_UXTW:
1446	    case AARCH64_MOD_LSL:
1447	    case AARCH64_MOD_SXTW:
1448	    case AARCH64_MOD_SXTX: break;
1449	    default:
1450	      set_other_error (mismatch_detail, idx,
1451			       _("invalid extend/shift operator"));
1452	      return 0;
1453	    }
1454	  break;
1455
1456	case AARCH64_OPND_ADDR_UIMM12:
1457	  imm = opnd->addr.offset.imm;
1458	  /* Get the size of the data element that is accessed, which may be
1459	     different from that of the source register size,
1460	     e.g. in strb/ldrb.  */
1461	  size = aarch64_get_qualifier_esize (qualifier);
1462	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1463	    {
1464	      set_offset_out_of_range_error (mismatch_detail, idx,
1465					     0, 4095 * size);
1466	      return 0;
1467	    }
1468	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1469	    {
1470	      set_unaligned_error (mismatch_detail, idx, size);
1471	      return 0;
1472	    }
1473	  break;
1474
1475	case AARCH64_OPND_ADDR_PCREL14:
1476	case AARCH64_OPND_ADDR_PCREL19:
1477	case AARCH64_OPND_ADDR_PCREL21:
1478	case AARCH64_OPND_ADDR_PCREL26:
1479	  imm = opnd->imm.value;
1480	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1481	    {
1482	      /* The offset value in a PC-relative branch instruction is always
1483		 4-byte aligned and is encoded without the lowest 2 bits.  */
1484	      if (!value_aligned_p (imm, 4))
1485		{
1486		  set_unaligned_error (mismatch_detail, idx, 4);
1487		  return 0;
1488		}
1489	      /* Right shift by 2 so that we can carry out the following check
1490		 canonically.  */
1491	      imm >>= 2;
1492	    }
1493	  size = get_operand_fields_width (get_operand_from_code (type));
1494	  if (!value_fit_signed_field_p (imm, size))
1495	    {
1496	      set_other_error (mismatch_detail, idx,
1497			       _("immediate out of range"));
1498	      return 0;
1499	    }
1500	  break;
1501
1502	default:
1503	  break;
1504	}
1505      break;
1506
1507    case AARCH64_OPND_CLASS_SIMD_REGLIST:
1508      /* The opcode dependent area stores the number of elements in
1509	 each structure to be loaded/stored.  */
1510      num = get_opcode_dependent_value (opcode);
1511      switch (type)
1512	{
1513	case AARCH64_OPND_LVt:
1514	  assert (num >= 1 && num <= 4);
1515	  /* Unless LD1/ST1, the number of registers should be equal to that
1516	     of the structure elements.  */
1517	  if (num != 1 && opnd->reglist.num_regs != num)
1518	    {
1519	      set_reg_list_error (mismatch_detail, idx, num);
1520	      return 0;
1521	    }
1522	  break;
1523	case AARCH64_OPND_LVt_AL:
1524	case AARCH64_OPND_LEt:
1525	  assert (num >= 1 && num <= 4);
1526	  /* The number of registers should be equal to that of the structure
1527	     elements.  */
1528	  if (opnd->reglist.num_regs != num)
1529	    {
1530	      set_reg_list_error (mismatch_detail, idx, num);
1531	      return 0;
1532	    }
1533	  break;
1534	default:
1535	  break;
1536	}
1537      break;
1538
1539    case AARCH64_OPND_CLASS_IMMEDIATE:
1540      /* Constraint check on immediate operand.  */
1541      imm = opnd->imm.value;
1542      /* E.g. imm_0_31 constrains value to be 0..31.  */
1543      if (qualifier_value_in_range_constraint_p (qualifier)
1544	  && !value_in_range_p (imm, get_lower_bound (qualifier),
1545				get_upper_bound (qualifier)))
1546	{
1547	  set_imm_out_of_range_error (mismatch_detail, idx,
1548				      get_lower_bound (qualifier),
1549				      get_upper_bound (qualifier));
1550	  return 0;
1551	}
1552
1553      switch (type)
1554	{
1555	case AARCH64_OPND_AIMM:
1556	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
1557	    {
1558	      set_other_error (mismatch_detail, idx,
1559			       _("invalid shift operator"));
1560	      return 0;
1561	    }
1562	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1563	    {
1564	      set_other_error (mismatch_detail, idx,
1565			       _("shift amount expected to be 0 or 12"));
1566	      return 0;
1567	    }
1568	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1569	    {
1570	      set_other_error (mismatch_detail, idx,
1571			       _("immediate out of range"));
1572	      return 0;
1573	    }
1574	  break;
1575
1576	case AARCH64_OPND_HALF:
1577	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1578	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
1579	    {
1580	      set_other_error (mismatch_detail, idx,
1581			       _("invalid shift operator"));
1582	      return 0;
1583	    }
1584	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1585	  if (!value_aligned_p (opnd->shifter.amount, 16))
1586	    {
1587	      set_other_error (mismatch_detail, idx,
1588			       _("shift amount should be a multiple of 16"));
1589	      return 0;
1590	    }
1591	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
1592	    {
1593	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
1594						 0, size * 8 - 16);
1595	      return 0;
1596	    }
1597	  if (opnd->imm.value < 0)
1598	    {
1599	      set_other_error (mismatch_detail, idx,
1600			       _("negative immediate value not allowed"));
1601	      return 0;
1602	    }
1603	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
1604	    {
1605	      set_other_error (mismatch_detail, idx,
1606			       _("immediate out of range"));
1607	      return 0;
1608	    }
1609	  break;
1610
1611	case AARCH64_OPND_IMM_MOV:
1612	    {
1613	      int is32 = aarch64_get_qualifier_esize (opnds[0].qualifier) == 4;
1614	      imm = opnd->imm.value;
1615	      assert (idx == 1);
1616	      switch (opcode->op)
1617		{
1618		case OP_MOV_IMM_WIDEN:
1619		  imm = ~imm;
1620		  /* Fall through...  */
1621		case OP_MOV_IMM_WIDE:
1622		  if (!aarch64_wide_constant_p (imm, is32, NULL))
1623		    {
1624		      set_other_error (mismatch_detail, idx,
1625				       _("immediate out of range"));
1626		      return 0;
1627		    }
1628		  break;
1629		case OP_MOV_IMM_LOG:
1630		  if (!aarch64_logical_immediate_p (imm, is32, NULL))
1631		    {
1632		      set_other_error (mismatch_detail, idx,
1633				       _("immediate out of range"));
1634		      return 0;
1635		    }
1636		  break;
1637		default:
1638		  assert (0);
1639		  return 0;
1640		}
1641	    }
1642	  break;
1643
1644	case AARCH64_OPND_NZCV:
1645	case AARCH64_OPND_CCMP_IMM:
1646	case AARCH64_OPND_EXCEPTION:
1647	case AARCH64_OPND_UIMM4:
1648	case AARCH64_OPND_UIMM7:
1649	case AARCH64_OPND_UIMM3_OP1:
1650	case AARCH64_OPND_UIMM3_OP2:
1651	  size = get_operand_fields_width (get_operand_from_code (type));
1652	  assert (size < 32);
1653	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
1654	    {
1655	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
1656					  (1 << size) - 1);
1657	      return 0;
1658	    }
1659	  break;
1660
1661	case AARCH64_OPND_WIDTH:
1662	  assert (idx == 3 && opnds[idx-1].type == AARCH64_OPND_IMM
1663		  && opnds[0].type == AARCH64_OPND_Rd);
1664	  size = get_upper_bound (qualifier);
1665	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
1666	    /* lsb+width <= reg.size  */
1667	    {
1668	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
1669					  size - opnds[idx-1].imm.value);
1670	      return 0;
1671	    }
1672	  break;
1673
1674	case AARCH64_OPND_LIMM:
1675	    {
1676	      int is32 = opnds[0].qualifier == AARCH64_OPND_QLF_W;
1677	      uint64_t uimm = opnd->imm.value;
1678	      if (opcode->op == OP_BIC)
1679		uimm = ~uimm;
1680	      if (aarch64_logical_immediate_p (uimm, is32, NULL) == FALSE)
1681		{
1682		  set_other_error (mismatch_detail, idx,
1683				   _("immediate out of range"));
1684		  return 0;
1685		}
1686	    }
1687	  break;
1688
1689	case AARCH64_OPND_IMM0:
1690	case AARCH64_OPND_FPIMM0:
1691	  if (opnd->imm.value != 0)
1692	    {
1693	      set_other_error (mismatch_detail, idx,
1694			       _("immediate zero expected"));
1695	      return 0;
1696	    }
1697	  break;
1698
1699	case AARCH64_OPND_SHLL_IMM:
1700	  assert (idx == 2);
1701	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
1702	  if (opnd->imm.value != size)
1703	    {
1704	      set_other_error (mismatch_detail, idx,
1705			       _("invalid shift amount"));
1706	      return 0;
1707	    }
1708	  break;
1709
1710	case AARCH64_OPND_IMM_VLSL:
1711	  size = aarch64_get_qualifier_esize (qualifier);
1712	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
1713	    {
1714	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
1715					  size * 8 - 1);
1716	      return 0;
1717	    }
1718	  break;
1719
1720	case AARCH64_OPND_IMM_VLSR:
1721	  size = aarch64_get_qualifier_esize (qualifier);
1722	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
1723	    {
1724	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
1725	      return 0;
1726	    }
1727	  break;
1728
1729	case AARCH64_OPND_SIMD_IMM:
1730	case AARCH64_OPND_SIMD_IMM_SFT:
1731	  /* Qualifier check.  */
1732	  switch (qualifier)
1733	    {
1734	    case AARCH64_OPND_QLF_LSL:
1735	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
1736		{
1737		  set_other_error (mismatch_detail, idx,
1738				   _("invalid shift operator"));
1739		  return 0;
1740		}
1741	      break;
1742	    case AARCH64_OPND_QLF_MSL:
1743	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
1744		{
1745		  set_other_error (mismatch_detail, idx,
1746				   _("invalid shift operator"));
1747		  return 0;
1748		}
1749	      break;
1750	    case AARCH64_OPND_QLF_NIL:
1751	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
1752		{
1753		  set_other_error (mismatch_detail, idx,
1754				   _("shift is not permitted"));
1755		  return 0;
1756		}
1757	      break;
1758	    default:
1759	      assert (0);
1760	      return 0;
1761	    }
1762	  /* Is the immediate valid?  */
1763	  assert (idx == 1);
1764	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
1765	    {
1766	      /* uimm8 or simm8 */
1767	      if (!value_in_range_p (opnd->imm.value, -128, 255))
1768		{
1769		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
1770		  return 0;
1771		}
1772	    }
1773	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
1774	    {
1775	      /* uimm64 is not
1776		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
1777		 ffffffffgggggggghhhhhhhh'.  */
1778	      set_other_error (mismatch_detail, idx,
1779			       _("invalid value for immediate"));
1780	      return 0;
1781	    }
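	  /* For example, with the 8-byte element size an immediate such as
	     0xff00ff00ff00ff00, where every byte is either 0x00 or 0xff, is
	     accepted and shrunk back to an 8-bit value, whereas something
	     like 0x0123456789abcdef fails the check above.  */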
1782	  /* Is the shift amount valid?  */
1783	  switch (opnd->shifter.kind)
1784	    {
1785	    case AARCH64_MOD_LSL:
1786	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
1787	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
1788		{
1789		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
1790						     (size - 1) * 8);
1791		  return 0;
1792		}
1793	      if (!value_aligned_p (opnd->shifter.amount, 8))
1794		{
1795		  set_unaligned_error (mismatch_detail, idx, 8);
1796		  return 0;
1797		}
1798	      break;
1799	    case AARCH64_MOD_MSL:
1800	      /* Only 8 and 16 are valid shift amounts.  */
1801	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
1802		{
1803		  set_other_error (mismatch_detail, idx,
1804				   _("shift amount expected to be 8 or 16"));
1805		  return 0;
1806		}
1807	      break;
1808	    default:
1809	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
1810		{
1811		  set_other_error (mismatch_detail, idx,
1812				   _("invalid shift operator"));
1813		  return 0;
1814		}
1815	      break;
1816	    }
1817	  break;
1818
1819	case AARCH64_OPND_FPIMM:
1820	case AARCH64_OPND_SIMD_FPIMM:
1821	  if (opnd->imm.is_fp == 0)
1822	    {
1823	      set_other_error (mismatch_detail, idx,
1824			       _("floating-point immediate expected"));
1825	      return 0;
1826	    }
1827	  /* The value is expected to be an 8-bit floating-point constant with
1828	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
1829	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
1830	     instruction).  */
1831	  if (!value_in_range_p (opnd->imm.value, 0, 255))
1832	    {
1833	      set_other_error (mismatch_detail, idx,
1834			       _("immediate out of range"));
1835	      return 0;
1836	    }
1837	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
1838	    {
1839	      set_other_error (mismatch_detail, idx,
1840			       _("invalid shift operator"));
1841	      return 0;
1842	    }
1843	  break;
1844
1845	default:
1846	  break;
1847	}
1848      break;
1849
1850    case AARCH64_OPND_CLASS_CP_REG:
1851      /* Cn or Cm: 4-bit opcode field named for historical reasons.
1852	 Valid range: C0 - C15.  */
1853      if (opnd->reg.regno > 15)
1854	{
1855	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1856	  return 0;
1857	}
1858      break;
1859
1860    case AARCH64_OPND_CLASS_SYSTEM:
1861      switch (type)
1862	{
1863	case AARCH64_OPND_PSTATEFIELD:
1864	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
1865	  /* MSR SPSel, #uimm4
1866	     Uses uimm4 as a control value to select the stack pointer: if
1867	     bit 0 is set, it selects the current exception level's stack
1868	     pointer; if bit 0 is clear, it selects the shared EL0 stack pointer.
1869	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
1870	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
1871	    {
1872	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
1873	      return 0;
1874	    }
1875	  break;
1876	default:
1877	  break;
1878	}
1879      break;
1880
1881    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
1882      /* Get the upper bound for the element index.  */
1883      num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1884      /* Index out-of-range.  */
1885      if (!value_in_range_p (opnd->reglane.index, 0, num))
1886	{
1887	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1888	  return 0;
1889	}
1890      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
1891	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
1892	 number is encoded in "size:M:Rm":
1893	 size	<Vm>
1894	 00		RESERVED
1895	 01		0:Rm
1896	 10		M:Rm
1897	 11		RESERVED  */
1898      if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
1899	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
1900	{
1901	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
1902	  return 0;
1903	}
1904      break;
1905
1906    case AARCH64_OPND_CLASS_MODIFIED_REG:
1907      assert (idx == 1 || idx == 2);
1908      switch (type)
1909	{
1910	case AARCH64_OPND_Rm_EXT:
1911	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
1912	      && opnd->shifter.kind != AARCH64_MOD_LSL)
1913	    {
1914	      set_other_error (mismatch_detail, idx,
1915			       _("extend operator expected"));
1916	      return 0;
1917	    }
1918	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
1919	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
1920	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
1921	     case.  */
1922	  if (!aarch64_stack_pointer_p (opnds + 0)
1923	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
1924	    {
1925	      if (!opnd->shifter.operator_present)
1926		{
1927		  set_other_error (mismatch_detail, idx,
1928				   _("missing extend operator"));
1929		  return 0;
1930		}
1931	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
1932		{
1933		  set_other_error (mismatch_detail, idx,
1934				   _("'LSL' operator not allowed"));
1935		  return 0;
1936		}
1937	    }
1938	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
1939		  || opnd->shifter.kind == AARCH64_MOD_LSL);
1940	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
1941	    {
1942	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
1943	      return 0;
1944	    }
1945	  /* In the 64-bit form, the final register operand is written as Wm
1946	     for all but the (possibly omitted) UXTX/LSL and SXTX
1947	     operators.
1948	     N.B. GAS allows X register to be used with any operator as a
1949	     programming convenience.  */
1950	  if (qualifier == AARCH64_OPND_QLF_X
1951	      && opnd->shifter.kind != AARCH64_MOD_LSL
1952	      && opnd->shifter.kind != AARCH64_MOD_UXTX
1953	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
1954	    {
1955	      set_other_error (mismatch_detail, idx, _("W register expected"));
1956	      return 0;
1957	    }
1958	  break;
1959
1960	case AARCH64_OPND_Rm_SFT:
1961	  /* ROR is not available to the shifted register operand in
1962	     arithmetic instructions.  */
1963	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
1964	    {
1965	      set_other_error (mismatch_detail, idx,
1966			       _("shift operator expected"));
1967	      return 0;
1968	    }
1969	  if (opnd->shifter.kind == AARCH64_MOD_ROR
1970	      && opcode->iclass != log_shift)
1971	    {
1972	      set_other_error (mismatch_detail, idx,
1973			       _("'ROR' operator not allowed"));
1974	      return 0;
1975	    }
1976	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
1977	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
1978	    {
1979	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
1980	      return 0;
1981	    }
1982	  break;
1983
1984	default:
1985	  break;
1986	}
1987      break;
1988
1989    default:
1990      break;
1991    }
1992
1993  return 1;
1994}
1995
1996/* Main entrypoint for the operand constraint checking.
1997
1998   Return 1 if operands of *INST meet the constraint applied by the operand
1999   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2000   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2001   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2002   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2003   error kind when it is notified that an instruction does not pass the check).
2004
2005   Un-determined operand qualifiers may get established during the process.  */
2006
2007int
2008aarch64_match_operands_constraint (aarch64_inst *inst,
2009				   aarch64_operand_error *mismatch_detail)
2010{
2011  int i;
2012
2013  DEBUG_TRACE ("enter");
2014
2015	  /* Match operands' qualifiers.
2016	     *INST has already had qualifiers established for some, if not all, of
2017	     its operands; we need to find out whether these established
2018	     qualifiers match one of the qualifier sequences in
2019	     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2020	     the corresponding qualifier in such a sequence.
2021	     Only basic operand constraint checking is done here; the more thorough
2022	     constraint checking will be carried out by operand_general_constraint_met_p,
2023	     which has to be called after this in order to get all of the operands'
2024	     qualifiers established.  */
2025  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2026    {
2027      DEBUG_TRACE ("FAIL on operand qualifier matching");
2028      if (mismatch_detail)
2029	{
2030	  /* Return an error type to indicate that it is a qualifier
2031	     matching failure; we don't care about which operand, as there
2032	     is enough information in the opcode table to reproduce it.  */
2033	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2034	  mismatch_detail->index = -1;
2035	  mismatch_detail->error = NULL;
2036	}
2037      return 0;
2038    }
2039
2040  /* Match operands' constraint.  */
2041  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2042    {
2043      enum aarch64_opnd type = inst->opcode->operands[i];
2044      if (type == AARCH64_OPND_NIL)
2045	break;
2046      if (inst->operands[i].skip)
2047	{
2048	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2049	  continue;
2050	}
2051      if (operand_general_constraint_met_p (inst->operands, i, type,
2052					    inst->opcode, mismatch_detail) == 0)
2053	{
2054	  DEBUG_TRACE ("FAIL on operand %d", i);
2055	  return 0;
2056	}
2057    }
2058
2059  DEBUG_TRACE ("PASS");
2060
2061  return 1;
2062}
2063
2064/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2065   Also updates the TYPE of each INST->OPERANDS with the corresponding
2066   value of OPCODE->OPERANDS.
2067
2068	   Note that some operand qualifiers may need to be manually cleared by
2069	   the caller before it further calls aarch64_opcode_encode; doing
2070	   this helps the qualifier matching facilities work
2071	   properly.  */
2072
2073const aarch64_opcode*
2074aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2075{
2076  int i;
2077  const aarch64_opcode *old = inst->opcode;
2078
2079  inst->opcode = opcode;
2080
2081  /* Update the operand types.  */
2082  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2083    {
2084      inst->operands[i].type = opcode->operands[i];
2085      if (opcode->operands[i] == AARCH64_OPND_NIL)
2086	break;
2087    }
2088
2089  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2090
2091  return old;
2092}
2093
2094int
2095aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2096{
2097  int i;
2098  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2099    if (operands[i] == operand)
2100      return i;
2101    else if (operands[i] == AARCH64_OPND_NIL)
2102      break;
2103  return -1;
2104}
2105
2106/* [0][0]  32-bit integer regs with sp   Wn
2107   [0][1]  64-bit integer regs with sp   Xn  sf=1
2108   [1][0]  32-bit integer regs with #0   Wn
2109   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
2110static const char *int_reg[2][2][32] = {
2111#define R32 "w"
2112#define R64 "x"
2113  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
2114      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2115      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2116      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30",    "wsp" },
2117    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
2118      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2119      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2120      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30",     "sp" } },
2121  { { R32  "0", R32  "1", R32  "2", R32  "3", R32  "4", R32  "5", R32  "6", R32  "7",
2122      R32  "8", R32  "9", R32 "10", R32 "11", R32 "12", R32 "13", R32 "14", R32 "15",
2123      R32 "16", R32 "17", R32 "18", R32 "19", R32 "20", R32 "21", R32 "22", R32 "23",
2124      R32 "24", R32 "25", R32 "26", R32 "27", R32 "28", R32 "29", R32 "30", R32 "zr" },
2125    { R64  "0", R64  "1", R64  "2", R64  "3", R64  "4", R64  "5", R64  "6", R64  "7",
2126      R64  "8", R64  "9", R64 "10", R64 "11", R64 "12", R64 "13", R64 "14", R64 "15",
2127      R64 "16", R64 "17", R64 "18", R64 "19", R64 "20", R64 "21", R64 "22", R64 "23",
2128      R64 "24", R64 "25", R64 "26", R64 "27", R64 "28", R64 "29", R64 "30", R64 "zr" } }
2129#undef R64
2130#undef R32
2131};
2132
2133/* Return the integer register name.
2134	   If SP_REG_P is not 0, R31 is an SP reg; otherwise R31 is the zero reg.  */
2135
2136static inline const char *
2137get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2138{
2139  const int has_zr = sp_reg_p ? 0 : 1;
2140  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2141  return int_reg[has_zr][is_64][regno];
2142}
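
/* For example, given the table above:
     get_int_reg_name (0, AARCH64_OPND_QLF_W, 0)    returns "w0",
     get_int_reg_name (31, AARCH64_OPND_QLF_X, 1)   returns "sp",
     get_int_reg_name (31, AARCH64_OPND_QLF_W, 0)   returns "wzr".  */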
2143
2144/* Like get_int_reg_name, but IS_64 is always 1.  */
2145
2146static inline const char *
2147get_64bit_int_reg_name (int regno, int sp_reg_p)
2148{
2149  const int has_zr = sp_reg_p ? 0 : 1;
2150  return int_reg[has_zr][1][regno];
2151}
2152
2153/* Types for expanding an encoded 8-bit value to a floating-point value.  */
2154
2155typedef union
2156{
2157  uint64_t i;
2158  double   d;
2159} double_conv_t;
2160
2161typedef union
2162{
2163  uint32_t i;
2164  float    f;
2165} single_conv_t;
2166
2167/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2168   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2169   (depending on the type of the instruction).  IMM8 will be expanded to a
2170   single-precision floating-point value (IS_DP == 0) or a double-precision
2171   floating-point value (IS_DP == 1).  The expanded value is returned.  */
2172
2173static uint64_t
2174expand_fp_imm (int is_dp, uint32_t imm8)
2175{
2176  uint64_t imm;
2177  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2178
2179  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
2180  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
2181  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
2182  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2183    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
2184  if (is_dp)
2185    {
2186      imm = (imm8_7 << (63-32))		/* imm8<7>  */
2187	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
2188	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2189	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2190	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
2191      imm <<= 32;
2192    }
2193  else
2194    {
2195      imm = (imm8_7 << 31)	/* imm8<7>              */
2196	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
2197	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
2198	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
2199    }
2200
2201  return imm;
2202}
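
/* As a worked example of the expansion above: for the single-precision
   case (IS_DP == 0), IMM8 == 0x70 gives imm8_7 == 0, imm8_6 == 1,
   imm8_6_repl4 == 0xf and imm8_6_0 == 0x70, so the result is
   (0 << 31) | (0 << 30) | (0xf << 26) | (0x70 << 19) == 0x3f800000,
   the IEEE 754 single-precision encoding of 1.0, i.e. the value that
   "fmov s0, #1.0" uses as its expanded immediate.  */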
2203
2204/* Produce the string representation of the register list operand *OPND
2205	   in the buffer pointed to by BUF of size SIZE.  */
2206static void
2207print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd)
2208{
2209  const int num_regs = opnd->reglist.num_regs;
2210  const int first_reg = opnd->reglist.first_regno;
2211  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2212  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2213  char tb[8];	/* Temporary buffer.  */
2214
2215  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2216  assert (num_regs >= 1 && num_regs <= 4);
2217
2218  /* Prepare the index if any.  */
2219  if (opnd->reglist.has_index)
2220    snprintf (tb, 8, "[%d]", opnd->reglist.index);
2221  else
2222    tb[0] = '\0';
2223
2224  /* The hyphenated form is preferred for disassembly if there are
2225     more than two registers in the list, and the register numbers
2226     are monotonically increasing in increments of one.  */
2227  if (num_regs > 2 && last_reg > first_reg)
2228    snprintf (buf, size, "{v%d.%s-v%d.%s}%s", first_reg, qlf_name,
2229	      last_reg, qlf_name, tb);
2230  else
2231    {
2232      const int reg0 = first_reg;
2233      const int reg1 = (first_reg + 1) & 0x1f;
2234      const int reg2 = (first_reg + 2) & 0x1f;
2235      const int reg3 = (first_reg + 3) & 0x1f;
2236
2237      switch (num_regs)
2238	{
2239	case 1:
2240	  snprintf (buf, size, "{v%d.%s}%s", reg0, qlf_name, tb);
2241	  break;
2242	case 2:
2243	  snprintf (buf, size, "{v%d.%s, v%d.%s}%s", reg0, qlf_name,
2244		    reg1, qlf_name, tb);
2245	  break;
2246	case 3:
2247	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s}%s", reg0, qlf_name,
2248		    reg1, qlf_name, reg2, qlf_name, tb);
2249	  break;
2250	case 4:
2251	  snprintf (buf, size, "{v%d.%s, v%d.%s, v%d.%s, v%d.%s}%s",
2252		    reg0, qlf_name, reg1, qlf_name, reg2, qlf_name,
2253		    reg3, qlf_name, tb);
2254	  break;
2255	}
2256    }
2257}
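
/* For example, a list of four registers starting at v0 with the 4S
   qualifier and no index is printed in the hyphenated form
   "{v0.4s-v3.4s}", while a two-register list starting at v31 wraps
   around and is printed as "{v31.4s, v0.4s}".  */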
2258
2259/* Produce the string representation of the register offset address operand
2260	   *OPND in the buffer pointed to by BUF of size SIZE.  */
2261static void
2262print_register_offset_address (char *buf, size_t size,
2263			       const aarch64_opnd_info *opnd)
2264{
2265  const size_t tblen = 16;
2266  char tb[tblen];		/* Temporary buffer.  */
2267  bfd_boolean lsl_p = FALSE;	/* Is LSL shift operator?  */
2268  bfd_boolean wm_p = FALSE;	/* Should Rm be Wm?  */
2269  bfd_boolean print_extend_p = TRUE;
2270  bfd_boolean print_amount_p = TRUE;
2271  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2272
2273  switch (opnd->shifter.kind)
2274    {
2275    case AARCH64_MOD_UXTW: wm_p = TRUE; break;
2276    case AARCH64_MOD_LSL : lsl_p = TRUE; break;
2277    case AARCH64_MOD_SXTW: wm_p = TRUE; break;
2278    case AARCH64_MOD_SXTX: break;
2279    default: assert (0);
2280    }
2281
2282  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2283				|| !opnd->shifter.amount_present))
2284    {
2285	      /* Don't print the shift/extend amount when the amount is zero and
2286	         it is not the special case of the 8-bit load/store instructions.  */
2287      print_amount_p = FALSE;
2288      /* Likewise, no need to print the shift operator LSL in such a
2289	 situation.  */
2290      if (lsl_p)
2291	print_extend_p = FALSE;
2292    }
2293
2294  /* Prepare for the extend/shift.  */
2295  if (print_extend_p)
2296    {
2297      if (print_amount_p)
2298	snprintf (tb, tblen, ",%s #%d", shift_name, opnd->shifter.amount);
2299      else
2300	snprintf (tb, tblen, ",%s", shift_name);
2301    }
2302  else
2303    tb[0] = '\0';
2304
2305  snprintf (buf, size, "[%s,%s%s]",
2306	    get_64bit_int_reg_name (opnd->addr.base_regno, 1),
2307	    get_int_reg_name (opnd->addr.offset.regno,
2308			      wm_p ? AARCH64_OPND_QLF_W : AARCH64_OPND_QLF_X,
2309			      0 /* sp_reg_p */),
2310	    tb);
2311}
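
/* For example, a 64-bit base register x0 with a 32-bit offset register
   w1 and a UXTW #2 extend is printed as "[x0,w1,uxtw #2]", while an LSL
   with a zero amount (outside the 8-bit load/store special case) is
   printed simply as "[x0,x1]".  */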
2312
2313/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
2314   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
2315   PC, PCREL_P and ADDRESS are used to pass in and return information about
2316   the PC-relative address calculation, where the PC value is passed in
2317	   PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
2318   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
2319   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
2320
2321   The function serves both the disassembler and the assembler diagnostics
2322   issuer, which is the reason why it lives in this file.  */
2323
2324void
2325aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
2326		       const aarch64_opcode *opcode,
2327		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
2328		       bfd_vma *address)
2329{
2330  int i;
2331  const char *name = NULL;
2332  const aarch64_opnd_info *opnd = opnds + idx;
2333  enum aarch64_modifier_kind kind;
2334  uint64_t addr;
2335
2336  buf[0] = '\0';
2337  if (pcrel_p)
2338    *pcrel_p = 0;
2339
2340  switch (opnd->type)
2341    {
2342    case AARCH64_OPND_Rd:
2343    case AARCH64_OPND_Rn:
2344    case AARCH64_OPND_Rm:
2345    case AARCH64_OPND_Rt:
2346    case AARCH64_OPND_Rt2:
2347    case AARCH64_OPND_Rs:
2348    case AARCH64_OPND_Ra:
2349    case AARCH64_OPND_Rt_SYS:
2350    case AARCH64_OPND_PAIRREG:
2351      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
2352	 the <ic_op>, therefore we use opnd->present to override the
2353	 generic optional-ness information.  */
2354      if (opnd->type == AARCH64_OPND_Rt_SYS && !opnd->present)
2355	break;
2356      /* Omit the operand, e.g. RET.  */
2357      if (optional_operand_p (opcode, idx)
2358	  && opnd->reg.regno == get_optional_operand_default_value (opcode))
2359	break;
2360      assert (opnd->qualifier == AARCH64_OPND_QLF_W
2361	      || opnd->qualifier == AARCH64_OPND_QLF_X);
2362      snprintf (buf, size, "%s",
2363		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2364      break;
2365
2366    case AARCH64_OPND_Rd_SP:
2367    case AARCH64_OPND_Rn_SP:
2368      assert (opnd->qualifier == AARCH64_OPND_QLF_W
2369	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
2370	      || opnd->qualifier == AARCH64_OPND_QLF_X
2371	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
2372      snprintf (buf, size, "%s",
2373		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
2374      break;
2375
2376    case AARCH64_OPND_Rm_EXT:
2377      kind = opnd->shifter.kind;
2378      assert (idx == 1 || idx == 2);
2379      if ((aarch64_stack_pointer_p (opnds)
2380	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
2381	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
2382	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
2383	       && kind == AARCH64_MOD_UXTW)
2384	      || (opnd->qualifier == AARCH64_OPND_QLF_X
2385		  && kind == AARCH64_MOD_UXTX)))
2386	{
2387	  /* 'LSL' is the preferred form in this case.  */
2388	  kind = AARCH64_MOD_LSL;
2389	  if (opnd->shifter.amount == 0)
2390	    {
2391	      /* Shifter omitted.  */
2392	      snprintf (buf, size, "%s",
2393			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2394	      break;
2395	    }
2396	}
2397      if (opnd->shifter.amount)
2398	snprintf (buf, size, "%s, %s #%d",
2399		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2400		  aarch64_operand_modifiers[kind].name,
2401		  opnd->shifter.amount);
2402      else
2403	snprintf (buf, size, "%s, %s",
2404		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2405		  aarch64_operand_modifiers[kind].name);
2406      break;
2407
2408    case AARCH64_OPND_Rm_SFT:
2409      assert (opnd->qualifier == AARCH64_OPND_QLF_W
2410	      || opnd->qualifier == AARCH64_OPND_QLF_X);
2411      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
2412	snprintf (buf, size, "%s",
2413		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
2414      else
2415	snprintf (buf, size, "%s, %s #%d",
2416		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
2417		  aarch64_operand_modifiers[opnd->shifter.kind].name,
2418		  opnd->shifter.amount);
2419      break;
2420
2421    case AARCH64_OPND_Fd:
2422    case AARCH64_OPND_Fn:
2423    case AARCH64_OPND_Fm:
2424    case AARCH64_OPND_Fa:
2425    case AARCH64_OPND_Ft:
2426    case AARCH64_OPND_Ft2:
2427    case AARCH64_OPND_Sd:
2428    case AARCH64_OPND_Sn:
2429    case AARCH64_OPND_Sm:
2430      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
2431		opnd->reg.regno);
2432      break;
2433
2434    case AARCH64_OPND_Vd:
2435    case AARCH64_OPND_Vn:
2436    case AARCH64_OPND_Vm:
2437      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
2438		aarch64_get_qualifier_name (opnd->qualifier));
2439      break;
2440
2441    case AARCH64_OPND_Ed:
2442    case AARCH64_OPND_En:
2443    case AARCH64_OPND_Em:
2444      snprintf (buf, size, "v%d.%s[%d]", opnd->reglane.regno,
2445		aarch64_get_qualifier_name (opnd->qualifier),
2446		opnd->reglane.index);
2447      break;
2448
2449    case AARCH64_OPND_VdD1:
2450    case AARCH64_OPND_VnD1:
2451      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
2452      break;
2453
2454    case AARCH64_OPND_LVn:
2455    case AARCH64_OPND_LVt:
2456    case AARCH64_OPND_LVt_AL:
2457    case AARCH64_OPND_LEt:
2458      print_register_list (buf, size, opnd);
2459      break;
2460
2461    case AARCH64_OPND_Cn:
2462    case AARCH64_OPND_Cm:
2463      snprintf (buf, size, "C%d", opnd->reg.regno);
2464      break;
2465
2466    case AARCH64_OPND_IDX:
2467    case AARCH64_OPND_IMM:
2468    case AARCH64_OPND_WIDTH:
2469    case AARCH64_OPND_UIMM3_OP1:
2470    case AARCH64_OPND_UIMM3_OP2:
2471    case AARCH64_OPND_BIT_NUM:
2472    case AARCH64_OPND_IMM_VLSL:
2473    case AARCH64_OPND_IMM_VLSR:
2474    case AARCH64_OPND_SHLL_IMM:
2475    case AARCH64_OPND_IMM0:
2476    case AARCH64_OPND_IMMR:
2477    case AARCH64_OPND_IMMS:
2478    case AARCH64_OPND_FBITS:
2479      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
2480      break;
2481
2482    case AARCH64_OPND_IMM_MOV:
2483      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2484	{
2485	case 4:	/* e.g. MOV Wd, #<imm32>.  */
2486	    {
2487	      int imm32 = opnd->imm.value;
2488	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
2489	    }
2490	  break;
2491	case 8:	/* e.g. MOV Xd, #<imm64>.  */
2492	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
2493		    opnd->imm.value, opnd->imm.value);
2494	  break;
2495	default: assert (0);
2496	}
2497      break;
2498
2499    case AARCH64_OPND_FPIMM0:
2500      snprintf (buf, size, "#0.0");
2501      break;
2502
2503    case AARCH64_OPND_LIMM:
2504    case AARCH64_OPND_AIMM:
2505    case AARCH64_OPND_HALF:
2506      if (opnd->shifter.amount)
2507	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%d", opnd->imm.value,
2508		  opnd->shifter.amount);
2509      else
2510	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2511      break;
2512
2513    case AARCH64_OPND_SIMD_IMM:
2514    case AARCH64_OPND_SIMD_IMM_SFT:
2515      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
2516	  || opnd->shifter.kind == AARCH64_MOD_NONE)
2517	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
2518      else
2519	snprintf (buf, size, "#0x%" PRIx64 ", %s #%d", opnd->imm.value,
2520		  aarch64_operand_modifiers[opnd->shifter.kind].name,
2521		  opnd->shifter.amount);
2522      break;
2523
2524    case AARCH64_OPND_FPIMM:
2525    case AARCH64_OPND_SIMD_FPIMM:
2526      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
2527	{
2528	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
2529	    {
2530	      single_conv_t c;
2531	      c.i = expand_fp_imm (0, opnd->imm.value);
2532	      snprintf (buf, size,  "#%.18e", c.f);
2533	    }
2534	  break;
2535	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
2536	    {
2537	      double_conv_t c;
2538	      c.i = expand_fp_imm (1, opnd->imm.value);
2539	      snprintf (buf, size,  "#%.18e", c.d);
2540	    }
2541	  break;
2542	default: assert (0);
2543	}
2544      break;
2545
2546    case AARCH64_OPND_CCMP_IMM:
2547    case AARCH64_OPND_NZCV:
2548    case AARCH64_OPND_EXCEPTION:
2549    case AARCH64_OPND_UIMM4:
2550    case AARCH64_OPND_UIMM7:
2551      if (optional_operand_p (opcode, idx) == TRUE
2552	  && (opnd->imm.value ==
2553	      (int64_t) get_optional_operand_default_value (opcode)))
2554	/* Omit the operand, e.g. DCPS1.  */
2555	break;
2556      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
2557      break;
2558
2559    case AARCH64_OPND_COND:
2560    case AARCH64_OPND_COND1:
2561      snprintf (buf, size, "%s", opnd->cond->names[0]);
2562      break;
2563
2564    case AARCH64_OPND_ADDR_ADRP:
2565      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
2566	+ opnd->imm.value;
2567      if (pcrel_p)
2568	*pcrel_p = 1;
2569      if (address)
2570	*address = addr;
2571	      /* This is not necessary during disassembly, as print_address_func
2572	 in the disassemble_info will take care of the printing.  But some
2573	 other callers may still be interested in getting the string in BUF,
2574	 so here we do snprintf regardless.  */
2575      snprintf (buf, size, "#0x%" PRIx64, addr);
2576      break;
2577
2578    case AARCH64_OPND_ADDR_PCREL14:
2579    case AARCH64_OPND_ADDR_PCREL19:
2580    case AARCH64_OPND_ADDR_PCREL21:
2581    case AARCH64_OPND_ADDR_PCREL26:
2582      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
2583      if (pcrel_p)
2584	*pcrel_p = 1;
2585      if (address)
2586	*address = addr;
2587	      /* This is not necessary during disassembly, as print_address_func
2588	 in the disassemble_info will take care of the printing.  But some
2589	 other callers may still be interested in getting the string in BUF,
2590	 so here we do snprintf regardless.  */
2591      snprintf (buf, size, "#0x%" PRIx64, addr);
2592      break;
2593
2594    case AARCH64_OPND_ADDR_SIMPLE:
2595    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
2596    case AARCH64_OPND_SIMD_ADDR_POST:
2597      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2598      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
2599	{
2600	  if (opnd->addr.offset.is_reg)
2601	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
2602	  else
2603	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
2604	}
2605      else
2606	snprintf (buf, size, "[%s]", name);
2607      break;
2608
2609    case AARCH64_OPND_ADDR_REGOFF:
2610      print_register_offset_address (buf, size, opnd);
2611      break;
2612
2613    case AARCH64_OPND_ADDR_SIMM7:
2614    case AARCH64_OPND_ADDR_SIMM9:
2615    case AARCH64_OPND_ADDR_SIMM9_2:
2616      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2617      if (opnd->addr.writeback)
2618	{
2619	  if (opnd->addr.preind)
2620	    snprintf (buf, size, "[%s,#%d]!", name, opnd->addr.offset.imm);
2621	  else
2622	    snprintf (buf, size, "[%s],#%d", name, opnd->addr.offset.imm);
2623	}
2624      else
2625	{
2626	  if (opnd->addr.offset.imm)
2627	    snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2628	  else
2629	    snprintf (buf, size, "[%s]", name);
2630	}
2631      break;
2632
2633    case AARCH64_OPND_ADDR_UIMM12:
2634      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
2635      if (opnd->addr.offset.imm)
2636	snprintf (buf, size, "[%s,#%d]", name, opnd->addr.offset.imm);
2637      else
2638	snprintf (buf, size, "[%s]", name);
2639      break;
2640
2641    case AARCH64_OPND_SYSREG:
2642      for (i = 0; aarch64_sys_regs[i].name; ++i)
2643	if (aarch64_sys_regs[i].value == opnd->sysreg
2644	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
2645	  break;
2646      if (aarch64_sys_regs[i].name)
2647	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
2648      else
2649	{
2650	  /* Implementation defined system register.  */
2651	  unsigned int value = opnd->sysreg;
2652	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
2653		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
2654		    value & 0x7);
2655	}
2656      break;
2657
2658    case AARCH64_OPND_PSTATEFIELD:
2659      for (i = 0; aarch64_pstatefields[i].name; ++i)
2660	if (aarch64_pstatefields[i].value == opnd->pstatefield)
2661	  break;
2662      assert (aarch64_pstatefields[i].name);
2663      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
2664      break;
2665
2666    case AARCH64_OPND_SYSREG_AT:
2667    case AARCH64_OPND_SYSREG_DC:
2668    case AARCH64_OPND_SYSREG_IC:
2669    case AARCH64_OPND_SYSREG_TLBI:
2670      snprintf (buf, size, "%s", opnd->sysins_op->template);
2671      break;
2672
2673    case AARCH64_OPND_BARRIER:
2674      snprintf (buf, size, "%s", opnd->barrier->name);
2675      break;
2676
2677    case AARCH64_OPND_BARRIER_ISB:
2678	      /* Operand can be omitted, e.g. in ISB.  */
2679      if (! optional_operand_p (opcode, idx)
2680	  || (opnd->barrier->value
2681	      != get_optional_operand_default_value (opcode)))
2682	snprintf (buf, size, "#0x%x", opnd->barrier->value);
2683      break;
2684
2685    case AARCH64_OPND_PRFOP:
2686      if (opnd->prfop->name != NULL)
2687	snprintf (buf, size, "%s", opnd->prfop->name);
2688      else
2689	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
2690      break;
2691
2692    default:
2693      assert (0);
2694    }
2695}
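
/* A few representative outputs of the above, assuming operands holding the
   corresponding values: an AARCH64_OPND_Rm_SFT operand for x1 with LSL #12
   is printed as "x1, lsl #12" (just "x1" when the shift is LSL #0), and an
   AARCH64_OPND_ADDR_SIMM9 pre-indexed writeback operand with base x0 and
   offset 16 is printed as "[x0,#16]!".  */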
2696
2697#define CPENC(op0,op1,crn,crm,op2) \
2698  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
2699  /* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
2700#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
2701  /* For 3.9.10 System Instructions.  */
2702#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
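
/* For example, "nzcv" below is CPEN_(3,C2,0), i.e. CPENC(3,3,C4,C2,0),
   which packs op0 == 3, op1 == 3, CRn == 4, CRm == 2 and op2 == 0 into
   (3 << 14) | (3 << 11) | (4 << 7) | (2 << 3) | 0 == 0xda10; a register
   with such an encoding but no entry in the table would be printed by
   aarch64_print_operand as "s3_3_c4_c2_0".  */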
2703
2704#define C0  0
2705#define C1  1
2706#define C2  2
2707#define C3  3
2708#define C4  4
2709#define C5  5
2710#define C6  6
2711#define C7  7
2712#define C8  8
2713#define C9  9
2714#define C10 10
2715#define C11 11
2716#define C12 12
2717#define C13 13
2718#define C14 14
2719#define C15 15
2720
2721#ifdef F_DEPRECATED
2722#undef F_DEPRECATED
2723#endif
2724#define F_DEPRECATED	0x1	/* Deprecated system register.  */
2725
2726/* TODO: there are two more issues that need to be resolved:
2727   1. handle read-only and write-only system registers
2728   2. handle cpu-implementation-defined system registers.  */
2729const aarch64_sys_reg aarch64_sys_regs [] =
2730{
2731  { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
2732  { "elr_el1",          CPEN_(0,C0,1),	0 },
2733  { "sp_el0",           CPEN_(0,C1,0),	0 },
2734  { "spsel",            CPEN_(0,C2,0),	0 },
2735  { "daif",             CPEN_(3,C2,1),	0 },
2736  { "currentel",        CPEN_(0,C2,2),	0 }, /* RO */
2737  { "nzcv",             CPEN_(3,C2,0),	0 },
2738  { "fpcr",             CPEN_(3,C4,0),	0 },
2739  { "fpsr",             CPEN_(3,C4,1),	0 },
2740  { "dspsr_el0",        CPEN_(3,C5,0),	0 },
2741  { "dlr_el0",          CPEN_(3,C5,1),	0 },
2742  { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
2743  { "elr_el2",          CPEN_(4,C0,1),	0 },
2744  { "sp_el1",           CPEN_(4,C1,0),	0 },
2745  { "spsr_irq",         CPEN_(4,C3,0),	0 },
2746  { "spsr_abt",         CPEN_(4,C3,1),	0 },
2747  { "spsr_und",         CPEN_(4,C3,2),	0 },
2748  { "spsr_fiq",         CPEN_(4,C3,3),	0 },
2749  { "spsr_el3",         CPEN_(6,C0,0),	0 },
2750  { "elr_el3",          CPEN_(6,C0,1),	0 },
2751  { "sp_el2",           CPEN_(6,C1,0),	0 },
2752  { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
2753  { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
2754  { "midr_el1",         CPENC(3,0,C0,C0,0),	0 }, /* RO */
2755  { "ctr_el0",          CPENC(3,3,C0,C0,1),	0 }, /* RO */
2756  { "mpidr_el1",        CPENC(3,0,C0,C0,5),	0 }, /* RO */
2757  { "revidr_el1",       CPENC(3,0,C0,C0,6),	0 }, /* RO */
2758  { "aidr_el1",         CPENC(3,1,C0,C0,7),	0 }, /* RO */
2759  { "dczid_el0",        CPENC(3,3,C0,C0,7),	0 }, /* RO */
2760  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	0 }, /* RO */
2761  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	0 }, /* RO */
2762  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	0 }, /* RO */
2763  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	0 }, /* RO */
2764  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	0 }, /* RO */
2765  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	0 }, /* RO */
2766  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	0 }, /* RO */
2767  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	0 }, /* RO */
2768  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	0 }, /* RO */
2769  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	0 }, /* RO */
2770  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	0 }, /* RO */
2771  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	0 }, /* RO */
2772  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	0 }, /* RO */
2773  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	0 }, /* RO */
2774  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	0 }, /* RO */
2775  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	0 }, /* RO */
2776  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	0 }, /* RO */
2777  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	0 }, /* RO */
2778  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	0 }, /* RO */
2779  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	0 }, /* RO */
2780  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	0 }, /* RO */
2781  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	0 }, /* RO */
2782  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	0 }, /* RO */
2783  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	0 }, /* RO */
2784  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	0 }, /* RO */
2785  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	0 }, /* RO */
2786  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	0 }, /* RO */
2787  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	0 }, /* RO */
2788  { "clidr_el1",        CPENC(3,1,C0,C0,1),	0 }, /* RO */
2789  { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 }, /* RO */
2790  { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
2791  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
2792  { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
2793  { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
2794  { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
2795  { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
2796  { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
2797  { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
2798  { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
2799  { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
2800  { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
2801  { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
2802  { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
2803  { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
2804  { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
2805  { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
2806  { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
2807  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
2808  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
2809  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
2810  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
2811  { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
2812  { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
2813  { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
2814  { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
2815  { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
2816  { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
2817  { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
2818  { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
2819  { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
2820  { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
2821  { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
2822  { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
2823  { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
2824  { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
2825  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
2826  { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
2827  { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
2828  { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
2829  { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
2830  { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
2831  { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
2832  { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
2833  { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
2834  { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
2835  { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
2836  { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
2837  { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
2838  { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
2839  { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
2840  { "rvbar_el1",        CPENC(3,0,C12,C0,1),	0 }, /* RO */
2841  { "rvbar_el2",        CPENC(3,4,C12,C0,1),	0 }, /* RO */
2842  { "rvbar_el3",        CPENC(3,6,C12,C0,1),	0 }, /* RO */
2843  { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
2844  { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
2845  { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
2846  { "isr_el1",          CPENC(3,0,C12,C1,0),	0 }, /* RO */
2847  { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
2848  { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
2849  { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RO */
2850  { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
2851  { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
2852  { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
2853  { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
2854  { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RO */
2855  { "cntpct_el0",       CPENC(3,3,C14,C0,1),	0 }, /* RO */
2856  { "cntvct_el0",       CPENC(3,3,C14,C0,2),	0 }, /* RO */
2857  { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
2858  { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
2859  { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
2860  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
2861  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
2862  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
2863  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
2864  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
2865  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
2866  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
2867  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
2868  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
2869  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
2870  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
2871  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
2872  { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
2873  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
2874  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
2875  { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
2876  { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
2877  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	0 },  /* r */
2878  { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
2879  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
2880  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* r */
2881  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* w */
2882  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },  /* r */
2883  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },  /* w */
2884  { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
2885  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
2886  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
2887  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
2888  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
2889  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
2890  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
2891  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
2892  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
2893  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
2894  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
2895  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
2896  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
2897  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
2898  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
2899  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
2900  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
2901  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
2902  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
2903  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
2904  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
2905  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
2906  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
2907  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
2908  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
2909  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
2910  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
2911  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
2912  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
2913  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
2914  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
2915  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
2916  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
2917  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
2918  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
2919  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
2920  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
2921  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
2922  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
2923  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
2924  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
2925  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
2926  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
2927  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
2928  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
2929  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
2930  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
2931  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
2932  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
2933  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
2934  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
2935  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
2936  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
2937  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
2938  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
2939  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
2940  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
2941  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
2942  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
2943  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
2944  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
2945  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
2946  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
2947  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
2948  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
2949  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
2950  { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	0 },  /* r */
2951  { "oslar_el1",         CPENC(2,0,C1, C0, 4),	0 },  /* w */
2952  { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	0 },  /* r */
2953  { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
2954  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
2955  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
2956  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
2957  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	0 },  /* r */
2958
2959  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
2960  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
2961  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
2962  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
2963  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	0 },  /* w */
2964  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
2965  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	0 },  /* r */
2966  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	0 },  /* r */
2967  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
2968  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
2969  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
2970  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
2971  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
2972  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
2973  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
2974  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
2975  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
2976  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
2977  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
2978  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
2979  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
2980  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
2981  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
2982  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
2983  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
2984  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
2985  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
2986  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
2987  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
2988  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
2989  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
2990  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
2991  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
2992  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
2993  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
2994  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
2995  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
2996  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
2997  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
2998  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
2999  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
3000  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
3001  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
3002  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
3003  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
3004  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
3005  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
3006  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
3007  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
3008  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
3009  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
3010  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
3011  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
3012  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
3013  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
3014  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
3015  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
3016  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
3017  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
3018  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
3019  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
3020  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
3021  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
3022  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
3023  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
3024  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
3025  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
3026  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
3027  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
3028  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
3029  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
3030  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
3031  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
3032  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
3033  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
3034  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
3035  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
3036  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
3037  { 0,          CPENC(0,0,0,0,0),	0 },
3038};
3039
3040bfd_boolean
3041aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
3042{
3043  return (reg->flags & F_DEPRECATED) != 0;
3044}
3045
3046const aarch64_sys_reg aarch64_pstatefields [] =
3047{
3048  { "spsel",            0x05,	0 },
3049  { "daifset",          0x1e,	0 },
3050  { "daifclr",          0x1f,	0 },
3051  { 0,          CPENC(0,0,0,0,0), 0 },
3052};
3053
3054const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
3055{
3056    { "ialluis", CPENS(0,C7,C1,0), 0 },
3057    { "iallu",   CPENS(0,C7,C5,0), 0 },
3058    { "ivau",    CPENS(3,C7,C5,1), 1 },
3059    { 0, CPENS(0,0,0,0), 0 }
3060};
3061
3062const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
3063{
3064    { "zva",        CPENS(3,C7,C4,1),  1 },
3065    { "ivac",       CPENS(0,C7,C6,1),  1 },
3066    { "isw",        CPENS(0,C7,C6,2),  1 },
3067    { "cvac",       CPENS(3,C7,C10,1), 1 },
3068    { "csw",        CPENS(0,C7,C10,2), 1 },
3069    { "cvau",       CPENS(3,C7,C11,1), 1 },
3070    { "civac",      CPENS(3,C7,C14,1), 1 },
3071    { "cisw",       CPENS(0,C7,C14,2), 1 },
3072    { 0,       CPENS(0,0,0,0), 0 }
3073};
3074
3075const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
3076{
3077    { "s1e1r",      CPENS(0,C7,C8,0), 1 },
3078    { "s1e1w",      CPENS(0,C7,C8,1), 1 },
3079    { "s1e0r",      CPENS(0,C7,C8,2), 1 },
3080    { "s1e0w",      CPENS(0,C7,C8,3), 1 },
3081    { "s12e1r",     CPENS(4,C7,C8,4), 1 },
3082    { "s12e1w",     CPENS(4,C7,C8,5), 1 },
3083    { "s12e0r",     CPENS(4,C7,C8,6), 1 },
3084    { "s12e0w",     CPENS(4,C7,C8,7), 1 },
3085    { "s1e2r",      CPENS(4,C7,C8,0), 1 },
3086    { "s1e2w",      CPENS(4,C7,C8,1), 1 },
3087    { "s1e3r",      CPENS(6,C7,C8,0), 1 },
3088    { "s1e3w",      CPENS(6,C7,C8,1), 1 },
3089    { 0,       CPENS(0,0,0,0), 0 }
3090};
3091
3092const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
3093{
3094    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
3095    { "vae1",      CPENS(0,C8,C7,1), 1 },
3096    { "aside1",    CPENS(0,C8,C7,2), 1 },
3097    { "vaae1",     CPENS(0,C8,C7,3), 1 },
3098    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
3099    { "vae1is",    CPENS(0,C8,C3,1), 1 },
3100    { "aside1is",  CPENS(0,C8,C3,2), 1 },
3101    { "vaae1is",   CPENS(0,C8,C3,3), 1 },
3102    { "ipas2e1is", CPENS(4,C8,C0,1), 1 },
3103    { "ipas2le1is",CPENS(4,C8,C0,5), 1 },
3104    { "ipas2e1",   CPENS(4,C8,C4,1), 1 },
3105    { "ipas2le1",  CPENS(4,C8,C4,5), 1 },
3106    { "vae2",      CPENS(4,C8,C7,1), 1 },
3107    { "vae2is",    CPENS(4,C8,C3,1), 1 },
3108    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
3109    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
3110    { "vae3",      CPENS(6,C8,C7,1), 1 },
3111    { "vae3is",    CPENS(6,C8,C3,1), 1 },
3112    { "alle2",     CPENS(4,C8,C7,0), 0 },
3113    { "alle2is",   CPENS(4,C8,C3,0), 0 },
3114    { "alle1",     CPENS(4,C8,C7,4), 0 },
3115    { "alle1is",   CPENS(4,C8,C3,4), 0 },
3116    { "alle3",     CPENS(6,C8,C7,0), 0 },
3117    { "alle3is",   CPENS(6,C8,C3,0), 0 },
3118    { "vale1is",   CPENS(0,C8,C3,5), 1 },
3119    { "vale2is",   CPENS(4,C8,C3,5), 1 },
3120    { "vale3is",   CPENS(6,C8,C3,5), 1 },
3121    { "vaale1is",  CPENS(0,C8,C3,7), 1 },
3122    { "vale1",     CPENS(0,C8,C7,5), 1 },
3123    { "vale2",     CPENS(4,C8,C7,5), 1 },
3124    { "vale3",     CPENS(6,C8,C7,5), 1 },
3125    { "vaale1",    CPENS(0,C8,C7,7), 1 },
3126    { 0,       CPENS(0,0,0,0), 0 }
3127};
3128
3129#undef C0
3130#undef C1
3131#undef C2
3132#undef C3
3133#undef C4
3134#undef C5
3135#undef C6
3136#undef C7
3137#undef C8
3138#undef C9
3139#undef C10
3140#undef C11
3141#undef C12
3142#undef C13
3143#undef C14
3144#undef C15
3145
3146/* Include the opcode description table as well as the operand description
3147   table.  */
3148#include "aarch64-tbl.h"
3149