1/* aarch64-opc.c -- AArch64 opcode support.
2   Copyright (C) 2009-2017 Free Software Foundation, Inc.
3   Contributed by ARM Ltd.
4
5   This file is part of the GNU opcodes library.
6
7   This library is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3, or (at your option)
10   any later version.
11
12   It is distributed in the hope that it will be useful, but WITHOUT
13   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15   License for more details.
16
17   You should have received a copy of the GNU General Public License
18   along with this program; see the file COPYING3. If not,
19   see <http://www.gnu.org/licenses/>.  */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30#include "libiberty.h"
31
32#include "aarch64-opc.h"
33
34#ifdef DEBUG_AARCH64
35int debug_dump = FALSE;
36#endif /* DEBUG_AARCH64 */
37
38/* The enumeration strings associated with each value of a 5-bit SVE
39   pattern operand.  A null entry indicates a reserved meaning.  */
40const char *const aarch64_sve_pattern_array[32] = {
41  /* 0-7.  */
42  "pow2",
43  "vl1",
44  "vl2",
45  "vl3",
46  "vl4",
47  "vl5",
48  "vl6",
49  "vl7",
50  /* 8-15.  */
51  "vl8",
52  "vl16",
53  "vl32",
54  "vl64",
55  "vl128",
56  "vl256",
57  0,
58  0,
59  /* 16-23.  */
60  0,
61  0,
62  0,
63  0,
64  0,
65  0,
66  0,
67  0,
68  /* 24-31.  */
69  0,
70  0,
71  0,
72  0,
73  0,
74  "mul4",
75  "mul3",
76  "all"
77};
78
79/* The enumeration strings associated with each value of a 4-bit SVE
80   prefetch operand.  A null entry indicates a reserved meaning.  */
81const char *const aarch64_sve_prfop_array[16] = {
82  /* 0-7.  */
83  "pldl1keep",
84  "pldl1strm",
85  "pldl2keep",
86  "pldl2strm",
87  "pldl3keep",
88  "pldl3strm",
89  0,
90  0,
91  /* 8-15.  */
92  "pstl1keep",
93  "pstl1strm",
94  "pstl2keep",
95  "pstl2strm",
96  "pstl3keep",
97  "pstl3strm",
98  0,
99  0
100};
101
/* Helper functions to determine which operand is used to encode/decode
   the size:Q fields for AdvSIMD instructions.  */
104
105static inline bfd_boolean
106vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107{
108  return ((qualifier >= AARCH64_OPND_QLF_V_8B
109	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110	  : FALSE);
111}
112
113static inline bfd_boolean
114fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115{
116  return ((qualifier >= AARCH64_OPND_QLF_S_B
117	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118	  : FALSE);
119}
120
121enum data_pattern
122{
123  DP_UNKNOWN,
124  DP_VECTOR_3SAME,
125  DP_VECTOR_LONG,
126  DP_VECTOR_WIDE,
127  DP_VECTOR_ACROSS_LANES,
128};
129
130static const char significant_operand_index [] =
131{
132  0,	/* DP_UNKNOWN, by default using operand 0.  */
133  0,	/* DP_VECTOR_3SAME */
134  1,	/* DP_VECTOR_LONG */
135  2,	/* DP_VECTOR_WIDE */
136  1,	/* DP_VECTOR_ACROSS_LANES */
137};
138
139/* Given a sequence of qualifiers in QUALIFIERS, determine and return
140   the data pattern.
141   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142   corresponds to one of a sequence of operands.  */
143
144static enum data_pattern
145get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146{
147  if (vector_qualifier_p (qualifiers[0]) == TRUE)
148    {
149      /* e.g. v.4s, v.4s, v.4s
150	   or v.4h, v.4h, v.h[3].  */
151      if (qualifiers[0] == qualifiers[1]
152	  && vector_qualifier_p (qualifiers[2]) == TRUE
153	  && (aarch64_get_qualifier_esize (qualifiers[0])
154	      == aarch64_get_qualifier_esize (qualifiers[1]))
155	  && (aarch64_get_qualifier_esize (qualifiers[0])
156	      == aarch64_get_qualifier_esize (qualifiers[2])))
157	return DP_VECTOR_3SAME;
158      /* e.g. v.8h, v.8b, v.8b.
159           or v.4s, v.4h, v.h[2].
160	   or v.8h, v.16b.  */
161      if (vector_qualifier_p (qualifiers[1]) == TRUE
162	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163	  && (aarch64_get_qualifier_esize (qualifiers[0])
164	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165	return DP_VECTOR_LONG;
166      /* e.g. v.8h, v.8h, v.8b.  */
167      if (qualifiers[0] == qualifiers[1]
168	  && vector_qualifier_p (qualifiers[2]) == TRUE
169	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170	  && (aarch64_get_qualifier_esize (qualifiers[0])
171	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172	  && (aarch64_get_qualifier_esize (qualifiers[0])
173	      == aarch64_get_qualifier_esize (qualifiers[1])))
174	return DP_VECTOR_WIDE;
175    }
176  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177    {
178      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
179      if (vector_qualifier_p (qualifiers[1]) == TRUE
180	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181	return DP_VECTOR_ACROSS_LANES;
182    }
183
184  return DP_UNKNOWN;
185}
186
187/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time we need to select an operand.  We could
   either cache the calculated result or statically generate the data;
   however, it is not obvious that the optimization would bring significant
   benefit.  */
194
195int
196aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197{
198  return
199    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200}
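
/* For example, SADDW Vd.8H, Vn.8H, Vm.8B has the qualifier sequence
   (8H, 8H, 8B), which get_data_pattern classifies as DP_VECTOR_WIDE, so the
   size:Q fields are encoded/decoded from operand 2 (Vm); for the long form
   SADDL Vd.8H, Vn.8B, Vm.8B the pattern is DP_VECTOR_LONG and operand 1 is
   selected instead.  */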
201
202const aarch64_field fields[] =
203{
204    {  0,  0 },	/* NIL.  */
205    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
206    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
207    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
208    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
209    {  5, 19 },	/* imm19: e.g. in CBZ.  */
210    {  5, 19 },	/* immhi: e.g. in ADRP.  */
211    { 29,  2 },	/* immlo: e.g. in ADRP.  */
212    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
213    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
214    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
215    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
216    {  0,  5 },	/* Rt: in load/store instructions.  */
217    {  0,  5 },	/* Rd: in many integer instructions.  */
218    {  5,  5 },	/* Rn: in many integer instructions.  */
219    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
220    { 10,  5 },	/* Ra: in fp instructions.  */
221    {  5,  3 },	/* op2: in the system instructions.  */
222    {  8,  4 },	/* CRm: in the system instructions.  */
223    { 12,  4 },	/* CRn: in the system instructions.  */
224    { 16,  3 },	/* op1: in the system instructions.  */
225    { 19,  2 },	/* op0: in the system instructions.  */
226    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
227    { 12,  4 },	/* cond: condition flags as a source operand.  */
228    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
229    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
230    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
231    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
232    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
233    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
234    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
235    { 12,  1 },	/* S: in load/store reg offset instructions.  */
236    { 21,  2 },	/* hw: in move wide constant instructions.  */
237    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
238    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
239    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
240    { 22,  2 },	/* type: floating point type field in fp data inst.  */
241    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
242    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
243    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
244    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
245    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
246    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
247    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
248    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
249    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
250    {  5, 16 },	/* imm16: in exception instructions.  */
251    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
252    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
253    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
254    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
255    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
256    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
257    { 22,  1 },	/* N: in logical (immediate) instructions.  */
258    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
259    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
260    { 31,  1 },	/* sf: in integer data processing instructions.  */
261    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
262    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
263    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
264    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
265    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
266    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
267    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
268    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
269    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
270    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
271    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
272    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
273    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
274    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
275    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
276    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
277    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
278    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
279    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
280    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
281    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
282    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
283    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
284    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
285    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
286    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register, bits [4,0].  */
288    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
289    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
290    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
291    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
292    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
293    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
294    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
295    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
296    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
297    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
298    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
299    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
300    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
301    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
302    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
303    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
304    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
305    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
306    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
307    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
308    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
309    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
310    { 16,  4 }, /* SVE_tsz: triangular size select.  */
311    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
312    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
313    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
314    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
315    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
316    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
317    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
318    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
319};
320
321enum aarch64_operand_class
322aarch64_get_operand_class (enum aarch64_opnd type)
323{
324  return aarch64_operands[type].op_class;
325}
326
327const char *
328aarch64_get_operand_name (enum aarch64_opnd type)
329{
330  return aarch64_operands[type].name;
331}
332
/* Get the operand description string.
   This is usually used for diagnostic purposes.  */
335const char *
336aarch64_get_operand_desc (enum aarch64_opnd type)
337{
338  return aarch64_operands[type].desc;
339}
340
341/* Table of all conditional affixes.  */
342const aarch64_cond aarch64_conds[16] =
343{
344  {{"eq", "none"}, 0x0},
345  {{"ne", "any"}, 0x1},
346  {{"cs", "hs", "nlast"}, 0x2},
347  {{"cc", "lo", "ul", "last"}, 0x3},
348  {{"mi", "first"}, 0x4},
349  {{"pl", "nfrst"}, 0x5},
350  {{"vs"}, 0x6},
351  {{"vc"}, 0x7},
352  {{"hi", "pmore"}, 0x8},
353  {{"ls", "plast"}, 0x9},
354  {{"ge", "tcont"}, 0xa},
355  {{"lt", "tstop"}, 0xb},
356  {{"gt"}, 0xc},
357  {{"le"}, 0xd},
358  {{"al"}, 0xe},
359  {{"nv"}, 0xf},
360};
361
362const aarch64_cond *
363get_cond_from_value (aarch64_insn value)
364{
365  assert (value < 16);
366  return &aarch64_conds[(unsigned int) value];
367}
368
369const aarch64_cond *
370get_inverted_cond (const aarch64_cond *cond)
371{
372  return &aarch64_conds[cond->value ^ 0x1];
373}
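
/* For example, applying get_inverted_cond to EQ (0b0000) yields NE (0b0001);
   every condition and its inverse differ only in bit 0 of the 4-bit encoding
   (AL and NV, which both mean "always", are paired in the same way), which is
   why flipping that bit is sufficient.  */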
374
375/* Table describing the operand extension/shifting operators; indexed by
376   enum aarch64_modifier_kind.
377
378   The value column provides the most common values for encoding modifiers,
379   which enables table-driven encoding/decoding for the modifiers.  */
380const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
381{
382    {"none", 0x0},
383    {"msl",  0x0},
384    {"ror",  0x3},
385    {"asr",  0x2},
386    {"lsr",  0x1},
387    {"lsl",  0x0},
388    {"uxtb", 0x0},
389    {"uxth", 0x1},
390    {"uxtw", 0x2},
391    {"uxtx", 0x3},
392    {"sxtb", 0x4},
393    {"sxth", 0x5},
394    {"sxtw", 0x6},
395    {"sxtx", 0x7},
396    {"mul", 0x0},
397    {"mul vl", 0x0},
398    {NULL, 0},
399};
400
401enum aarch64_modifier_kind
402aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
403{
404  return desc - aarch64_operand_modifiers;
405}
406
407aarch64_insn
408aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
409{
410  return aarch64_operand_modifiers[kind].value;
411}
412
413enum aarch64_modifier_kind
414aarch64_get_operand_modifier_from_value (aarch64_insn value,
415					 bfd_boolean extend_p)
416{
417  if (extend_p == TRUE)
418    return AARCH64_MOD_UXTB + value;
419  else
420    return AARCH64_MOD_LSL - value;
421}
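
/* For example, for the encoded value 2 the function above returns
   AARCH64_MOD_UXTW when EXTEND_P is TRUE and AARCH64_MOD_ASR when it is
   FALSE; this relies on the extend operators being listed in ascending
   encoding order and the shift operators in descending encoding order in
   aarch64_operand_modifiers.  */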
422
423bfd_boolean
424aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
425{
426  return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
427    ? TRUE : FALSE;
428}
429
430static inline bfd_boolean
431aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
432{
433  return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
434    ? TRUE : FALSE;
435}
436
437const struct aarch64_name_value_pair aarch64_barrier_options[16] =
438{
439    { "#0x00", 0x0 },
440    { "oshld", 0x1 },
441    { "oshst", 0x2 },
442    { "osh",   0x3 },
443    { "#0x04", 0x4 },
444    { "nshld", 0x5 },
445    { "nshst", 0x6 },
446    { "nsh",   0x7 },
447    { "#0x08", 0x8 },
448    { "ishld", 0x9 },
449    { "ishst", 0xa },
450    { "ish",   0xb },
451    { "#0x0c", 0xc },
452    { "ld",    0xd },
453    { "st",    0xe },
454    { "sy",    0xf },
455};
456
457/* Table describing the operands supported by the aliases of the HINT
458   instruction.
459
460   The name column is the operand that is accepted for the alias.  The value
461   column is the hint number of the alias.  The list of operands is terminated
462   by NULL in the name column.  */
463
464const struct aarch64_name_value_pair aarch64_hint_options[] =
465{
466  { "csync", 0x11 },    /* PSB CSYNC.  */
467  { NULL, 0x0 },
468};
469
470/* op -> op:       load = 0 instruction = 1 store = 2
471   l  -> level:    1-3
472   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
473#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
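/* For example, B (2, 3, 1) is 0x15, the encoding of "pstl3strm" below.  */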
474const struct aarch64_name_value_pair aarch64_prfops[32] =
475{
476  { "pldl1keep", B(0, 1, 0) },
477  { "pldl1strm", B(0, 1, 1) },
478  { "pldl2keep", B(0, 2, 0) },
479  { "pldl2strm", B(0, 2, 1) },
480  { "pldl3keep", B(0, 3, 0) },
481  { "pldl3strm", B(0, 3, 1) },
482  { NULL, 0x06 },
483  { NULL, 0x07 },
484  { "plil1keep", B(1, 1, 0) },
485  { "plil1strm", B(1, 1, 1) },
486  { "plil2keep", B(1, 2, 0) },
487  { "plil2strm", B(1, 2, 1) },
488  { "plil3keep", B(1, 3, 0) },
489  { "plil3strm", B(1, 3, 1) },
490  { NULL, 0x0e },
491  { NULL, 0x0f },
492  { "pstl1keep", B(2, 1, 0) },
493  { "pstl1strm", B(2, 1, 1) },
494  { "pstl2keep", B(2, 2, 0) },
495  { "pstl2strm", B(2, 2, 1) },
496  { "pstl3keep", B(2, 3, 0) },
497  { "pstl3strm", B(2, 3, 1) },
498  { NULL, 0x16 },
499  { NULL, 0x17 },
500  { NULL, 0x18 },
501  { NULL, 0x19 },
502  { NULL, 0x1a },
503  { NULL, 0x1b },
504  { NULL, 0x1c },
505  { NULL, 0x1d },
506  { NULL, 0x1e },
507  { NULL, 0x1f },
508};
509#undef B
510
511/* Utilities on value constraint.  */
512
513static inline int
514value_in_range_p (int64_t value, int low, int high)
515{
516  return (value >= low && value <= high) ? 1 : 0;
517}
518
519/* Return true if VALUE is a multiple of ALIGN.  */
520static inline int
521value_aligned_p (int64_t value, int align)
522{
523  return (value % align) == 0;
524}
525
526/* A signed value fits in a field.  */
527static inline int
528value_fit_signed_field_p (int64_t value, unsigned width)
529{
530  assert (width < 32);
531  if (width < sizeof (value) * 8)
532    {
533      int64_t lim = (int64_t)1 << (width - 1);
534      if (value >= -lim && value < lim)
535	return 1;
536    }
537  return 0;
538}
539
540/* An unsigned value fits in a field.  */
541static inline int
542value_fit_unsigned_field_p (int64_t value, unsigned width)
543{
544  assert (width < 32);
545  if (width < sizeof (value) * 8)
546    {
547      int64_t lim = (int64_t)1 << width;
548      if (value >= 0 && value < lim)
549	return 1;
550    }
551  return 0;
552}
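
/* For example, a 6-bit signed field holds values in [-32, 31] and a 6-bit
   unsigned field holds values in [0, 63], so value_fit_signed_field_p (-33, 6)
   returns 0 while value_fit_unsigned_field_p (63, 6) returns 1.  */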
553
554/* Return 1 if OPERAND is SP or WSP.  */
555int
556aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
557{
558  return ((aarch64_get_operand_class (operand->type)
559	   == AARCH64_OPND_CLASS_INT_REG)
560	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
561	  && operand->reg.regno == 31);
562}
563
/* Return 1 if OPERAND is XZR or WZR.  */
565int
566aarch64_zero_register_p (const aarch64_opnd_info *operand)
567{
568  return ((aarch64_get_operand_class (operand->type)
569	   == AARCH64_OPND_CLASS_INT_REG)
570	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
571	  && operand->reg.regno == 31);
572}
573
/* Return true if the operand *OPERAND, which has the operand code
   OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
   qualified by the qualifier TARGET.  */
577
578static inline int
579operand_also_qualified_p (const struct aarch64_opnd_info *operand,
580			  aarch64_opnd_qualifier_t target)
581{
582  switch (operand->qualifier)
583    {
584    case AARCH64_OPND_QLF_W:
585      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
586	return 1;
587      break;
588    case AARCH64_OPND_QLF_X:
589      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
590	return 1;
591      break;
592    case AARCH64_OPND_QLF_WSP:
593      if (target == AARCH64_OPND_QLF_W
594	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
595	return 1;
596      break;
597    case AARCH64_OPND_QLF_SP:
598      if (target == AARCH64_OPND_QLF_X
599	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
600	return 1;
601      break;
602    default:
603      break;
604    }
605
606  return 0;
607}
608
609/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
610   for operand KNOWN_IDX, return the expected qualifier for operand IDX.
611
   Return NIL if more than one expected qualifier is found.  */
613
614aarch64_opnd_qualifier_t
615aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
616				int idx,
617				const aarch64_opnd_qualifier_t known_qlf,
618				int known_idx)
619{
620  int i, saved_i;
621
622  /* Special case.
623
624     When the known qualifier is NIL, we have to assume that there is only
625     one qualifier sequence in the *QSEQ_LIST and return the corresponding
626     qualifier directly.  One scenario is that for instruction
627	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
628     which has only one possible valid qualifier sequence
629	NIL, S_D
630     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
631     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
632
633     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifier sequence is
635     not in use (when all qualifiers in the sequence are NILs), we have to
636     handle this special case here.  */
637  if (known_qlf == AARCH64_OPND_NIL)
638    {
639      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
640      return qseq_list[0][idx];
641    }
642
643  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
644    {
645      if (qseq_list[i][known_idx] == known_qlf)
646	{
647	  if (saved_i != -1)
	    /* More than one sequence has KNOWN_QLF at KNOWN_IDX.  */
650	    return AARCH64_OPND_NIL;
651	  saved_i = i;
652	}
653    }
654
655  return qseq_list[saved_i][idx];
656}
657
658enum operand_qualifier_kind
659{
660  OQK_NIL,
661  OQK_OPD_VARIANT,
662  OQK_VALUE_IN_RANGE,
663  OQK_MISC,
664};
665
666/* Operand qualifier description.  */
667struct operand_qualifier_data
668{
669  /* The usage of the three data fields depends on the qualifier kind.  */
670  int data0;
671  int data1;
672  int data2;
673  /* Description.  */
674  const char *desc;
675  /* Kind.  */
676  enum operand_qualifier_kind kind;
677};
678
679/* Indexed by the operand qualifier enumerators.  */
680struct operand_qualifier_data aarch64_opnd_qualifiers[] =
681{
682  {0, 0, 0, "NIL", OQK_NIL},
683
684  /* Operand variant qualifiers.
685     First 3 fields:
686     element size, number of elements and common value for encoding.  */
687
688  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
689  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
690  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
691  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
692
693  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
694  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
695  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
696  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
697  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
698
699  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
700  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
701  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
702  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
703  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
704  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
705  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
706  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
707  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
708  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
709
710  {0, 0, 0, "z", OQK_OPD_VARIANT},
711  {0, 0, 0, "m", OQK_OPD_VARIANT},
712
713  /* Qualifiers constraining the value range.
714     First 3 fields:
715     Lower bound, higher bound, unused.  */
716
717  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
  {0,  7, 0, "imm_0_7", OQK_VALUE_IN_RANGE},
719  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
720  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
721  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
722  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
723  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
724
725  /* Qualifiers for miscellaneous purpose.
726     First 3 fields:
727     unused, unused and unused.  */
728
729  {0, 0, 0, "lsl", 0},
730  {0, 0, 0, "msl", 0},
731
732  {0, 0, 0, "retrieving", 0},
733};
734
735static inline bfd_boolean
736operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
737{
738  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
739    ? TRUE : FALSE;
740}
741
742static inline bfd_boolean
743qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
744{
745  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
746    ? TRUE : FALSE;
747}
748
749const char*
750aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
751{
752  return aarch64_opnd_qualifiers[qualifier].desc;
753}
754
755/* Given an operand qualifier, return the expected data element size
756   of a qualified operand.  */
757unsigned char
758aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
759{
760  assert (operand_variant_qualifier_p (qualifier) == TRUE);
761  return aarch64_opnd_qualifiers[qualifier].data0;
762}
763
764unsigned char
765aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
766{
767  assert (operand_variant_qualifier_p (qualifier) == TRUE);
768  return aarch64_opnd_qualifiers[qualifier].data1;
769}
770
771aarch64_insn
772aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
773{
774  assert (operand_variant_qualifier_p (qualifier) == TRUE);
775  return aarch64_opnd_qualifiers[qualifier].data2;
776}
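
/* For example, for the "4s" arrangement (AARCH64_OPND_QLF_V_4S) the table
   above gives an element size of 4 bytes, 4 elements and the standard
   encoding value 0x5.  */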
777
778static int
779get_lower_bound (aarch64_opnd_qualifier_t qualifier)
780{
781  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
782  return aarch64_opnd_qualifiers[qualifier].data0;
783}
784
785static int
786get_upper_bound (aarch64_opnd_qualifier_t qualifier)
787{
788  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
789  return aarch64_opnd_qualifiers[qualifier].data1;
790}
791
792#ifdef DEBUG_AARCH64
793void
794aarch64_verbose (const char *str, ...)
795{
796  va_list ap;
797  va_start (ap, str);
798  printf ("#### ");
799  vprintf (str, ap);
800  printf ("\n");
801  va_end (ap);
802}
803
804static inline void
805dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
806{
807  int i;
808  printf ("#### \t");
809  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
810    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
811  printf ("\n");
812}
813
814static void
815dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
816		       const aarch64_opnd_qualifier_t *qualifier)
817{
818  int i;
819  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
820
821  aarch64_verbose ("dump_match_qualifiers:");
822  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
823    curr[i] = opnd[i].qualifier;
824  dump_qualifier_sequence (curr);
825  aarch64_verbose ("against");
826  dump_qualifier_sequence (qualifier);
827}
828#endif /* DEBUG_AARCH64 */
829
/* TODO: improve this; we could add an extra field at run time to store the
   number of operands rather than calculating it every time.  */
832
833int
834aarch64_num_of_operands (const aarch64_opcode *opcode)
835{
836  int i = 0;
837  const enum aarch64_opnd *opnds = opcode->operands;
838  while (opnds[i++] != AARCH64_OPND_NIL)
839    ;
840  --i;
841  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
842  return i;
843}
844
845/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
846   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
847
   N.B. on entry, it is very likely that only some operands in *INST
   have had their qualifiers established.
850
851   If STOP_AT is not -1, the function will only try to match
852   the qualifier sequence for operands before and including the operand
853   of index STOP_AT; and on success *RET will only be filled with the first
854   (STOP_AT+1) qualifiers.
855
   A couple of examples of the matching algorithm:
857
858   X,W,NIL should match
859   X,W,NIL
860
861   NIL,NIL should match
862   X  ,NIL
863
864   Apart from serving the main encoding routine, this can also be called
865   during or after the operand decoding.  */
866
867int
868aarch64_find_best_match (const aarch64_inst *inst,
869			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
870			 int stop_at, aarch64_opnd_qualifier_t *ret)
871{
872  int found = 0;
873  int i, num_opnds;
874  const aarch64_opnd_qualifier_t *qualifiers;
875
876  num_opnds = aarch64_num_of_operands (inst->opcode);
877  if (num_opnds == 0)
878    {
879      DEBUG_TRACE ("SUCCEED: no operand");
880      return 1;
881    }
882
883  if (stop_at < 0 || stop_at >= num_opnds)
884    stop_at = num_opnds - 1;
885
886  /* For each pattern.  */
887  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
888    {
889      int j;
890      qualifiers = *qualifiers_list;
891
892      /* Start as positive.  */
893      found = 1;
894
895      DEBUG_TRACE ("%d", i);
896#ifdef DEBUG_AARCH64
897      if (debug_dump)
898	dump_match_qualifiers (inst->operands, qualifiers);
899#endif
900
      /* Most opcodes have far fewer patterns in the list.
	 The first NIL qualifier indicates the end of the list.  */
903      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
904	{
905	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
906	  if (i)
907	    found = 0;
908	  break;
909	}
910
911      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
912	{
913	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
914	    {
	      /* Either the operand does not have a qualifier, or the
		 qualifier for the operand needs to be deduced from the
		 qualifier sequence.
		 In the latter case, any constraint checking related to
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
921	      continue;
922	    }
923	  else if (*qualifiers != inst->operands[j].qualifier)
924	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which already has a non-nil qualifier), non-equal
		 qualifiers generally do not match.  */
928	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
929		continue;
930	      else
931		{
932		  found = 0;
933		  break;
934		}
935	    }
936	  else
937	    continue;	/* Equal qualifiers are certainly matched.  */
938	}
939
940      /* Qualifiers established.  */
941      if (found == 1)
942	break;
943    }
944
945  if (found == 1)
946    {
947      /* Fill the result in *RET.  */
948      int j;
949      qualifiers = *qualifiers_list;
950
951      DEBUG_TRACE ("complete qualifiers using list %d", i);
952#ifdef DEBUG_AARCH64
953      if (debug_dump)
954	dump_qualifier_sequence (qualifiers);
955#endif
956
957      for (j = 0; j <= stop_at; ++j, ++qualifiers)
958	ret[j] = *qualifiers;
959      for (; j < AARCH64_MAX_OPND_NUM; ++j)
960	ret[j] = AARCH64_OPND_QLF_NIL;
961
962      DEBUG_TRACE ("SUCCESS");
963      return 1;
964    }
965
966  DEBUG_TRACE ("FAIL");
967  return 0;
968}
969
970/* Operand qualifier matching and resolving.
971
972   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
973   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
974
975   if UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
976   succeeds.  */
977
978static int
979match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
980{
981  int i, nops;
982  aarch64_opnd_qualifier_seq_t qualifiers;
983
984  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
985			       qualifiers))
986    {
987      DEBUG_TRACE ("matching FAIL");
988      return 0;
989    }
990
991  if (inst->opcode->flags & F_STRICT)
992    {
993      /* Require an exact qualifier match, even for NIL qualifiers.  */
994      nops = aarch64_num_of_operands (inst->opcode);
995      for (i = 0; i < nops; ++i)
996	if (inst->operands[i].qualifier != qualifiers[i])
997	  return FALSE;
998    }
999
1000  /* Update the qualifiers.  */
1001  if (update_p == TRUE)
1002    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1003      {
1004	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1005	  break;
1006	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1007			"update %s with %s for operand %d",
1008			aarch64_get_qualifier_name (inst->operands[i].qualifier),
1009			aarch64_get_qualifier_name (qualifiers[i]), i);
1010	inst->operands[i].qualifier = qualifiers[i];
1011      }
1012
1013  DEBUG_TRACE ("matching SUCCESS");
1014  return 1;
1015}
1016
1017/* Return TRUE if VALUE is a wide constant that can be moved into a general
1018   register by MOVZ.
1019
1020   IS32 indicates whether value is a 32-bit immediate or not.
1021   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1022   amount will be returned in *SHIFT_AMOUNT.  */
1023
1024bfd_boolean
1025aarch64_wide_constant_p (int64_t value, int is32, unsigned int *shift_amount)
1026{
1027  int amount;
1028
1029  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1030
1031  if (is32)
1032    {
1033      /* Allow all zeros or all ones in top 32-bits, so that
1034	 32-bit constant expressions like ~0x80000000 are
1035	 permitted.  */
1036      uint64_t ext = value;
1037      if (ext >> 32 != 0 && ext >> 32 != (uint64_t) 0xffffffff)
1038	/* Immediate out of range.  */
1039	return FALSE;
1040      value &= (int64_t) 0xffffffff;
1041    }
1042
  /* Find the shift amount at which VALUE is a single 16-bit chunk, as
     required by MOVZ.  */
1044  amount = -1;
1045  if ((value & ((int64_t) 0xffff << 0)) == value)
1046    amount = 0;
1047  else if ((value & ((int64_t) 0xffff << 16)) == value)
1048    amount = 16;
1049  else if (!is32 && (value & ((int64_t) 0xffff << 32)) == value)
1050    amount = 32;
1051  else if (!is32 && (value & ((int64_t) 0xffff << 48)) == value)
1052    amount = 48;
1053
1054  if (amount == -1)
1055    {
1056      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1057      return FALSE;
1058    }
1059
1060  if (shift_amount != NULL)
1061    *shift_amount = amount;
1062
1063  DEBUG_TRACE ("exit TRUE with amount %d", amount);
1064
1065  return TRUE;
1066}
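
/* For example, with IS32 non-zero, 0xffff0000 is such a constant (it can be
   materialised by MOVZ Wd, #0xffff, LSL #16, so *SHIFT_AMOUNT is set to 16),
   whereas 0x12345678 is not.  */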
1067
1068/* Build the accepted values for immediate logical SIMD instructions.
1069
1070   The standard encodings of the immediate value are:
1071     N      imms     immr         SIMD size  R             S
1072     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
1073     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
1074     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
1075     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
1076     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
1077     0      11110s   00000r       2       UInt(r)       UInt(s)
1078   where all-ones value of S is reserved.
1079
1080   Let's call E the SIMD size.
1081
1082   The immediate value is: S+1 bits '1' rotated to the right by R.
1083
   The total number of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1085   (remember S != E - 1).  */
1086
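/* As a worked example: with element size 16, S = 3 and R = 4, the pattern is
   four consecutive ones (0x000f) rotated right by four within the element,
   giving 0xf000, which replicates to the 64-bit immediate 0xf000f000f000f000;
   its encoding is N = 0, imms = 0b100011 ("10ssss") and immr = 0b000100.  */
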
1087#define TOTAL_IMM_NB  5334
1088
1089typedef struct
1090{
1091  uint64_t imm;
1092  aarch64_insn encoding;
1093} simd_imm_encoding;
1094
1095static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1096
1097static int
1098simd_imm_encoding_cmp(const void *i1, const void *i2)
1099{
1100  const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1101  const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1102
1103  if (imm1->imm < imm2->imm)
1104    return -1;
1105  if (imm1->imm > imm2->imm)
1106    return +1;
1107  return 0;
1108}
1109
1110/* immediate bitfield standard encoding
1111   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
1112   1         ssssss     rrrrrr      64        rrrrrr ssssss
1113   0         0sssss     0rrrrr      32        rrrrr  sssss
1114   0         10ssss     00rrrr      16        rrrr   ssss
1115   0         110sss     000rrr      8         rrr    sss
1116   0         1110ss     0000rr      4         rr     ss
1117   0         11110s     00000r      2         r      s  */
1118static inline int
1119encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1120{
1121  return (is64 << 12) | (r << 6) | s;
1122}
1123
1124static void
1125build_immediate_table (void)
1126{
1127  uint32_t log_e, e, s, r, s_mask;
1128  uint64_t mask, imm;
1129  int nb_imms;
1130  int is64;
1131
1132  nb_imms = 0;
1133  for (log_e = 1; log_e <= 6; log_e++)
1134    {
1135      /* Get element size.  */
1136      e = 1u << log_e;
1137      if (log_e == 6)
1138	{
1139	  is64 = 1;
1140	  mask = 0xffffffffffffffffull;
1141	  s_mask = 0;
1142	}
1143      else
1144	{
1145	  is64 = 0;
1146	  mask = (1ull << e) - 1;
1147	  /* log_e  s_mask
1148	     1     ((1 << 4) - 1) << 2 = 111100
1149	     2     ((1 << 3) - 1) << 3 = 111000
1150	     3     ((1 << 2) - 1) << 4 = 110000
1151	     4     ((1 << 1) - 1) << 5 = 100000
1152	     5     ((1 << 0) - 1) << 6 = 000000  */
1153	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1154	}
1155      for (s = 0; s < e - 1; s++)
1156	for (r = 0; r < e; r++)
1157	  {
1158	    /* s+1 consecutive bits to 1 (s < 63) */
1159	    imm = (1ull << (s + 1)) - 1;
1160	    /* rotate right by r */
1161	    if (r != 0)
1162	      imm = (imm >> r) | ((imm << (e - r)) & mask);
1163	    /* replicate the constant depending on SIMD size */
1164	    switch (log_e)
1165	      {
1166	      case 1: imm = (imm <<  2) | imm;
1167		/* Fall through.  */
1168	      case 2: imm = (imm <<  4) | imm;
1169		/* Fall through.  */
1170	      case 3: imm = (imm <<  8) | imm;
1171		/* Fall through.  */
1172	      case 4: imm = (imm << 16) | imm;
1173		/* Fall through.  */
1174	      case 5: imm = (imm << 32) | imm;
1175		/* Fall through.  */
1176	      case 6: break;
1177	      default: abort ();
1178	      }
1179	    simd_immediates[nb_imms].imm = imm;
1180	    simd_immediates[nb_imms].encoding =
1181	      encode_immediate_bitfield(is64, s | s_mask, r);
1182	    nb_imms++;
1183	  }
1184    }
1185  assert (nb_imms == TOTAL_IMM_NB);
1186  qsort(simd_immediates, nb_imms,
1187	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1188}
1189
1190/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1191   be accepted by logical (immediate) instructions
1192   e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1193
1194   ESIZE is the number of bytes in the decoded immediate value.
1195   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1196   VALUE will be returned in *ENCODING.  */
1197
1198bfd_boolean
1199aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1200{
1201  simd_imm_encoding imm_enc;
1202  const simd_imm_encoding *imm_encoding;
1203  static bfd_boolean initialized = FALSE;
1204  uint64_t upper;
1205  int i;
1206
  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
	       value, esize);
1209
1210  if (initialized == FALSE)
1211    {
1212      build_immediate_table ();
1213      initialized = TRUE;
1214    }
1215
1216  /* Allow all zeros or all ones in top bits, so that
1217     constant expressions like ~1 are permitted.  */
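  /* The shift below is done in two steps so that it remains well defined when
     ESIZE is 8; a single shift by 64 bits would be undefined behaviour.  */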
1218  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1219  if ((value & ~upper) != value && (value | upper) != value)
1220    return FALSE;
1221
1222  /* Replicate to a full 64-bit value.  */
1223  value &= ~upper;
1224  for (i = esize * 8; i < 64; i *= 2)
1225    value |= (value << i);
1226
1227  imm_enc.imm = value;
1228  imm_encoding = (const simd_imm_encoding *)
1229    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1230            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1231  if (imm_encoding == NULL)
1232    {
1233      DEBUG_TRACE ("exit with FALSE");
1234      return FALSE;
1235    }
1236  if (encoding != NULL)
1237    *encoding = imm_encoding->encoding;
1238  DEBUG_TRACE ("exit with TRUE");
1239  return TRUE;
1240}
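
/* For example, aarch64_logical_immediate_p (0x5555555555555555, 8, NULL)
   returns TRUE, since alternating bits are a valid bitmask immediate
   (element size 2, S = 0, R = 0), whereas 0 and 0xffffffffffffffff are
   rejected: no encoding produces all-zeros, and the all-ones value of S is
   reserved for every element size.  */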
1241
1242/* If 64-bit immediate IMM is in the format of
1243   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1244   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1245   of value "abcdefgh".  Otherwise return -1.  */
1246int
1247aarch64_shrink_expanded_imm8 (uint64_t imm)
1248{
1249  int i, ret;
1250  uint32_t byte;
1251
1252  ret = 0;
1253  for (i = 0; i < 8; i++)
1254    {
1255      byte = (imm >> (8 * i)) & 0xff;
1256      if (byte == 0xff)
1257	ret |= 1 << i;
1258      else if (byte != 0x00)
1259	return -1;
1260    }
1261  return ret;
1262}
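
/* For example, aarch64_shrink_expanded_imm8 (0xff000000000000ff) returns 0x81
   (bits 7 and 0 set), while any input containing a byte other than 0x00 or
   0xff, e.g. 0x01, yields -1.  */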
1263
1264/* Utility inline functions for operand_general_constraint_met_p.  */
1265
1266static inline void
1267set_error (aarch64_operand_error *mismatch_detail,
1268	   enum aarch64_operand_error_kind kind, int idx,
1269	   const char* error)
1270{
1271  if (mismatch_detail == NULL)
1272    return;
1273  mismatch_detail->kind = kind;
1274  mismatch_detail->index = idx;
1275  mismatch_detail->error = error;
1276}
1277
1278static inline void
1279set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1280		  const char* error)
1281{
1282  if (mismatch_detail == NULL)
1283    return;
1284  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1285}
1286
1287static inline void
1288set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1289			int idx, int lower_bound, int upper_bound,
1290			const char* error)
1291{
1292  if (mismatch_detail == NULL)
1293    return;
1294  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1295  mismatch_detail->data[0] = lower_bound;
1296  mismatch_detail->data[1] = upper_bound;
1297}
1298
1299static inline void
1300set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1301			    int idx, int lower_bound, int upper_bound)
1302{
1303  if (mismatch_detail == NULL)
1304    return;
1305  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1306			  _("immediate value"));
1307}
1308
1309static inline void
1310set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1311			       int idx, int lower_bound, int upper_bound)
1312{
1313  if (mismatch_detail == NULL)
1314    return;
1315  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1316			  _("immediate offset"));
1317}
1318
1319static inline void
1320set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1321			      int idx, int lower_bound, int upper_bound)
1322{
1323  if (mismatch_detail == NULL)
1324    return;
1325  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1326			  _("register number"));
1327}
1328
1329static inline void
1330set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1331				 int idx, int lower_bound, int upper_bound)
1332{
1333  if (mismatch_detail == NULL)
1334    return;
1335  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1336			  _("register element index"));
1337}
1338
1339static inline void
1340set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1341				   int idx, int lower_bound, int upper_bound)
1342{
1343  if (mismatch_detail == NULL)
1344    return;
1345  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1346			  _("shift amount"));
1347}
1348
1349/* Report that the MUL modifier in operand IDX should be in the range
1350   [LOWER_BOUND, UPPER_BOUND].  */
1351static inline void
1352set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1353				   int idx, int lower_bound, int upper_bound)
1354{
1355  if (mismatch_detail == NULL)
1356    return;
1357  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1358			  _("multiplier"));
1359}
1360
1361static inline void
1362set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1363		     int alignment)
1364{
1365  if (mismatch_detail == NULL)
1366    return;
1367  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1368  mismatch_detail->data[0] = alignment;
1369}
1370
1371static inline void
1372set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1373		    int expected_num)
1374{
1375  if (mismatch_detail == NULL)
1376    return;
1377  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1378  mismatch_detail->data[0] = expected_num;
1379}
1380
1381static inline void
1382set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1383		 const char* error)
1384{
1385  if (mismatch_detail == NULL)
1386    return;
1387  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1388}
1389
1390/* General constraint checking based on operand code.
1391
1392   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1393   as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1394
1395   This function has to be called after the qualifiers for all operands
1396   have been resolved.
1397
   A mismatch error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
   messages during disassembly, where they are not wanted.  We avoid the
   dynamic construction of error message strings
1402   here (i.e. in libopcodes), as it is costly and complicated; instead, we
1403   use a combination of error code, static string and some integer data to
1404   represent an error.  */
1405
1406static int
1407operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1408				  enum aarch64_opnd type,
1409				  const aarch64_opcode *opcode,
1410				  aarch64_operand_error *mismatch_detail)
1411{
1412  unsigned num, modifiers, shift;
1413  unsigned char size;
1414  int64_t imm, min_value, max_value;
1415  uint64_t uvalue, mask;
1416  const aarch64_opnd_info *opnd = opnds + idx;
1417  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1418
1419  assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1420
1421  switch (aarch64_operands[type].op_class)
1422    {
1423    case AARCH64_OPND_CLASS_INT_REG:
1424      /* Check pair reg constraints for cas* instructions.  */
1425      if (type == AARCH64_OPND_PAIRREG)
1426	{
1427	  assert (idx == 1 || idx == 3);
1428	  if (opnds[idx - 1].reg.regno % 2 != 0)
1429	    {
1430	      set_syntax_error (mismatch_detail, idx - 1,
1431				_("reg pair must start from even reg"));
1432	      return 0;
1433	    }
1434	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1435	    {
1436	      set_syntax_error (mismatch_detail, idx,
1437				_("reg pair must be contiguous"));
1438	      return 0;
1439	    }
1440	  break;
1441	}
1442
1443      /* <Xt> may be optional in some IC and TLBI instructions.  */
1444      if (type == AARCH64_OPND_Rt_SYS)
1445	{
1446	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1447			       == AARCH64_OPND_CLASS_SYSTEM));
1448	  if (opnds[1].present
1449	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1450	    {
1451	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1452	      return 0;
1453	    }
1454	  if (!opnds[1].present
1455	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1456	    {
1457	      set_other_error (mismatch_detail, idx, _("missing register"));
1458	      return 0;
1459	    }
1460	}
1461      switch (qualifier)
1462	{
1463	case AARCH64_OPND_QLF_WSP:
1464	case AARCH64_OPND_QLF_SP:
1465	  if (!aarch64_stack_pointer_p (opnd))
1466	    {
1467	      set_other_error (mismatch_detail, idx,
1468			       _("stack pointer register expected"));
1469	      return 0;
1470	    }
1471	  break;
1472	default:
1473	  break;
1474	}
1475      break;
1476
1477    case AARCH64_OPND_CLASS_SVE_REG:
1478      switch (type)
1479	{
1480	case AARCH64_OPND_SVE_Zm3_INDEX:
1481	case AARCH64_OPND_SVE_Zm3_22_INDEX:
1482	case AARCH64_OPND_SVE_Zm4_INDEX:
1483	  size = get_operand_fields_width (get_operand_from_code (type));
1484	  shift = get_operand_specific_data (&aarch64_operands[type]);
1485	  mask = (1 << shift) - 1;
1486	  if (opnd->reg.regno > mask)
1487	    {
1488	      assert (mask == 7 || mask == 15);
1489	      set_other_error (mismatch_detail, idx,
1490			       mask == 15
1491			       ? _("z0-z15 expected")
1492			       : _("z0-z7 expected"));
1493	      return 0;
1494	    }
1495	  mask = (1 << (size - shift)) - 1;
1496	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
1497	    {
1498	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1499	      return 0;
1500	    }
1501	  break;
1502
1503	case AARCH64_OPND_SVE_Zn_INDEX:
1504	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1505	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1506	    {
1507	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
1508					       0, 64 / size - 1);
1509	      return 0;
1510	    }
1511	  break;
1512
1513	case AARCH64_OPND_SVE_ZnxN:
1514	case AARCH64_OPND_SVE_ZtxN:
1515	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1516	    {
1517	      set_other_error (mismatch_detail, idx,
1518			       _("invalid register list"));
1519	      return 0;
1520	    }
1521	  break;
1522
1523	default:
1524	  break;
1525	}
1526      break;
1527
1528    case AARCH64_OPND_CLASS_PRED_REG:
1529      if (opnd->reg.regno >= 8
1530	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
1531	{
1532	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1533	  return 0;
1534	}
1535      break;
1536
1537    case AARCH64_OPND_CLASS_COND:
1538      if (type == AARCH64_OPND_COND1
1539	  && (opnds[idx].cond->value & 0xe) == 0xe)
1540	{
	  /* Do not allow AL or NV.  */
1542	  set_syntax_error (mismatch_detail, idx, NULL);
1543	}
1544      break;
1545
1546    case AARCH64_OPND_CLASS_ADDRESS:
1547      /* Check writeback.  */
1548      switch (opcode->iclass)
1549	{
1550	case ldst_pos:
1551	case ldst_unscaled:
1552	case ldstnapair_offs:
1553	case ldstpair_off:
1554	case ldst_unpriv:
1555	  if (opnd->addr.writeback == 1)
1556	    {
1557	      set_syntax_error (mismatch_detail, idx,
1558				_("unexpected address writeback"));
1559	      return 0;
1560	    }
1561	  break;
1562	case ldst_imm10:
1563	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1564	    {
1565	      set_syntax_error (mismatch_detail, idx,
1566				_("unexpected address writeback"));
1567	      return 0;
1568	    }
1569	  break;
1570	case ldst_imm9:
1571	case ldstpair_indexed:
1572	case asisdlsep:
1573	case asisdlsop:
1574	  if (opnd->addr.writeback == 0)
1575	    {
1576	      set_syntax_error (mismatch_detail, idx,
1577				_("address writeback expected"));
1578	      return 0;
1579	    }
1580	  break;
1581	default:
1582	  assert (opnd->addr.writeback == 0);
1583	  break;
1584	}
1585      switch (type)
1586	{
1587	case AARCH64_OPND_ADDR_SIMM7:
1588	  /* Scaled signed 7 bits immediate offset.  */
	  /* Get the size of the data element that is accessed, which may be
	     different from the size of the source register,
	     e.g. in strb/ldrb.  */
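	  /* For example, for an LDP of two X registers SIZE is 8, so the
	     accepted offset range is [-512, 504] in multiples of 8.  */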
1592	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1593	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1594	    {
1595	      set_offset_out_of_range_error (mismatch_detail, idx,
1596					     -64 * size, 63 * size);
1597	      return 0;
1598	    }
1599	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1600	    {
1601	      set_unaligned_error (mismatch_detail, idx, size);
1602	      return 0;
1603	    }
1604	  break;
1605	case AARCH64_OPND_ADDR_SIMM9:
1606	  /* Unscaled signed 9 bits immediate offset.  */
1607	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1608	    {
1609	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1610	      return 0;
1611	    }
1612	  break;
1613
1614	case AARCH64_OPND_ADDR_SIMM9_2:
1615	  /* Unscaled signed 9 bits immediate offset, which has to be negative
1616	     or unaligned.  */
1617	  size = aarch64_get_qualifier_esize (qualifier);
1618	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1619	       && !value_aligned_p (opnd->addr.offset.imm, size))
1620	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1621	    return 1;
1622	  set_other_error (mismatch_detail, idx,
1623			   _("negative or unaligned offset expected"));
1624	  return 0;
1625
1626	case AARCH64_OPND_ADDR_SIMM10:
1627	  /* Scaled signed 10 bits immediate offset.  */
1628	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1629	    {
1630	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1631	      return 0;
1632	    }
1633	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
1634	    {
1635	      set_unaligned_error (mismatch_detail, idx, 8);
1636	      return 0;
1637	    }
1638	  break;
1639
1640	case AARCH64_OPND_SIMD_ADDR_POST:
1641	  /* AdvSIMD load/store multiple structures, post-index.  */
1642	  assert (idx == 1);
1643	  if (opnd->addr.offset.is_reg)
1644	    {
1645	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1646		return 1;
1647	      else
1648		{
1649		  set_other_error (mismatch_detail, idx,
1650				   _("invalid register offset"));
1651		  return 0;
1652		}
1653	    }
1654	  else
1655	    {
1656	      const aarch64_opnd_info *prev = &opnds[idx-1];
1657	      unsigned num_bytes; /* total number of bytes transferred.  */
1658	      /* The opcode dependent area stores the number of elements in
1659		 each structure to be loaded/stored.  */
1660	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1661	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
		/* Special handling of loading a single structure to all lanes.  */
1663		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1664		  * aarch64_get_qualifier_esize (prev->qualifier);
1665	      else
1666		num_bytes = prev->reglist.num_regs
1667		  * aarch64_get_qualifier_esize (prev->qualifier)
1668		  * aarch64_get_qualifier_nelem (prev->qualifier);
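	      /* For example, LD4 {V0.4S-V3.4S}, [X0], #64 transfers
		 4 registers * 4 elements * 4 bytes = 64 bytes, so the
		 post-increment immediate must be 64.  */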
1669	      if ((int) num_bytes != opnd->addr.offset.imm)
1670		{
1671		  set_other_error (mismatch_detail, idx,
1672				   _("invalid post-increment amount"));
1673		  return 0;
1674		}
1675	    }
1676	  break;
1677
1678	case AARCH64_OPND_ADDR_REGOFF:
	  /* Get the size of the data element that is accessed, which may be
	     different from the size of the source register,
	     e.g. in strb/ldrb.  */
1682	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1683	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1684	  if (opnd->shifter.amount != 0
1685	      && opnd->shifter.amount != (int)get_logsz (size))
1686	    {
1687	      set_other_error (mismatch_detail, idx,
1688			       _("invalid shift amount"));
1689	      return 0;
1690	    }
1691	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1692	     operators.  */
1693	  switch (opnd->shifter.kind)
1694	    {
1695	    case AARCH64_MOD_UXTW:
1696	    case AARCH64_MOD_LSL:
1697	    case AARCH64_MOD_SXTW:
1698	    case AARCH64_MOD_SXTX: break;
1699	    default:
1700	      set_other_error (mismatch_detail, idx,
1701			       _("invalid extend/shift operator"));
1702	      return 0;
1703	    }
1704	  break;
1705
1706	case AARCH64_OPND_ADDR_UIMM12:
1707	  imm = opnd->addr.offset.imm;
	  /* Get the size of the data element that is accessed, which may be
	     different from the size of the source register,
	     e.g. in strb/ldrb.  */
1711	  size = aarch64_get_qualifier_esize (qualifier);
1712	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1713	    {
1714	      set_offset_out_of_range_error (mismatch_detail, idx,
1715					     0, 4095 * size);
1716	      return 0;
1717	    }
1718	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1719	    {
1720	      set_unaligned_error (mismatch_detail, idx, size);
1721	      return 0;
1722	    }
1723	  break;
1724
1725	case AARCH64_OPND_ADDR_PCREL14:
1726	case AARCH64_OPND_ADDR_PCREL19:
1727	case AARCH64_OPND_ADDR_PCREL21:
1728	case AARCH64_OPND_ADDR_PCREL26:
1729	  imm = opnd->imm.value;
1730	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1731	    {
1732	      /* The offset value in a PC-relative branch instruction is always
1733		 4-byte aligned and is encoded without the lowest 2 bits.  */
1734	      if (!value_aligned_p (imm, 4))
1735		{
1736		  set_unaligned_error (mismatch_detail, idx, 4);
1737		  return 0;
1738		}
1739	      /* Right shift by 2 so that we can carry out the following check
1740		 canonically.  */
1741	      imm >>= 2;
1742	    }
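	  /* E.g. the 26-bit field of B/BL holds a word offset, giving a
	     branch range of +/-128MiB.  */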
1743	  size = get_operand_fields_width (get_operand_from_code (type));
1744	  if (!value_fit_signed_field_p (imm, size))
1745	    {
1746	      set_other_error (mismatch_detail, idx,
1747			       _("immediate out of range"));
1748	      return 0;
1749	    }
1750	  break;
1751
1752	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1753	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1754	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1755	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
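	  /* A signed offset measured in multiples of the vector length,
	     printed e.g. as "[x0, #-8, mul vl]".  */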
1756	  min_value = -8;
1757	  max_value = 7;
1758	sve_imm_offset_vl:
1759	  assert (!opnd->addr.offset.is_reg);
1760	  assert (opnd->addr.preind);
1761	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1762	  min_value *= num;
1763	  max_value *= num;
1764	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1765	      || (opnd->shifter.operator_present
1766		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1767	    {
1768	      set_other_error (mismatch_detail, idx,
1769			       _("invalid addressing mode"));
1770	      return 0;
1771	    }
1772	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1773	    {
1774	      set_offset_out_of_range_error (mismatch_detail, idx,
1775					     min_value, max_value);
1776	      return 0;
1777	    }
1778	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1779	    {
1780	      set_unaligned_error (mismatch_detail, idx, num);
1781	      return 0;
1782	    }
1783	  break;
1784
1785	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1786	  min_value = -32;
1787	  max_value = 31;
1788	  goto sve_imm_offset_vl;
1789
1790	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1791	  min_value = -256;
1792	  max_value = 255;
1793	  goto sve_imm_offset_vl;
1794
1795	case AARCH64_OPND_SVE_ADDR_RI_U6:
1796	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1797	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1798	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1799	  min_value = 0;
1800	  max_value = 63;
1801	sve_imm_offset:
1802	  assert (!opnd->addr.offset.is_reg);
1803	  assert (opnd->addr.preind);
1804	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1805	  min_value *= num;
1806	  max_value *= num;
1807	  if (opnd->shifter.operator_present
1808	      || opnd->shifter.amount_present)
1809	    {
1810	      set_other_error (mismatch_detail, idx,
1811			       _("invalid addressing mode"));
1812	      return 0;
1813	    }
1814	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1815	    {
1816	      set_offset_out_of_range_error (mismatch_detail, idx,
1817					     min_value, max_value);
1818	      return 0;
1819	    }
1820	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1821	    {
1822	      set_unaligned_error (mismatch_detail, idx, num);
1823	      return 0;
1824	    }
1825	  break;
1826
1827	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1828	  min_value = -8;
1829	  max_value = 7;
1830	  goto sve_imm_offset;
1831
1832	case AARCH64_OPND_SVE_ADDR_RR:
1833	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1834	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1835	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1836	case AARCH64_OPND_SVE_ADDR_RX:
1837	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1838	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1839	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1840	case AARCH64_OPND_SVE_ADDR_RZ:
1841	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1842	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1843	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1844	  modifiers = 1 << AARCH64_MOD_LSL;
1845	sve_rr_operand:
1846	  assert (opnd->addr.offset.is_reg);
1847	  assert (opnd->addr.preind);
1848	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1849	      && opnd->addr.offset.regno == 31)
1850	    {
1851	      set_other_error (mismatch_detail, idx,
1852			       _("index register xzr is not allowed"));
1853	      return 0;
1854	    }
1855	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1856	      || (opnd->shifter.amount
1857		  != get_operand_specific_data (&aarch64_operands[type])))
1858	    {
1859	      set_other_error (mismatch_detail, idx,
1860			       _("invalid addressing mode"));
1861	      return 0;
1862	    }
1863	  break;
1864
1865	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1866	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1867	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1868	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1869	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1870	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1871	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1872	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1873	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1874	  goto sve_rr_operand;
1875
1876	case AARCH64_OPND_SVE_ADDR_ZI_U5:
1877	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1878	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1879	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1880	  min_value = 0;
1881	  max_value = 31;
1882	  goto sve_imm_offset;
1883
1884	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1885	  modifiers = 1 << AARCH64_MOD_LSL;
1886	sve_zz_operand:
1887	  assert (opnd->addr.offset.is_reg);
1888	  assert (opnd->addr.preind);
1889	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1890	      || opnd->shifter.amount < 0
1891	      || opnd->shifter.amount > 3)
1892	    {
1893	      set_other_error (mismatch_detail, idx,
1894			       _("invalid addressing mode"));
1895	      return 0;
1896	    }
1897	  break;
1898
1899	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1900	  modifiers = (1 << AARCH64_MOD_SXTW);
1901	  goto sve_zz_operand;
1902
1903	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1904	  modifiers = 1 << AARCH64_MOD_UXTW;
1905	  goto sve_zz_operand;
1906
1907	default:
1908	  break;
1909	}
1910      break;
1911
1912    case AARCH64_OPND_CLASS_SIMD_REGLIST:
1913      if (type == AARCH64_OPND_LEt)
1914	{
1915	  /* Get the upper bound for the element index.  */
1916	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
1917	  if (!value_in_range_p (opnd->reglist.index, 0, num))
1918	    {
1919	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
1920	      return 0;
1921	    }
1922	}
1923      /* The opcode dependent area stores the number of elements in
1924	 each structure to be loaded/stored.  */
1925      num = get_opcode_dependent_value (opcode);
1926      switch (type)
1927	{
1928	case AARCH64_OPND_LVt:
1929	  assert (num >= 1 && num <= 4);
1930	  /* Unless LD1/ST1, the number of registers should be equal to that
1931	     of the structure elements.  */
1932	  if (num != 1 && opnd->reglist.num_regs != num)
1933	    {
1934	      set_reg_list_error (mismatch_detail, idx, num);
1935	      return 0;
1936	    }
1937	  break;
1938	case AARCH64_OPND_LVt_AL:
1939	case AARCH64_OPND_LEt:
1940	  assert (num >= 1 && num <= 4);
1941	  /* The number of registers should be equal to that of the structure
1942	     elements.  */
1943	  if (opnd->reglist.num_regs != num)
1944	    {
1945	      set_reg_list_error (mismatch_detail, idx, num);
1946	      return 0;
1947	    }
1948	  break;
1949	default:
1950	  break;
1951	}
1952      break;
1953
1954    case AARCH64_OPND_CLASS_IMMEDIATE:
1955      /* Constraint check on immediate operand.  */
1956      imm = opnd->imm.value;
1957      /* E.g. imm_0_31 constrains value to be 0..31.  */
1958      if (qualifier_value_in_range_constraint_p (qualifier)
1959	  && !value_in_range_p (imm, get_lower_bound (qualifier),
1960				get_upper_bound (qualifier)))
1961	{
1962	  set_imm_out_of_range_error (mismatch_detail, idx,
1963				      get_lower_bound (qualifier),
1964				      get_upper_bound (qualifier));
1965	  return 0;
1966	}
1967
1968      switch (type)
1969	{
1970	case AARCH64_OPND_AIMM:
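	  /* An arithmetic immediate, e.g. ADD <Xd|SP>, <Xn|SP>, #<imm>{, LSL #12},
	     where <imm> is an unsigned 12-bit value.  */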
1971	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
1972	    {
1973	      set_other_error (mismatch_detail, idx,
1974			       _("invalid shift operator"));
1975	      return 0;
1976	    }
1977	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
1978	    {
1979	      set_other_error (mismatch_detail, idx,
1980			       _("shift amount must be 0 or 12"));
1981	      return 0;
1982	    }
1983	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
1984	    {
1985	      set_other_error (mismatch_detail, idx,
1986			       _("immediate out of range"));
1987	      return 0;
1988	    }
1989	  break;
1990
1991	case AARCH64_OPND_HALF:
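	  /* A 16-bit move-wide immediate, e.g. MOVK <Xd>, #<imm16>, LSL #48;
	     the shift must be a multiple of 16 below the register width.  */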
1992	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
1993	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
1994	    {
1995	      set_other_error (mismatch_detail, idx,
1996			       _("invalid shift operator"));
1997	      return 0;
1998	    }
1999	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2000	  if (!value_aligned_p (opnd->shifter.amount, 16))
2001	    {
2002	      set_other_error (mismatch_detail, idx,
2003			       _("shift amount must be a multiple of 16"));
2004	      return 0;
2005	    }
2006	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2007	    {
2008	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
2009						 0, size * 8 - 16);
2010	      return 0;
2011	    }
2012	  if (opnd->imm.value < 0)
2013	    {
2014	      set_other_error (mismatch_detail, idx,
2015			       _("negative immediate value not allowed"));
2016	      return 0;
2017	    }
2018	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2019	    {
2020	      set_other_error (mismatch_detail, idx,
2021			       _("immediate out of range"));
2022	      return 0;
2023	    }
2024	  break;
2025
2026	case AARCH64_OPND_IMM_MOV:
2027	    {
2028	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2029	      imm = opnd->imm.value;
2030	      assert (idx == 1);
2031	      switch (opcode->op)
2032		{
2033		case OP_MOV_IMM_WIDEN:
2034		  imm = ~imm;
2035		  /* Fall through.  */
2036		case OP_MOV_IMM_WIDE:
2037		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2038		    {
2039		      set_other_error (mismatch_detail, idx,
2040				       _("immediate out of range"));
2041		      return 0;
2042		    }
2043		  break;
2044		case OP_MOV_IMM_LOG:
2045		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
2046		    {
2047		      set_other_error (mismatch_detail, idx,
2048				       _("immediate out of range"));
2049		      return 0;
2050		    }
2051		  break;
2052		default:
2053		  assert (0);
2054		  return 0;
2055		}
2056	    }
2057	  break;
2058
2059	case AARCH64_OPND_NZCV:
2060	case AARCH64_OPND_CCMP_IMM:
2061	case AARCH64_OPND_EXCEPTION:
2062	case AARCH64_OPND_UIMM4:
2063	case AARCH64_OPND_UIMM7:
2064	case AARCH64_OPND_UIMM3_OP1:
2065	case AARCH64_OPND_UIMM3_OP2:
2066	case AARCH64_OPND_SVE_UIMM3:
2067	case AARCH64_OPND_SVE_UIMM7:
2068	case AARCH64_OPND_SVE_UIMM8:
2069	case AARCH64_OPND_SVE_UIMM8_53:
2070	  size = get_operand_fields_width (get_operand_from_code (type));
2071	  assert (size < 32);
2072	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2073	    {
2074	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2075					  (1 << size) - 1);
2076	      return 0;
2077	    }
2078	  break;
2079
2080	case AARCH64_OPND_SIMM5:
2081	case AARCH64_OPND_SVE_SIMM5:
2082	case AARCH64_OPND_SVE_SIMM5B:
2083	case AARCH64_OPND_SVE_SIMM6:
2084	case AARCH64_OPND_SVE_SIMM8:
2085	  size = get_operand_fields_width (get_operand_from_code (type));
2086	  assert (size < 32);
2087	  if (!value_fit_signed_field_p (opnd->imm.value, size))
2088	    {
2089	      set_imm_out_of_range_error (mismatch_detail, idx,
2090					  -(1 << (size - 1)),
2091					  (1 << (size - 1)) - 1);
2092	      return 0;
2093	    }
2094	  break;
2095
2096	case AARCH64_OPND_WIDTH:
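	  /* E.g. for UBFX <Wd>, <Wn>, #<lsb>, #<width>, the constraint is
	     <lsb> + <width> <= 32 (the register width in bits).  */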
2097	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2098		  && opnds[0].type == AARCH64_OPND_Rd);
2099	  size = get_upper_bound (qualifier);
2100	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
2101	    /* lsb+width <= reg.size  */
2102	    {
2103	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
2104					  size - opnds[idx-1].imm.value);
2105	      return 0;
2106	    }
2107	  break;
2108
2109	case AARCH64_OPND_LIMM:
2110	case AARCH64_OPND_SVE_LIMM:
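	  /* A logical (bitmask) immediate; e.g. in the 64-bit form,
	     #0x5555555555555555 and #0x0000ffff0000ffff are encodable,
	     whereas #0 and #~0 never are.  */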
2111	  {
2112	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2113	    uint64_t uimm = opnd->imm.value;
2114	    if (opcode->op == OP_BIC)
2115	      uimm = ~uimm;
2116	    if (aarch64_logical_immediate_p (uimm, esize, NULL) == FALSE)
2117	      {
2118		set_other_error (mismatch_detail, idx,
2119				 _("immediate out of range"));
2120		return 0;
2121	      }
2122	  }
2123	  break;
2124
2125	case AARCH64_OPND_IMM0:
2126	case AARCH64_OPND_FPIMM0:
2127	  if (opnd->imm.value != 0)
2128	    {
2129	      set_other_error (mismatch_detail, idx,
2130			       _("immediate zero expected"));
2131	      return 0;
2132	    }
2133	  break;
2134
2135	case AARCH64_OPND_IMM_ROT1:
2136	case AARCH64_OPND_IMM_ROT2:
2137	case AARCH64_OPND_SVE_IMM_ROT2:
2138	  if (opnd->imm.value != 0
2139	      && opnd->imm.value != 90
2140	      && opnd->imm.value != 180
2141	      && opnd->imm.value != 270)
2142	    {
2143	      set_other_error (mismatch_detail, idx,
2144			       _("rotate expected to be 0, 90, 180 or 270"));
2145	      return 0;
2146	    }
2147	  break;
2148
2149	case AARCH64_OPND_IMM_ROT3:
2150	case AARCH64_OPND_SVE_IMM_ROT1:
2151	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
2152	    {
2153	      set_other_error (mismatch_detail, idx,
2154			       _("rotate expected to be 90 or 270"));
2155	      return 0;
2156	    }
2157	  break;
2158
2159	case AARCH64_OPND_SHLL_IMM:
2160	  assert (idx == 2);
2161	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2162	  if (opnd->imm.value != size)
2163	    {
2164	      set_other_error (mismatch_detail, idx,
2165			       _("invalid shift amount"));
2166	      return 0;
2167	    }
2168	  break;
2169
2170	case AARCH64_OPND_IMM_VLSL:
2171	  size = aarch64_get_qualifier_esize (qualifier);
2172	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2173	    {
2174	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2175					  size * 8 - 1);
2176	      return 0;
2177	    }
2178	  break;
2179
2180	case AARCH64_OPND_IMM_VLSR:
2181	  size = aarch64_get_qualifier_esize (qualifier);
2182	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2183	    {
2184	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2185	      return 0;
2186	    }
2187	  break;
2188
2189	case AARCH64_OPND_SIMD_IMM:
2190	case AARCH64_OPND_SIMD_IMM_SFT:
2191	  /* Qualifier check.  */
2192	  switch (qualifier)
2193	    {
2194	    case AARCH64_OPND_QLF_LSL:
2195	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
2196		{
2197		  set_other_error (mismatch_detail, idx,
2198				   _("invalid shift operator"));
2199		  return 0;
2200		}
2201	      break;
2202	    case AARCH64_OPND_QLF_MSL:
2203	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
2204		{
2205		  set_other_error (mismatch_detail, idx,
2206				   _("invalid shift operator"));
2207		  return 0;
2208		}
2209	      break;
2210	    case AARCH64_OPND_QLF_NIL:
2211	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2212		{
2213		  set_other_error (mismatch_detail, idx,
2214				   _("shift is not permitted"));
2215		  return 0;
2216		}
2217	      break;
2218	    default:
2219	      assert (0);
2220	      return 0;
2221	    }
2222	  /* Is the immediate valid?  */
2223	  assert (idx == 1);
2224	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2225	    {
2226	      /* uimm8 or simm8 */
2227	      if (!value_in_range_p (opnd->imm.value, -128, 255))
2228		{
2229		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2230		  return 0;
2231		}
2232	    }
2233	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2234	    {
2235	      /* uimm64 is not
2236		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2237		 ffffffffgggggggghhhhhhhh'.  */
2238	      set_other_error (mismatch_detail, idx,
2239			       _("invalid value for immediate"));
2240	      return 0;
2241	    }
2242	  /* Is the shift amount valid?  */
2243	  switch (opnd->shifter.kind)
2244	    {
2245	    case AARCH64_MOD_LSL:
2246	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2247	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2248		{
2249		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2250						     (size - 1) * 8);
2251		  return 0;
2252		}
2253	      if (!value_aligned_p (opnd->shifter.amount, 8))
2254		{
2255		  set_unaligned_error (mismatch_detail, idx, 8);
2256		  return 0;
2257		}
2258	      break;
2259	    case AARCH64_MOD_MSL:
2260	      /* Only 8 and 16 are valid shift amounts.  */
2261	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2262		{
2263		  set_other_error (mismatch_detail, idx,
2264				   _("shift amount must be 8 or 16"));
2265		  return 0;
2266		}
2267	      break;
2268	    default:
2269	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2270		{
2271		  set_other_error (mismatch_detail, idx,
2272				   _("invalid shift operator"));
2273		  return 0;
2274		}
2275	      break;
2276	    }
2277	  break;
2278
2279	case AARCH64_OPND_FPIMM:
2280	case AARCH64_OPND_SIMD_FPIMM:
2281	case AARCH64_OPND_SVE_FPIMM8:
2282	  if (opnd->imm.is_fp == 0)
2283	    {
2284	      set_other_error (mismatch_detail, idx,
2285			       _("floating-point immediate expected"));
2286	      return 0;
2287	    }
2288	  /* The value is expected to be an 8-bit floating-point constant with
2289	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
2290	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2291	     instruction).  */
2292	  if (!value_in_range_p (opnd->imm.value, 0, 255))
2293	    {
2294	      set_other_error (mismatch_detail, idx,
2295			       _("immediate out of range"));
2296	      return 0;
2297	    }
2298	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
2299	    {
2300	      set_other_error (mismatch_detail, idx,
2301			       _("invalid shift operator"));
2302	      return 0;
2303	    }
2304	  break;
2305
2306	case AARCH64_OPND_SVE_AIMM:
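	  /* An unsigned 8-bit value with an optional left shift by 8; e.g.
	     for .h elements, #256 is accepted (as #1, LSL #8) but #257 is
	     rejected below.  */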
2307	  min_value = 0;
2308	sve_aimm:
2309	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2310	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
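	  /* Compute a mask of the low SIZE * 8 bits; the shift is split in
	     two because a single shift by 64 (when SIZE is 8) would be
	     undefined.  */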
2311	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2312	  uvalue = opnd->imm.value;
2313	  shift = opnd->shifter.amount;
2314	  if (size == 1)
2315	    {
2316	      if (shift != 0)
2317		{
2318		  set_other_error (mismatch_detail, idx,
2319				   _("no shift amount allowed for"
2320				     " 8-bit constants"));
2321		  return 0;
2322		}
2323	    }
2324	  else
2325	    {
2326	      if (shift != 0 && shift != 8)
2327		{
2328		  set_other_error (mismatch_detail, idx,
2329				   _("shift amount must be 0 or 8"));
2330		  return 0;
2331		}
2332	      if (shift == 0 && (uvalue & 0xff) == 0)
2333		{
2334		  shift = 8;
2335		  uvalue = (int64_t) uvalue / 256;
2336		}
2337	    }
2338	  mask >>= shift;
2339	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2340	    {
2341	      set_other_error (mismatch_detail, idx,
2342			       _("immediate too big for element size"));
2343	      return 0;
2344	    }
2345	  uvalue = (uvalue - min_value) & mask;
2346	  if (uvalue > 0xff)
2347	    {
2348	      set_other_error (mismatch_detail, idx,
2349			       _("invalid arithmetic immediate"));
2350	      return 0;
2351	    }
2352	  break;
2353
2354	case AARCH64_OPND_SVE_ASIMM:
2355	  min_value = -128;
2356	  goto sve_aimm;
2357
2358	case AARCH64_OPND_SVE_I1_HALF_ONE:
2359	  assert (opnd->imm.is_fp);
2360	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2361	    {
2362	      set_other_error (mismatch_detail, idx,
2363			       _("floating-point value must be 0.5 or 1.0"));
2364	      return 0;
2365	    }
2366	  break;
2367
2368	case AARCH64_OPND_SVE_I1_HALF_TWO:
2369	  assert (opnd->imm.is_fp);
2370	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2371	    {
2372	      set_other_error (mismatch_detail, idx,
2373			       _("floating-point value must be 0.5 or 2.0"));
2374	      return 0;
2375	    }
2376	  break;
2377
2378	case AARCH64_OPND_SVE_I1_ZERO_ONE:
2379	  assert (opnd->imm.is_fp);
2380	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2381	    {
2382	      set_other_error (mismatch_detail, idx,
2383			       _("floating-point value must be 0.0 or 1.0"));
2384	      return 0;
2385	    }
2386	  break;
2387
2388	case AARCH64_OPND_SVE_INV_LIMM:
2389	  {
2390	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2391	    uint64_t uimm = ~opnd->imm.value;
2392	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2393	      {
2394		set_other_error (mismatch_detail, idx,
2395				 _("immediate out of range"));
2396		return 0;
2397	      }
2398	  }
2399	  break;
2400
2401	case AARCH64_OPND_SVE_LIMM_MOV:
2402	  {
2403	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2404	    uint64_t uimm = opnd->imm.value;
2405	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2406	      {
2407		set_other_error (mismatch_detail, idx,
2408				 _("immediate out of range"));
2409		return 0;
2410	      }
2411	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2412	      {
2413		set_other_error (mismatch_detail, idx,
2414				 _("invalid replicated MOV immediate"));
2415		return 0;
2416	      }
2417	  }
2418	  break;
2419
2420	case AARCH64_OPND_SVE_PATTERN_SCALED:
2421	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2422	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2423	    {
2424	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2425	      return 0;
2426	    }
2427	  break;
2428
2429	case AARCH64_OPND_SVE_SHLIMM_PRED:
2430	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2431	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2432	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2433	    {
2434	      set_imm_out_of_range_error (mismatch_detail, idx,
2435					  0, 8 * size - 1);
2436	      return 0;
2437	    }
2438	  break;
2439
2440	case AARCH64_OPND_SVE_SHRIMM_PRED:
2441	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2442	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2443	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2444	    {
2445	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8 * size);
2446	      return 0;
2447	    }
2448	  break;
2449
2450	default:
2451	  break;
2452	}
2453      break;
2454
2455    case AARCH64_OPND_CLASS_SYSTEM:
2456      switch (type)
2457	{
2458	case AARCH64_OPND_PSTATEFIELD:
2459	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2460	  /* MSR UAO, #uimm4
2461	     MSR PAN, #uimm4
2462	     The immediate must be #0 or #1.  */
2463	  if ((opnd->pstatefield == 0x03	/* UAO.  */
2464	       || opnd->pstatefield == 0x04)	/* PAN.  */
2465	      && opnds[1].imm.value > 1)
2466	    {
2467	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2468	      return 0;
2469	    }
2470	  /* MSR SPSel, #uimm4
2471	     Uses uimm4 as a control value to select the stack pointer: if
2472	     bit 0 is set it selects the current exception level's stack
2473	     pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
2474	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
2475	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2476	    {
2477	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2478	      return 0;
2479	    }
2480	  break;
2481	default:
2482	  break;
2483	}
2484      break;
2485
2486    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2487      /* Get the upper bound for the element index.  */
2488      if (opcode->op == OP_FCMLA_ELEM)
2489	/* FCMLA index range depends on the vector size of other operands
2490	   and is halved because complex numbers take two elements.  */
2491	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2492	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2493      else
2494	num = 16;
2495      num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2496
2497      /* Index out-of-range.  */
2498      if (!value_in_range_p (opnd->reglane.index, 0, num))
2499	{
2500	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2501	  return 0;
2502	}
2503      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2504	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
2505	 number is encoded in "size:M:Rm":
2506	 size	<Vm>
2507	 00		RESERVED
2508	 01		0:Rm
2509	 10		M:Rm
2510	 11		RESERVED  */
2511      if (type == AARCH64_OPND_Em && qualifier == AARCH64_OPND_QLF_S_H
2512	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
2513	{
2514	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2515	  return 0;
2516	}
2517      break;
2518
2519    case AARCH64_OPND_CLASS_MODIFIED_REG:
2520      assert (idx == 1 || idx == 2);
2521      switch (type)
2522	{
2523	case AARCH64_OPND_Rm_EXT:
2524	  if (aarch64_extend_operator_p (opnd->shifter.kind) == FALSE
2525	      && opnd->shifter.kind != AARCH64_MOD_LSL)
2526	    {
2527	      set_other_error (mismatch_detail, idx,
2528			       _("extend operator expected"));
2529	      return 0;
2530	    }
2531	  /* The extend operator is not optional unless at least one of "Rd" or
2532	     "Rn" is '11111' (i.e. SP), in which case it defaults to LSL.  The
2533	     LSL alias is only valid when "Rd" or "Rn" is '11111', and is
2534	     preferred in that case.  */
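	  /* E.g. ADD X0, X1, W2, SXTW requires an explicit extend operator,
	     whereas ADD SP, SP, X2 may omit it and defaults to LSL #0.  */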
2535	  if (!aarch64_stack_pointer_p (opnds + 0)
2536	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2537	    {
2538	      if (!opnd->shifter.operator_present)
2539		{
2540		  set_other_error (mismatch_detail, idx,
2541				   _("missing extend operator"));
2542		  return 0;
2543		}
2544	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2545		{
2546		  set_other_error (mismatch_detail, idx,
2547				   _("'LSL' operator not allowed"));
2548		  return 0;
2549		}
2550	    }
2551	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
2552		  || opnd->shifter.kind == AARCH64_MOD_LSL);
2553	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2554	    {
2555	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2556	      return 0;
2557	    }
2558	  /* In the 64-bit form, the final register operand is written as Wm
2559	     for all but the (possibly omitted) UXTX/LSL and SXTX
2560	     operators.
2561	     N.B. GAS allows X register to be used with any operator as a
2562	     programming convenience.  */
2563	  if (qualifier == AARCH64_OPND_QLF_X
2564	      && opnd->shifter.kind != AARCH64_MOD_LSL
2565	      && opnd->shifter.kind != AARCH64_MOD_UXTX
2566	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
2567	    {
2568	      set_other_error (mismatch_detail, idx, _("W register expected"));
2569	      return 0;
2570	    }
2571	  break;
2572
2573	case AARCH64_OPND_Rm_SFT:
2574	  /* ROR is not available to the shifted register operand in
2575	     arithmetic instructions.  */
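	  /* E.g. ORR X0, X1, X2, ROR #8 is valid (a logical instruction),
	     but ADD X0, X1, X2, ROR #8 is not.  */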
2576	  if (aarch64_shift_operator_p (opnd->shifter.kind) == FALSE)
2577	    {
2578	      set_other_error (mismatch_detail, idx,
2579			       _("shift operator expected"));
2580	      return 0;
2581	    }
2582	  if (opnd->shifter.kind == AARCH64_MOD_ROR
2583	      && opcode->iclass != log_shift)
2584	    {
2585	      set_other_error (mismatch_detail, idx,
2586			       _("'ROR' operator not allowed"));
2587	      return 0;
2588	    }
2589	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2590	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
2591	    {
2592	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2593	      return 0;
2594	    }
2595	  break;
2596
2597	default:
2598	  break;
2599	}
2600      break;
2601
2602    default:
2603      break;
2604    }
2605
2606  return 1;
2607}
2608
2609/* Main entrypoint for the operand constraint checking.
2610
2611   Return 1 if operands of *INST meet the constraint applied by the operand
2612   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2613   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2614   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2615   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2616   error kind when it is notified that an instruction does not pass the check).
2617
2618   Un-determined operand qualifiers may get established during the process.  */
2619
2620int
2621aarch64_match_operands_constraint (aarch64_inst *inst,
2622				   aarch64_operand_error *mismatch_detail)
2623{
2624  int i;
2625
2626  DEBUG_TRACE ("enter");
2627
2628  /* Check for cases where a source register needs to be the same as the
2629     destination register.  Do this before matching qualifiers since if
2630     an instruction has both invalid tying and invalid qualifiers,
2631     the error about qualifiers would suggest several alternative
2632     instructions that also have invalid tying.  */
2633  i = inst->opcode->tied_operand;
2634  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2635    {
2636      if (mismatch_detail)
2637	{
2638	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2639	  mismatch_detail->index = i;
2640	  mismatch_detail->error = NULL;
2641	}
2642      return 0;
2643    }
2644
2645  /* Match operands' qualifiers.
2646     *INST has already had qualifiers established for some, if not all, of
2647     its operands; we need to find out whether these established
2648     qualifiers match one of the qualifier sequences in
2649     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2650     the corresponding qualifier in such a sequence.
2651     Only basic operand constraint checking is done here; the more thorough
2652     constraint checking will be carried out by operand_general_constraint_met_p,
2653     which has to be called after this in order to get all of the operands'
2654     qualifiers established.  */
2655  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2656    {
2657      DEBUG_TRACE ("FAIL on operand qualifier matching");
2658      if (mismatch_detail)
2659	{
2660	  /* Return an error type to indicate that it is the qualifier
2661	     matching failure; we don't care about which operand as there
2662	     is enough information in the opcode table to reproduce it.  */
2663	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2664	  mismatch_detail->index = -1;
2665	  mismatch_detail->error = NULL;
2666	}
2667      return 0;
2668    }
2669
2670  /* Match operands' constraint.  */
2671  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2672    {
2673      enum aarch64_opnd type = inst->opcode->operands[i];
2674      if (type == AARCH64_OPND_NIL)
2675	break;
2676      if (inst->operands[i].skip)
2677	{
2678	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2679	  continue;
2680	}
2681      if (operand_general_constraint_met_p (inst->operands, i, type,
2682					    inst->opcode, mismatch_detail) == 0)
2683	{
2684	  DEBUG_TRACE ("FAIL on operand %d", i);
2685	  return 0;
2686	}
2687    }
2688
2689  DEBUG_TRACE ("PASS");
2690
2691  return 1;
2692}
2693
2694/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2695   Also updates the TYPE of each INST->OPERANDS with the corresponding
2696   value of OPCODE->OPERANDS.
2697
2698   Note that some operand qualifiers may need to be manually cleared by
2699   the caller before it further calls aarch64_opcode_encode; doing
2700   this helps the qualifier matching facilities work
2701   properly.  */
2702
2703const aarch64_opcode*
2704aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2705{
2706  int i;
2707  const aarch64_opcode *old = inst->opcode;
2708
2709  inst->opcode = opcode;
2710
2711  /* Update the operand types.  */
2712  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2713    {
2714      inst->operands[i].type = opcode->operands[i];
2715      if (opcode->operands[i] == AARCH64_OPND_NIL)
2716	break;
2717    }
2718
2719  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2720
2721  return old;
2722}
2723
2724int
2725aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2726{
2727  int i;
2728  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2729    if (operands[i] == operand)
2730      return i;
2731    else if (operands[i] == AARCH64_OPND_NIL)
2732      break;
2733  return -1;
2734}
2735
2736/* R0...R30, followed by FOR31.  */
2737#define BANK(R, FOR31) \
2738  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
2739    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2740    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2741    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
2742/* [0][0]  32-bit integer regs with sp   Wn
2743   [0][1]  64-bit integer regs with sp   Xn  sf=1
2744   [1][0]  32-bit integer regs with #0   Wn
2745   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
2746static const char *int_reg[2][2][32] = {
2747#define R32(X) "w" #X
2748#define R64(X) "x" #X
2749  { BANK (R32, "wsp"), BANK (R64, "sp") },
2750  { BANK (R32, "wzr"), BANK (R64, "xzr") }
2751#undef R64
2752#undef R32
2753};
2754
2755/* Names of the SVE vector registers, first with .S suffixes,
2756   then with .D suffixes.  */
2757
2758static const char *sve_reg[2][32] = {
2759#define ZS(X) "z" #X ".s"
2760#define ZD(X) "z" #X ".d"
2761  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2762#undef ZD
2763#undef ZS
2764};
2765#undef BANK
2766
2767/* Return the integer register name.
2768   If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2769
2770static inline const char *
2771get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2772{
2773  const int has_zr = sp_reg_p ? 0 : 1;
2774  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2775  return int_reg[has_zr][is_64][regno];
2776}
2777
2778/* Like get_int_reg_name, but IS_64 is always 1.  */
2779
2780static inline const char *
2781get_64bit_int_reg_name (int regno, int sp_reg_p)
2782{
2783  const int has_zr = sp_reg_p ? 0 : 1;
2784  return int_reg[has_zr][1][regno];
2785}
2786
2787/* Get the name of the integer offset register in OPND, using the shift type
2788   to decide whether it's a word or doubleword.  */
2789
2790static inline const char *
2791get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2792{
2793  switch (opnd->shifter.kind)
2794    {
2795    case AARCH64_MOD_UXTW:
2796    case AARCH64_MOD_SXTW:
2797      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2798
2799    case AARCH64_MOD_LSL:
2800    case AARCH64_MOD_SXTX:
2801      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2802
2803    default:
2804      abort ();
2805    }
2806}
2807
2808/* Get the name of the SVE vector offset register in OPND, using the operand
2809   qualifier to decide whether the suffix should be .S or .D.  */
2810
2811static inline const char *
2812get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2813{
2814  assert (qualifier == AARCH64_OPND_QLF_S_S
2815	  || qualifier == AARCH64_OPND_QLF_S_D);
2816  return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2817}
2818
2819/* Types for expanding an encoded 8-bit value to a floating-point value.  */
2820
2821typedef union
2822{
2823  uint64_t i;
2824  double   d;
2825} double_conv_t;
2826
2827typedef union
2828{
2829  uint32_t i;
2830  float    f;
2831} single_conv_t;
2832
2833typedef union
2834{
2835  uint32_t i;
2836  float    f;
2837} half_conv_t;
2838
2839/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2840   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2841   (depending on the type of the instruction).  IMM8 will be expanded to a
2842   single-precision floating-point value (SIZE == 4) or a double-precision
2843   floating-point value (SIZE == 8).  A half-precision floating-point value
2844   (SIZE == 2) is expanded to a single-precision floating-point value.  The
2845   expanded value is returned.  */
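/* E.g. IMM8 == 0x70 expands to 0x3f800000 (1.0f) when SIZE == 4, and to
   0x3ff0000000000000 (1.0) when SIZE == 8.  */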
2846
2847static uint64_t
2848expand_fp_imm (int size, uint32_t imm8)
2849{
2850  uint64_t imm;
2851  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2852
2853  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
2854  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
2855  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
2856  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2857    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
2858  if (size == 8)
2859    {
2860      imm = (imm8_7 << (63-32))		/* imm8<7>  */
2861	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
2862	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2863	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2864	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
2865      imm <<= 32;
2866    }
2867  else if (size == 4 || size == 2)
2868    {
2869      imm = (imm8_7 << 31)	/* imm8<7>              */
2870	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
2871	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
2872	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
2873    }
2874  else
2875    {
2876      /* An unsupported size.  */
2877      assert (0);
2878    }
2879
2880  return imm;
2881}
2882
2883/* Produce the string representation of the register list operand *OPND
2884   in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
2885   the register name that comes before the register number, such as "v".  */
2886static void
2887print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2888		     const char *prefix)
2889{
2890  const int num_regs = opnd->reglist.num_regs;
2891  const int first_reg = opnd->reglist.first_regno;
2892  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
2893  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
2894  char tb[8];	/* Temporary buffer.  */
2895
2896  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
2897  assert (num_regs >= 1 && num_regs <= 4);
2898
2899  /* Prepare the index if any.  */
2900  if (opnd->reglist.has_index)
2901    snprintf (tb, 8, "[%" PRIi64 "]", opnd->reglist.index);
2902  else
2903    tb[0] = '\0';
2904
2905  /* The hyphenated form is preferred for disassembly if there are
2906     more than two registers in the list, and the register numbers
2907     are monotonically increasing in increments of one.  */
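  /* E.g. four consecutive registers are printed as "{v4.4s-v7.4s}", whereas
     a wrapping list such as {v31.8b, v0.8b, v1.8b} keeps the comma-separated
     form.  */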
2908  if (num_regs > 2 && last_reg > first_reg)
2909    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
2910	      prefix, last_reg, qlf_name, tb);
2911  else
2912    {
2913      const int reg0 = first_reg;
2914      const int reg1 = (first_reg + 1) & 0x1f;
2915      const int reg2 = (first_reg + 2) & 0x1f;
2916      const int reg3 = (first_reg + 3) & 0x1f;
2917
2918      switch (num_regs)
2919	{
2920	case 1:
2921	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
2922	  break;
2923	case 2:
2924	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
2925		    prefix, reg1, qlf_name, tb);
2926	  break;
2927	case 3:
2928	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
2929		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2930		    prefix, reg2, qlf_name, tb);
2931	  break;
2932	case 4:
2933	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
2934		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
2935		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
2936	  break;
2937	}
2938    }
2939}
2940
2941/* Print the register+immediate address in OPND to BUF, which has SIZE
2942   characters.  BASE is the name of the base register.  */
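/* E.g. the output takes forms such as "[x7, #24]!" (pre-index writeback),
   "[x7], #24" (post-index), "[x7, #3, mul vl]" (SVE) or plain "[x7]".  */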
2943
2944static void
2945print_immediate_offset_address (char *buf, size_t size,
2946				const aarch64_opnd_info *opnd,
2947				const char *base)
2948{
2949  if (opnd->addr.writeback)
2950    {
2951      if (opnd->addr.preind)
2952	snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
2953      else
2954	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
2955    }
2956  else
2957    {
2958      if (opnd->shifter.operator_present)
2959	{
2960	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
2961	  snprintf (buf, size, "[%s, #%d, mul vl]",
2962		    base, opnd->addr.offset.imm);
2963	}
2964      else if (opnd->addr.offset.imm)
2965	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
2966      else
2967	snprintf (buf, size, "[%s]", base);
2968    }
2969}
2970
2971/* Produce the string representation of the register offset address operand
2972   *OPND in the buffer pointed to by BUF of size SIZE.  BASE and OFFSET are
2973   the names of the base and offset registers.  */
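/* E.g. the output takes forms such as "[x0, x1, lsl #3]" or "[x0, w1, sxtw]",
   the extend/shift amount being omitted when it is zero.  */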
2974static void
2975print_register_offset_address (char *buf, size_t size,
2976			       const aarch64_opnd_info *opnd,
2977			       const char *base, const char *offset)
2978{
2979  char tb[16];			/* Temporary buffer.  */
2980  bfd_boolean print_extend_p = TRUE;
2981  bfd_boolean print_amount_p = TRUE;
2982  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
2983
2984  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
2985				|| !opnd->shifter.amount_present))
2986    {
2987      /* Don't print the shift/extend amount when the amount is zero and
2988         it is not the special case of an 8-bit load/store instruction.  */
2989      print_amount_p = FALSE;
2990      /* Likewise, no need to print the shift operator LSL in such a
2991	 situation.  */
2992      if (opnd->shifter.kind == AARCH64_MOD_LSL)
2993	print_extend_p = FALSE;
2994    }
2995
2996  /* Prepare for the extend/shift.  */
2997  if (print_extend_p)
2998    {
2999      if (print_amount_p)
3000	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3001		  opnd->shifter.amount);
3002      else
3003	snprintf (tb, sizeof (tb), ", %s", shift_name);
3004    }
3005  else
3006    tb[0] = '\0';
3007
3008  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3009}
3010
3011/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3012   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
3013   PC, PCREL_P and ADDRESS are used to pass in and return information about
3014   the PC-relative address calculation, where the PC value is passed in
3015   PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3016   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3017   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3018
3019   The function serves both the disassembler and the assembler diagnostics
3020   issuer, which is the reason why it lives in this file.  */
3021
3022void
3023aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3024		       const aarch64_opcode *opcode,
3025		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3026		       bfd_vma *address)
3027{
3028  unsigned int i, num_conds;
3029  const char *name = NULL;
3030  const aarch64_opnd_info *opnd = opnds + idx;
3031  enum aarch64_modifier_kind kind;
3032  uint64_t addr, enum_value;
3033
3034  buf[0] = '\0';
3035  if (pcrel_p)
3036    *pcrel_p = 0;
3037
3038  switch (opnd->type)
3039    {
3040    case AARCH64_OPND_Rd:
3041    case AARCH64_OPND_Rn:
3042    case AARCH64_OPND_Rm:
3043    case AARCH64_OPND_Rt:
3044    case AARCH64_OPND_Rt2:
3045    case AARCH64_OPND_Rs:
3046    case AARCH64_OPND_Ra:
3047    case AARCH64_OPND_Rt_SYS:
3048    case AARCH64_OPND_PAIRREG:
3049    case AARCH64_OPND_SVE_Rm:
3050      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3051	 the <ic_op>, therefore we use opnd->present to override the
3052	 generic optional-ness information.  */
3053      if (opnd->type == AARCH64_OPND_Rt_SYS)
3054	{
3055	  if (!opnd->present)
3056	    break;
3057	}
3058      /* Omit the operand, e.g. RET.  */
3059      else if (optional_operand_p (opcode, idx)
3060	       && (opnd->reg.regno
3061		   == get_optional_operand_default_value (opcode)))
3062	break;
3063      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3064	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3065      snprintf (buf, size, "%s",
3066		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3067      break;
3068
3069    case AARCH64_OPND_Rd_SP:
3070    case AARCH64_OPND_Rn_SP:
3071    case AARCH64_OPND_SVE_Rn_SP:
3072    case AARCH64_OPND_Rm_SP:
3073      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3074	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
3075	      || opnd->qualifier == AARCH64_OPND_QLF_X
3076	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
3077      snprintf (buf, size, "%s",
3078		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3079      break;
3080
3081    case AARCH64_OPND_Rm_EXT:
3082      kind = opnd->shifter.kind;
3083      assert (idx == 1 || idx == 2);
3084      if ((aarch64_stack_pointer_p (opnds)
3085	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3086	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
3087	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
3088	       && kind == AARCH64_MOD_UXTW)
3089	      || (opnd->qualifier == AARCH64_OPND_QLF_X
3090		  && kind == AARCH64_MOD_UXTX)))
3091	{
3092	  /* 'LSL' is the preferred form in this case.  */
3093	  kind = AARCH64_MOD_LSL;
3094	  if (opnd->shifter.amount == 0)
3095	    {
3096	      /* Shifter omitted.  */
3097	      snprintf (buf, size, "%s",
3098			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3099	      break;
3100	    }
3101	}
3102      if (opnd->shifter.amount)
3103	snprintf (buf, size, "%s, %s #%" PRIi64,
3104		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3105		  aarch64_operand_modifiers[kind].name,
3106		  opnd->shifter.amount);
3107      else
3108	snprintf (buf, size, "%s, %s",
3109		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3110		  aarch64_operand_modifiers[kind].name);
3111      break;
3112
3113    case AARCH64_OPND_Rm_SFT:
3114      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3115	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3116      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3117	snprintf (buf, size, "%s",
3118		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3119      else
3120	snprintf (buf, size, "%s, %s #%" PRIi64,
3121		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3122		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3123		  opnd->shifter.amount);
3124      break;
3125
3126    case AARCH64_OPND_Fd:
3127    case AARCH64_OPND_Fn:
3128    case AARCH64_OPND_Fm:
3129    case AARCH64_OPND_Fa:
3130    case AARCH64_OPND_Ft:
3131    case AARCH64_OPND_Ft2:
3132    case AARCH64_OPND_Sd:
3133    case AARCH64_OPND_Sn:
3134    case AARCH64_OPND_Sm:
3135    case AARCH64_OPND_SVE_VZn:
3136    case AARCH64_OPND_SVE_Vd:
3137    case AARCH64_OPND_SVE_Vm:
3138    case AARCH64_OPND_SVE_Vn:
3139      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3140		opnd->reg.regno);
3141      break;
3142
3143    case AARCH64_OPND_Vd:
3144    case AARCH64_OPND_Vn:
3145    case AARCH64_OPND_Vm:
3146      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3147		aarch64_get_qualifier_name (opnd->qualifier));
3148      break;
3149
3150    case AARCH64_OPND_Ed:
3151    case AARCH64_OPND_En:
3152    case AARCH64_OPND_Em:
3153      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3154		aarch64_get_qualifier_name (opnd->qualifier),
3155		opnd->reglane.index);
3156      break;
3157
3158    case AARCH64_OPND_VdD1:
3159    case AARCH64_OPND_VnD1:
3160      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3161      break;
3162
3163    case AARCH64_OPND_LVn:
3164    case AARCH64_OPND_LVt:
3165    case AARCH64_OPND_LVt_AL:
3166    case AARCH64_OPND_LEt:
3167      print_register_list (buf, size, opnd, "v");
3168      break;
3169
3170    case AARCH64_OPND_SVE_Pd:
3171    case AARCH64_OPND_SVE_Pg3:
3172    case AARCH64_OPND_SVE_Pg4_5:
3173    case AARCH64_OPND_SVE_Pg4_10:
3174    case AARCH64_OPND_SVE_Pg4_16:
3175    case AARCH64_OPND_SVE_Pm:
3176    case AARCH64_OPND_SVE_Pn:
3177    case AARCH64_OPND_SVE_Pt:
3178      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3179	snprintf (buf, size, "p%d", opnd->reg.regno);
3180      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3181	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3182	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3183		  aarch64_get_qualifier_name (opnd->qualifier));
3184      else
3185	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3186		  aarch64_get_qualifier_name (opnd->qualifier));
3187      break;
3188
3189    case AARCH64_OPND_SVE_Za_5:
3190    case AARCH64_OPND_SVE_Za_16:
3191    case AARCH64_OPND_SVE_Zd:
3192    case AARCH64_OPND_SVE_Zm_5:
3193    case AARCH64_OPND_SVE_Zm_16:
3194    case AARCH64_OPND_SVE_Zn:
3195    case AARCH64_OPND_SVE_Zt:
3196      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3197	snprintf (buf, size, "z%d", opnd->reg.regno);
3198      else
3199	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3200		  aarch64_get_qualifier_name (opnd->qualifier));
3201      break;
3202
3203    case AARCH64_OPND_SVE_ZnxN:
3204    case AARCH64_OPND_SVE_ZtxN:
3205      print_register_list (buf, size, opnd, "z");
3206      break;
3207
3208    case AARCH64_OPND_SVE_Zm3_INDEX:
3209    case AARCH64_OPND_SVE_Zm3_22_INDEX:
3210    case AARCH64_OPND_SVE_Zm4_INDEX:
3211    case AARCH64_OPND_SVE_Zn_INDEX:
3212      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3213		aarch64_get_qualifier_name (opnd->qualifier),
3214		opnd->reglane.index);
3215      break;
3216
3217    case AARCH64_OPND_CRn:
3218    case AARCH64_OPND_CRm:
3219      snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3220      break;
3221
3222    case AARCH64_OPND_IDX:
3223    case AARCH64_OPND_IMM:
3224    case AARCH64_OPND_WIDTH:
3225    case AARCH64_OPND_UIMM3_OP1:
3226    case AARCH64_OPND_UIMM3_OP2:
3227    case AARCH64_OPND_BIT_NUM:
3228    case AARCH64_OPND_IMM_VLSL:
3229    case AARCH64_OPND_IMM_VLSR:
3230    case AARCH64_OPND_SHLL_IMM:
3231    case AARCH64_OPND_IMM0:
3232    case AARCH64_OPND_IMMR:
3233    case AARCH64_OPND_IMMS:
3234    case AARCH64_OPND_FBITS:
3235    case AARCH64_OPND_SIMM5:
3236    case AARCH64_OPND_SVE_SHLIMM_PRED:
3237    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3238    case AARCH64_OPND_SVE_SHRIMM_PRED:
3239    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3240    case AARCH64_OPND_SVE_SIMM5:
3241    case AARCH64_OPND_SVE_SIMM5B:
3242    case AARCH64_OPND_SVE_SIMM6:
3243    case AARCH64_OPND_SVE_SIMM8:
3244    case AARCH64_OPND_SVE_UIMM3:
3245    case AARCH64_OPND_SVE_UIMM7:
3246    case AARCH64_OPND_SVE_UIMM8:
3247    case AARCH64_OPND_SVE_UIMM8_53:
3248    case AARCH64_OPND_IMM_ROT1:
3249    case AARCH64_OPND_IMM_ROT2:
3250    case AARCH64_OPND_IMM_ROT3:
3251    case AARCH64_OPND_SVE_IMM_ROT1:
3252    case AARCH64_OPND_SVE_IMM_ROT2:
3253      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3254      break;
3255
3256    case AARCH64_OPND_SVE_I1_HALF_ONE:
3257    case AARCH64_OPND_SVE_I1_HALF_TWO:
3258    case AARCH64_OPND_SVE_I1_ZERO_ONE:
3259      {
3260	single_conv_t c;
3261	c.i = opnd->imm.value;
3262	snprintf (buf, size, "#%.1f", c.f);
3263	break;
3264      }
3265
3266    case AARCH64_OPND_SVE_PATTERN:
3267      if (optional_operand_p (opcode, idx)
3268	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3269	break;
3270      enum_value = opnd->imm.value;
3271      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3272      if (aarch64_sve_pattern_array[enum_value])
3273	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3274      else
3275	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3276      break;
3277
3278    case AARCH64_OPND_SVE_PATTERN_SCALED:
3279      if (optional_operand_p (opcode, idx)
3280	  && !opnd->shifter.operator_present
3281	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3282	break;
3283      enum_value = opnd->imm.value;
3284      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3285      if (aarch64_sve_pattern_array[opnd->imm.value])
3286	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3287      else
3288	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3289      if (opnd->shifter.operator_present)
3290	{
3291	  size_t len = strlen (buf);
3292	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
3293		    aarch64_operand_modifiers[opnd->shifter.kind].name,
3294		    opnd->shifter.amount);
3295	}
3296      break;
3297
3298    case AARCH64_OPND_SVE_PRFOP:
3299      enum_value = opnd->imm.value;
3300      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3301      if (aarch64_sve_prfop_array[enum_value])
3302	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3303      else
3304	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3305      break;
3306
3307    case AARCH64_OPND_IMM_MOV:
3308      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3309	{
3310	case 4:	/* e.g. MOV Wd, #<imm32>.  */
3311	    {
3312	      int imm32 = opnd->imm.value;
3313	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3314	    }
3315	  break;
3316	case 8:	/* e.g. MOV Xd, #<imm64>.  */
3317	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3318		    opnd->imm.value, opnd->imm.value);
3319	  break;
3320	default: assert (0);
3321	}
3322      break;
3323
3324    case AARCH64_OPND_FPIMM0:
3325      snprintf (buf, size, "#0.0");
3326      break;
3327
3328    case AARCH64_OPND_LIMM:
3329    case AARCH64_OPND_AIMM:
3330    case AARCH64_OPND_HALF:
3331    case AARCH64_OPND_SVE_INV_LIMM:
3332    case AARCH64_OPND_SVE_LIMM:
3333    case AARCH64_OPND_SVE_LIMM_MOV:
3334      if (opnd->shifter.amount)
3335	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3336		  opnd->shifter.amount);
3337      else
3338	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3339      break;
3340
3341    case AARCH64_OPND_SIMD_IMM:
3342    case AARCH64_OPND_SIMD_IMM_SFT:
3343      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3344	  || opnd->shifter.kind == AARCH64_MOD_NONE)
3345	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3346      else
3347	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3348		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3349		  opnd->shifter.amount);
3350      break;
3351
3352    case AARCH64_OPND_SVE_AIMM:
3353    case AARCH64_OPND_SVE_ASIMM:
3354      if (opnd->shifter.amount)
3355	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3356		  opnd->shifter.amount);
3357      else
3358	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3359      break;
3360
3361    case AARCH64_OPND_FPIMM:
3362    case AARCH64_OPND_SIMD_FPIMM:
3363    case AARCH64_OPND_SVE_FPIMM8:
3364      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3365	{
3366	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
3367	    {
3368	      half_conv_t c;
3369	      c.i = expand_fp_imm (2, opnd->imm.value);
3370	      snprintf (buf, size,  "#%.18e", c.f);
3371	    }
3372	  break;
3373	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
3374	    {
3375	      single_conv_t c;
3376	      c.i = expand_fp_imm (4, opnd->imm.value);
3377	      snprintf (buf, size,  "#%.18e", c.f);
3378	    }
3379	  break;
3380	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
3381	    {
3382	      double_conv_t c;
3383	      c.i = expand_fp_imm (8, opnd->imm.value);
3384	      snprintf (buf, size,  "#%.18e", c.d);
3385	    }
3386	  break;
3387	default: assert (0);
3388	}
3389      break;
3390
3391    case AARCH64_OPND_CCMP_IMM:
3392    case AARCH64_OPND_NZCV:
3393    case AARCH64_OPND_EXCEPTION:
3394    case AARCH64_OPND_UIMM4:
3395    case AARCH64_OPND_UIMM7:
3396      if (optional_operand_p (opcode, idx) == TRUE
3397	  && (opnd->imm.value ==
3398	      (int64_t) get_optional_operand_default_value (opcode)))
3399	/* Omit the operand, e.g. DCPS1.  */
3400	break;
3401      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3402      break;
3403
3404    case AARCH64_OPND_COND:
3405    case AARCH64_OPND_COND1:
3406      snprintf (buf, size, "%s", opnd->cond->names[0]);
3407      num_conds = ARRAY_SIZE (opnd->cond->names);
3408      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3409	{
3410	  size_t len = strlen (buf);
3411	  if (i == 1)
3412	    snprintf (buf + len, size - len, "  // %s = %s",
3413		      opnd->cond->names[0], opnd->cond->names[i]);
3414	  else
3415	    snprintf (buf + len, size - len, ", %s",
3416		      opnd->cond->names[i]);
3417	}
3418      break;
3419
3420    case AARCH64_OPND_ADDR_ADRP:
3421      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3422	+ opnd->imm.value;
3423      if (pcrel_p)
3424	*pcrel_p = 1;
3425      if (address)
3426	*address = addr;
      /* This is not necessary during disassembly, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
3431      snprintf (buf, size, "#0x%" PRIx64, addr);
3432      break;
3433
3434    case AARCH64_OPND_ADDR_PCREL14:
3435    case AARCH64_OPND_ADDR_PCREL19:
3436    case AARCH64_OPND_ADDR_PCREL21:
3437    case AARCH64_OPND_ADDR_PCREL26:
3438      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3439      if (pcrel_p)
3440	*pcrel_p = 1;
3441      if (address)
3442	*address = addr;
      /* This is not necessary during disassembly, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in *STR,
	 so here we do snprintf regardless.  */
3447      snprintf (buf, size, "#0x%" PRIx64, addr);
3448      break;
3449
3450    case AARCH64_OPND_ADDR_SIMPLE:
3451    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3452    case AARCH64_OPND_SIMD_ADDR_POST:
3453      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3454      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3455	{
3456	  if (opnd->addr.offset.is_reg)
3457	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3458	  else
3459	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3460	}
3461      else
3462	snprintf (buf, size, "[%s]", name);
3463      break;
3464
3465    case AARCH64_OPND_ADDR_REGOFF:
3466    case AARCH64_OPND_SVE_ADDR_RR:
3467    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3468    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3469    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3470    case AARCH64_OPND_SVE_ADDR_RX:
3471    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3472    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3473    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3474      print_register_offset_address
3475	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3476	 get_offset_int_reg_name (opnd));
3477      break;
3478
3479    case AARCH64_OPND_SVE_ADDR_RZ:
3480    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3481    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3482    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3483    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3484    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3485    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3486    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3487    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3488    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3489    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3490    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3491      print_register_offset_address
3492	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3493	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3494      break;
3495
3496    case AARCH64_OPND_ADDR_SIMM7:
3497    case AARCH64_OPND_ADDR_SIMM9:
3498    case AARCH64_OPND_ADDR_SIMM9_2:
3499    case AARCH64_OPND_ADDR_SIMM10:
3500    case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3501    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3502    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3503    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3504    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3505    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3506    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3507    case AARCH64_OPND_SVE_ADDR_RI_U6:
3508    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3509    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3510    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3511      print_immediate_offset_address
3512	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3513      break;
3514
3515    case AARCH64_OPND_SVE_ADDR_ZI_U5:
3516    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3517    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3518    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3519      print_immediate_offset_address
3520	(buf, size, opnd,
3521	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3522      break;
3523
3524    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3525    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3526    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3527      print_register_offset_address
3528	(buf, size, opnd,
3529	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3530	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3531      break;
3532
3533    case AARCH64_OPND_ADDR_UIMM12:
3534      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3535      if (opnd->addr.offset.imm)
3536	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3537      else
3538	snprintf (buf, size, "[%s]", name);
3539      break;
3540
3541    case AARCH64_OPND_SYSREG:
3542      for (i = 0; aarch64_sys_regs[i].name; ++i)
3543	if (aarch64_sys_regs[i].value == opnd->sysreg
3544	    && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i]))
3545	  break;
3546      if (aarch64_sys_regs[i].name)
3547	snprintf (buf, size, "%s", aarch64_sys_regs[i].name);
3548      else
3549	{
	  /* Implementation-defined system register.  */
3551	  unsigned int value = opnd->sysreg;
3552	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3553		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3554		    value & 0x7);
3555	}
3556      break;
3557
3558    case AARCH64_OPND_PSTATEFIELD:
3559      for (i = 0; aarch64_pstatefields[i].name; ++i)
3560	if (aarch64_pstatefields[i].value == opnd->pstatefield)
3561	  break;
3562      assert (aarch64_pstatefields[i].name);
3563      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3564      break;
3565
3566    case AARCH64_OPND_SYSREG_AT:
3567    case AARCH64_OPND_SYSREG_DC:
3568    case AARCH64_OPND_SYSREG_IC:
3569    case AARCH64_OPND_SYSREG_TLBI:
3570      snprintf (buf, size, "%s", opnd->sysins_op->name);
3571      break;
3572
3573    case AARCH64_OPND_BARRIER:
3574      snprintf (buf, size, "%s", opnd->barrier->name);
3575      break;
3576
3577    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in ISB.  */
3579      if (! optional_operand_p (opcode, idx)
3580	  || (opnd->barrier->value
3581	      != get_optional_operand_default_value (opcode)))
3582	snprintf (buf, size, "#0x%x", opnd->barrier->value);
3583      break;
3584
3585    case AARCH64_OPND_PRFOP:
3586      if (opnd->prfop->name != NULL)
3587	snprintf (buf, size, "%s", opnd->prfop->name);
3588      else
3589	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3590      break;
3591
3592    case AARCH64_OPND_BARRIER_PSB:
3593      snprintf (buf, size, "%s", opnd->hint_option->name);
3594      break;
3595
3596    default:
3597      assert (0);
3598    }
3599}
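
/* Illustration only (not used by the library): the AARCH64_OPND_FPIMM,
   AARCH64_OPND_SIMD_FPIMM and AARCH64_OPND_SVE_FPIMM8 cases above re-expand
   the encoded 8-bit immediate with expand_fp_imm before printing it.  The
   helper below is a minimal sketch of the usual architectural expansion rule
   for single precision only (sign : NOT(b) : b replicated five times :
   imm8<5:4>, fraction imm8<3:0> padded with zeros); the half and double
   cases follow the same pattern with different widths.  The function name is
   illustrative and nothing in this file calls it; expand_fp_imm is the
   implementation actually used.  */

static ATTRIBUTE_UNUSED uint32_t
fp8_to_single_example (uint32_t imm8)
{
  uint32_t sign = (imm8 >> 7) & 1;
  uint32_t b = (imm8 >> 6) & 1;
  /* Exponent is NOT(b) : Replicate (b, 5) : imm8<5:4>.  */
  uint32_t exp = ((b ^ 1) << 7) | ((b ? 0x1f : 0) << 2) | ((imm8 >> 4) & 3);
  /* Fraction is imm8<3:0> followed by 19 zero bits.  */
  uint32_t frac = (imm8 & 0xf) << 19;
  /* For example, fp8_to_single_example (0x70) == 0x3f800000, i.e. 1.0f.  */
  return (sign << 31) | (exp << 23) | frac;
}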
3600
3601#define CPENC(op0,op1,crn,crm,op2) \
3602  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* For section 3.9.3, Instructions for Accessing Special Purpose Registers.  */
3604#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* For section 3.9.10, System Instructions.  */
3606#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
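
/* Illustration only (not used by the library): after the final ">> 5" the
   packed value produced by CPENC places op0 in bits [15:14], op1 in [13:11],
   CRn in [10:7], CRm in [6:3] and op2 in [2:0], which is exactly the layout
   the implementation-defined system register printer above unpacks.  The
   worked example uses the encoding of tpidr_el0 (op0 3, op1 3, CRn 13,
   CRm 0, op2 2) from the table further down; the function name is
   illustrative and nothing calls it.  */

static ATTRIBUTE_UNUSED void
cpenc_layout_example (void)
{
  unsigned int value = CPENC (3, 3, 13, 0, 2);	/* tpidr_el0: 0xde82.  */
  assert (((value >> 14) & 0x3) == 3);		/* op0 */
  assert (((value >> 11) & 0x7) == 3);		/* op1 */
  assert (((value >> 7) & 0xf) == 13);		/* CRn */
  assert (((value >> 3) & 0xf) == 0);		/* CRm */
  assert ((value & 0x7) == 2);			/* op2 */
}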
3607
3608#define C0  0
3609#define C1  1
3610#define C2  2
3611#define C3  3
3612#define C4  4
3613#define C5  5
3614#define C6  6
3615#define C7  7
3616#define C8  8
3617#define C9  9
3618#define C10 10
3619#define C11 11
3620#define C12 12
3621#define C13 13
3622#define C14 14
3623#define C15 15
3624
3625#ifdef F_DEPRECATED
3626#undef F_DEPRECATED
3627#endif
3628#define F_DEPRECATED	0x1	/* Deprecated system register.  */
3629
3630#ifdef F_ARCHEXT
3631#undef F_ARCHEXT
3632#endif
3633#define F_ARCHEXT	0x2	/* Architecture dependent system register.  */
3634
3635#ifdef F_HASXT
3636#undef F_HASXT
3637#endif
3638#define F_HASXT		0x4	/* System instruction register <Xt>
3639				   operand.  */
3640
3641
/* TODO: there are two more issues that need to be resolved:
   1. handle read-only and write-only system registers;
   2. handle CPU-implementation-defined system registers.  */
3645const aarch64_sys_reg aarch64_sys_regs [] =
3646{
3647  { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
3648  { "spsr_el12",	CPEN_ (5, C0, 0), F_ARCHEXT },
3649  { "elr_el1",          CPEN_(0,C0,1),	0 },
3650  { "elr_el12",	CPEN_ (5, C0, 1), F_ARCHEXT },
3651  { "sp_el0",           CPEN_(0,C1,0),	0 },
3652  { "spsel",            CPEN_(0,C2,0),	0 },
3653  { "daif",             CPEN_(3,C2,1),	0 },
3654  { "currentel",        CPEN_(0,C2,2),	0 }, /* RO */
3655  { "pan",		CPEN_(0,C2,3),	F_ARCHEXT },
3656  { "uao",		CPEN_ (0, C2, 4), F_ARCHEXT },
3657  { "nzcv",             CPEN_(3,C2,0),	0 },
3658  { "fpcr",             CPEN_(3,C4,0),	0 },
3659  { "fpsr",             CPEN_(3,C4,1),	0 },
3660  { "dspsr_el0",        CPEN_(3,C5,0),	0 },
3661  { "dlr_el0",          CPEN_(3,C5,1),	0 },
3662  { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
3663  { "elr_el2",          CPEN_(4,C0,1),	0 },
3664  { "sp_el1",           CPEN_(4,C1,0),	0 },
3665  { "spsr_irq",         CPEN_(4,C3,0),	0 },
3666  { "spsr_abt",         CPEN_(4,C3,1),	0 },
3667  { "spsr_und",         CPEN_(4,C3,2),	0 },
3668  { "spsr_fiq",         CPEN_(4,C3,3),	0 },
3669  { "spsr_el3",         CPEN_(6,C0,0),	0 },
3670  { "elr_el3",          CPEN_(6,C0,1),	0 },
3671  { "sp_el2",           CPEN_(6,C1,0),	0 },
3672  { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
3673  { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
3674  { "midr_el1",         CPENC(3,0,C0,C0,0),	0 }, /* RO */
3675  { "ctr_el0",          CPENC(3,3,C0,C0,1),	0 }, /* RO */
3676  { "mpidr_el1",        CPENC(3,0,C0,C0,5),	0 }, /* RO */
3677  { "revidr_el1",       CPENC(3,0,C0,C0,6),	0 }, /* RO */
3678  { "aidr_el1",         CPENC(3,1,C0,C0,7),	0 }, /* RO */
3679  { "dczid_el0",        CPENC(3,3,C0,C0,7),	0 }, /* RO */
3680  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	0 }, /* RO */
3681  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	0 }, /* RO */
3682  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	0 }, /* RO */
3683  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	0 }, /* RO */
3684  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	0 }, /* RO */
3685  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	0 }, /* RO */
3686  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	0 }, /* RO */
3687  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	0 }, /* RO */
3688  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),	0 }, /* RO */
3689  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	0 }, /* RO */
3690  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	0 }, /* RO */
3691  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	0 }, /* RO */
3692  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	0 }, /* RO */
3693  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	0 }, /* RO */
3694  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	0 }, /* RO */
3695  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	0 }, /* RO */
3696  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	0 }, /* RO */
3697  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	0 }, /* RO */
3698  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	0 }, /* RO */
3699  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	0 }, /* RO */
3700  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	0 }, /* RO */
3701  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	0 }, /* RO */
3702  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	0 }, /* RO */
3703  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	0 }, /* RO */
3704  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	0 }, /* RO */
3705  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	0 }, /* RO */
3706  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	0 }, /* RO */
3707  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT }, /* RO */
3708  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	0 }, /* RO */
3709  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	0 }, /* RO */
3710  { "id_aa64zfr0_el1",  CPENC (3, 0, C0, C4, 4), F_ARCHEXT }, /* RO */
3711  { "clidr_el1",        CPENC(3,1,C0,C0,1),	0 }, /* RO */
3712  { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 }, /* RO */
3713  { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
3714  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
3715  { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
3716  { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
3717  { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
3718  { "sctlr_el12",	CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3719  { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
3720  { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
3721  { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
3722  { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
3723  { "cpacr_el12",	CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3724  { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
3725  { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
3726  { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
3727  { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
3728  { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
3729  { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
3730  { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
3731  { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
3732  { "zcr_el1",          CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3733  { "zcr_el12",         CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3734  { "zcr_el2",          CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3735  { "zcr_el3",          CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3736  { "zidr_el1",         CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3737  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
3738  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
3739  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
3740  { "ttbr1_el2",	CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3741  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
3742  { "ttbr0_el12",	CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3743  { "ttbr1_el12",	CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3744  { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
3745  { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
3746  { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
3747  { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
3748  { "tcr_el12",		CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3749  { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
3750  { "apiakeylo_el1",	CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3751  { "apiakeyhi_el1",	CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3752  { "apibkeylo_el1",	CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3753  { "apibkeyhi_el1",	CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3754  { "apdakeylo_el1",	CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3755  { "apdakeyhi_el1",	CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3756  { "apdbkeylo_el1",	CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3757  { "apdbkeyhi_el1",	CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3758  { "apgakeylo_el1",	CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3759  { "apgakeyhi_el1",	CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3760  { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
3761  { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
3762  { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
3763  { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
3764  { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
3765  { "afsr0_el12",	CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3766  { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
3767  { "afsr1_el12",	CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3768  { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
3769  { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
3770  { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
3771  { "esr_el12",		CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3772  { "vsesr_el2",	CPENC (3, 4, C5, C2, 3), F_ARCHEXT }, /* RO */
3773  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
3774  { "erridr_el1",	CPENC (3, 0, C5, C3, 0), F_ARCHEXT }, /* RO */
3775  { "errselr_el1",	CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3776  { "erxfr_el1",	CPENC (3, 0, C5, C4, 0), F_ARCHEXT }, /* RO */
3777  { "erxctlr_el1",	CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3778  { "erxstatus_el1",	CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3779  { "erxaddr_el1",	CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3780  { "erxmisc0_el1",	CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3781  { "erxmisc1_el1",	CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3782  { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
3783  { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
3784  { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
3785  { "far_el12",		CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3786  { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
3787  { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
3788  { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
3789  { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
3790  { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
3791  { "mair_el12",	CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3792  { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
3793  { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
3794  { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
3795  { "amair_el12",	CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3796  { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
3797  { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
3798  { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
3799  { "vbar_el12",	CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3800  { "rvbar_el1",        CPENC(3,0,C12,C0,1),	0 }, /* RO */
3801  { "rvbar_el2",        CPENC(3,4,C12,C0,1),	0 }, /* RO */
3802  { "rvbar_el3",        CPENC(3,6,C12,C0,1),	0 }, /* RO */
3803  { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
3804  { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
3805  { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
3806  { "isr_el1",          CPENC(3,0,C12,C1,0),	0 }, /* RO */
3807  { "disr_el1",		CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3808  { "vdisr_el2",	CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3809  { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
3810  { "contextidr_el2",	CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3811  { "contextidr_el12",	CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3812  { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
3813  { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RO */
3814  { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
3815  { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
3816  { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
3817  { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
3818  { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RO */
3819  { "cntpct_el0",       CPENC(3,3,C14,C0,1),	0 }, /* RO */
3820  { "cntvct_el0",       CPENC(3,3,C14,C0,2),	0 }, /* RO */
3821  { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
3822  { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
3823  { "cntkctl_el12",	CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3824  { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
3825  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
3826  { "cntp_tval_el02",	CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
3827  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
3828  { "cntp_ctl_el02",	CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
3829  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
3830  { "cntp_cval_el02",	CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
3831  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
3832  { "cntv_tval_el02",	CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
3833  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
3834  { "cntv_ctl_el02",	CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
3835  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
3836  { "cntv_cval_el02",	CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
3837  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
3838  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
3839  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
3840  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
3841  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
3842  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
3843  { "cnthv_tval_el2",	CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
3844  { "cnthv_ctl_el2",	CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
3845  { "cnthv_cval_el2",	CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
3846  { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
3847  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
3848  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
3849  { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
3850  { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
3851  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	0 },  /* r */
3852  { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
3853  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
3854  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* r */
3855  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	0 },  /* w */
3856  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },  /* r */
3857  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },  /* w */
3858  { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
3859  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
3860  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
3861  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
3862  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
3863  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
3864  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
3865  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
3866  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
3867  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
3868  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
3869  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
3870  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
3871  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
3872  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
3873  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
3874  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
3875  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
3876  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
3877  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
3878  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
3879  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
3880  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
3881  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
3882  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
3883  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
3884  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
3885  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
3886  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
3887  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
3888  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
3889  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
3890  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
3891  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
3892  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
3893  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
3894  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
3895  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
3896  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
3897  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
3898  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
3899  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
3900  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
3901  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
3902  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
3903  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
3904  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
3905  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
3906  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
3907  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
3908  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
3909  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
3910  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
3911  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
3912  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
3913  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
3914  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
3915  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
3916  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
3917  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
3918  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
3919  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
3920  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
3921  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
3922  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
3923  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
3924  { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	0 },  /* r */
3925  { "oslar_el1",         CPENC(2,0,C1, C0, 4),	0 },  /* w */
3926  { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	0 },  /* r */
3927  { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
3928  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
3929  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
3930  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
3931  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	0 },  /* r */
3932  { "pmblimitr_el1",	 CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
3933  { "pmbptr_el1",	 CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
3934  { "pmbsr_el1",	 CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
3935  { "pmbidr_el1",	 CPENC (3, 0, C9, C10, 7), F_ARCHEXT },  /* ro */
3936  { "pmscr_el1",	 CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
3937  { "pmsicr_el1",	 CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
3938  { "pmsirr_el1",	 CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
3939  { "pmsfcr_el1",	 CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
3940  { "pmsevfr_el1",	 CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
3941  { "pmslatfr_el1",	 CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
3942  { "pmsidr_el1",	 CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* ro */
3943  { "pmscr_el2",	 CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
3944  { "pmscr_el12",	 CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
3945  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
3946  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
3947  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
3948  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
3949  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	0 },  /* w */
3950  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
3951  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	0 },  /* r */
3952  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	0 },  /* r */
3953  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
3954  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
3955  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
3956  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
3957  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
3958  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
3959  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
3960  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
3961  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
3962  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
3963  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
3964  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
3965  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
3966  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
3967  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
3968  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
3969  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
3970  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
3971  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
3972  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
3973  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
3974  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
3975  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
3976  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
3977  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
3978  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
3979  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
3980  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
3981  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
3982  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
3983  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
3984  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
3985  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
3986  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
3987  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
3988  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
3989  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
3990  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
3991  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
3992  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
3993  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
3994  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
3995  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
3996  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
3997  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
3998  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
3999  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
4000  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
4001  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
4002  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
4003  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
4004  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
4005  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
4006  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
4007  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
4008  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
4009  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
4010  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
4011  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
4012  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
4013  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
4014  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
4015  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
4016  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
4017  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
4018  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
4019  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
4020  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
4021  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
4022  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
4023  { 0,          CPENC(0,0,0,0,0),	0 },
4024};
4025
4026bfd_boolean
4027aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4028{
4029  return (reg->flags & F_DEPRECATED) != 0;
4030}
4031
4032bfd_boolean
4033aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4034			     const aarch64_sys_reg *reg)
4035{
4036  if (!(reg->flags & F_ARCHEXT))
4037    return TRUE;
4038
4039  /* PAN.  Values are from aarch64_sys_regs.  */
4040  if (reg->value == CPEN_(0,C2,3)
4041      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4042    return FALSE;
4043
4044  /* Virtualization host extensions: system registers.  */
4045  if ((reg->value == CPENC (3, 4, C2, C0, 1)
4046       || reg->value == CPENC (3, 4, C13, C0, 1)
4047       || reg->value == CPENC (3, 4, C14, C3, 0)
4048       || reg->value == CPENC (3, 4, C14, C3, 1)
4049       || reg->value == CPENC (3, 4, C14, C3, 2))
4050      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;
4052
4053  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
4054  if ((reg->value == CPEN_ (5, C0, 0)
4055       || reg->value == CPEN_ (5, C0, 1)
4056       || reg->value == CPENC (3, 5, C1, C0, 0)
4057       || reg->value == CPENC (3, 5, C1, C0, 2)
4058       || reg->value == CPENC (3, 5, C2, C0, 0)
4059       || reg->value == CPENC (3, 5, C2, C0, 1)
4060       || reg->value == CPENC (3, 5, C2, C0, 2)
4061       || reg->value == CPENC (3, 5, C5, C1, 0)
4062       || reg->value == CPENC (3, 5, C5, C1, 1)
4063       || reg->value == CPENC (3, 5, C5, C2, 0)
4064       || reg->value == CPENC (3, 5, C6, C0, 0)
4065       || reg->value == CPENC (3, 5, C10, C2, 0)
4066       || reg->value == CPENC (3, 5, C10, C3, 0)
4067       || reg->value == CPENC (3, 5, C12, C0, 0)
4068       || reg->value == CPENC (3, 5, C13, C0, 1)
4069       || reg->value == CPENC (3, 5, C14, C1, 0))
4070      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4071    return FALSE;
4072
4073  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
4074  if ((reg->value == CPENC (3, 5, C14, C2, 0)
4075       || reg->value == CPENC (3, 5, C14, C2, 1)
4076       || reg->value == CPENC (3, 5, C14, C2, 2)
4077       || reg->value == CPENC (3, 5, C14, C3, 0)
4078       || reg->value == CPENC (3, 5, C14, C3, 1)
4079       || reg->value == CPENC (3, 5, C14, C3, 2))
4080      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4081    return FALSE;
4082
4083  /* ARMv8.2 features.  */
4084
4085  /* ID_AA64MMFR2_EL1.  */
4086  if (reg->value == CPENC (3, 0, C0, C7, 2)
4087      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4088    return FALSE;
4089
4090  /* PSTATE.UAO.  */
4091  if (reg->value == CPEN_ (0, C2, 4)
4092      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4093    return FALSE;
4094
4095  /* RAS extension.  */
4096
  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
4099  if ((reg->value == CPENC (3, 0, C5, C3, 0)
4100       || reg->value == CPENC (3, 0, C5, C3, 1)
4101       || reg->value == CPENC (3, 0, C5, C3, 2)
4102       || reg->value == CPENC (3, 0, C5, C3, 3)
4103       || reg->value == CPENC (3, 0, C5, C4, 0)
4104       || reg->value == CPENC (3, 0, C5, C4, 1)
4105       || reg->value == CPENC (3, 0, C5, C4, 2)
4106       || reg->value == CPENC (3, 0, C5, C4, 3)
4107       || reg->value == CPENC (3, 0, C5, C5, 0)
4108       || reg->value == CPENC (3, 0, C5, C5, 1))
4109      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4110    return FALSE;
4111
4112  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
4113  if ((reg->value == CPENC (3, 4, C5, C2, 3)
4114       || reg->value == CPENC (3, 0, C12, C1, 1)
4115       || reg->value == CPENC (3, 4, C12, C1, 1))
4116      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4117    return FALSE;
4118
4119  /* Statistical Profiling extension.  */
4120  if ((reg->value == CPENC (3, 0, C9, C10, 0)
4121       || reg->value == CPENC (3, 0, C9, C10, 1)
4122       || reg->value == CPENC (3, 0, C9, C10, 3)
4123       || reg->value == CPENC (3, 0, C9, C10, 7)
4124       || reg->value == CPENC (3, 0, C9, C9, 0)
4125       || reg->value == CPENC (3, 0, C9, C9, 2)
4126       || reg->value == CPENC (3, 0, C9, C9, 3)
4127       || reg->value == CPENC (3, 0, C9, C9, 4)
4128       || reg->value == CPENC (3, 0, C9, C9, 5)
4129       || reg->value == CPENC (3, 0, C9, C9, 6)
4130       || reg->value == CPENC (3, 0, C9, C9, 7)
4131       || reg->value == CPENC (3, 4, C9, C9, 0)
4132       || reg->value == CPENC (3, 5, C9, C9, 0))
4133      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4134    return FALSE;
4135
4136  /* ARMv8.3 Pointer authentication keys.  */
4137  if ((reg->value == CPENC (3, 0, C2, C1, 0)
4138       || reg->value == CPENC (3, 0, C2, C1, 1)
4139       || reg->value == CPENC (3, 0, C2, C1, 2)
4140       || reg->value == CPENC (3, 0, C2, C1, 3)
4141       || reg->value == CPENC (3, 0, C2, C2, 0)
4142       || reg->value == CPENC (3, 0, C2, C2, 1)
4143       || reg->value == CPENC (3, 0, C2, C2, 2)
4144       || reg->value == CPENC (3, 0, C2, C2, 3)
4145       || reg->value == CPENC (3, 0, C2, C3, 0)
4146       || reg->value == CPENC (3, 0, C2, C3, 1))
4147      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4148    return FALSE;
4149
4150  /* SVE.  */
4151  if ((reg->value == CPENC (3, 0, C0, C4, 4)
4152       || reg->value == CPENC (3, 0, C1, C2, 0)
4153       || reg->value == CPENC (3, 4, C1, C2, 0)
4154       || reg->value == CPENC (3, 6, C1, C2, 0)
4155       || reg->value == CPENC (3, 5, C1, C2, 0)
4156       || reg->value == CPENC (3, 0, C0, C0, 7))
4157      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4158    return FALSE;
4159
4160  return TRUE;
4161}
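
/* Illustration only (not part of the library interface): a minimal sketch of
   how a client could resolve a system register name against aarch64_sys_regs
   while honouring the feature check defined above.  The helper name is
   hypothetical, strcmp is assumed to be available via sysdep.h, and a real
   assembler would also fold case and typically use a hash table rather than
   a linear scan.  */

static ATTRIBUTE_UNUSED const aarch64_sys_reg *
lookup_sys_reg_example (const char *name, const aarch64_feature_set features)
{
  const aarch64_sys_reg *reg;

  for (reg = aarch64_sys_regs; reg->name != NULL; ++reg)
    if (strcmp (reg->name, name) == 0
	&& aarch64_sys_reg_supported_p (features, reg))
      return reg;
  return NULL;
}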
4162
4163const aarch64_sys_reg aarch64_pstatefields [] =
4164{
4165  { "spsel",            0x05,	0 },
4166  { "daifset",          0x1e,	0 },
4167  { "daifclr",          0x1f,	0 },
4168  { "pan",		0x04,	F_ARCHEXT },
4169  { "uao",		0x03,	F_ARCHEXT },
4170  { 0,          CPENC(0,0,0,0,0), 0 },
4171};
4172
4173bfd_boolean
4174aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4175				 const aarch64_sys_reg *reg)
4176{
4177  if (!(reg->flags & F_ARCHEXT))
4178    return TRUE;
4179
4180  /* PAN.  Values are from aarch64_pstatefields.  */
4181  if (reg->value == 0x04
4182      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4183    return FALSE;
4184
4185  /* UAO.  Values are from aarch64_pstatefields.  */
4186  if (reg->value == 0x03
4187      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4188    return FALSE;
4189
4190  return TRUE;
4191}
4192
4193const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4194{
4195    { "ialluis", CPENS(0,C7,C1,0), 0 },
4196    { "iallu",   CPENS(0,C7,C5,0), 0 },
4197    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
4198    { 0, CPENS(0,0,0,0), 0 }
4199};
4200
4201const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4202{
4203    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
4204    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
4205    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
4206    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
4207    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
4208    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
4209    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4210    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
4211    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
4212    { 0,       CPENS(0,0,0,0), 0 }
4213};
4214
4215const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4216{
4217    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
4218    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
4219    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
4220    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
4221    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
4222    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
4223    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
4224    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
4225    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
4226    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
4227    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
4228    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
4229    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4230    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4231    { 0,       CPENS(0,0,0,0), 0 }
4232};
4233
4234const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
4235{
4236    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
4237    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
4238    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
4239    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
4240    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
4241    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
4242    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
4243    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
4244    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
4245    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
4246    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
4247    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
4248    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
4249    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
4250    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
4251    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
4252    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
4253    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
4254    { "alle2",     CPENS(4,C8,C7,0), 0 },
4255    { "alle2is",   CPENS(4,C8,C3,0), 0 },
4256    { "alle1",     CPENS(4,C8,C7,4), 0 },
4257    { "alle1is",   CPENS(4,C8,C3,4), 0 },
4258    { "alle3",     CPENS(6,C8,C7,0), 0 },
4259    { "alle3is",   CPENS(6,C8,C3,0), 0 },
4260    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
4261    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
4262    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
4263    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
4264    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
4265    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
4266    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
4267    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
4268    { 0,       CPENS(0,0,0,0), 0 }
4269};
4270
4271bfd_boolean
4272aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
4273{
4274  return (sys_ins_reg->flags & F_HASXT) != 0;
4275}
4276
4277extern bfd_boolean
4278aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
4279				 const aarch64_sys_ins_reg *reg)
4280{
4281  if (!(reg->flags & F_ARCHEXT))
4282    return TRUE;
4283
4284  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
4285  if (reg->value == CPENS (3, C7, C12, 1)
4286      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4287    return FALSE;
4288
4289  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
4290  if ((reg->value == CPENS (0, C7, C9, 0)
4291       || reg->value == CPENS (0, C7, C9, 1))
4292      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4293    return FALSE;
4294
4295  return TRUE;
4296}
4297
4298#undef C0
4299#undef C1
4300#undef C2
4301#undef C3
4302#undef C4
4303#undef C5
4304#undef C6
4305#undef C7
4306#undef C8
4307#undef C9
4308#undef C10
4309#undef C11
4310#undef C12
4311#undef C13
4312#undef C14
4313#undef C15
4314
4315#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
4316#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
4317
4318static bfd_boolean
4319verify_ldpsw (const struct aarch64_opcode * opcode ATTRIBUTE_UNUSED,
4320	      const aarch64_insn insn)
4321{
4322  int t  = BITS (insn, 4, 0);
4323  int n  = BITS (insn, 9, 5);
4324  int t2 = BITS (insn, 14, 10);
4325
  if (BIT (insn, 23))
    {
      /* Write back enabled: the base register must not overlap either
	 transfer register unless the base is the stack pointer.  */
      if ((t == n || t2 == n) && n != 31)
	return FALSE;
    }

  if (BIT (insn, 22))
    {
      /* Load: the two transfer registers must differ.  */
      if (t == t2)
	return FALSE;
    }
4339
4340  return TRUE;
4341}
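
/* Illustration only (not called anywhere): the overlaps rejected above
   correspond to LDPSW register combinations that the architecture treats as
   unpredictable.  Only the fields the check actually reads (Rt, Rn, Rt2 and
   the load and write-back bits) are populated below, so these words are
   field sketches rather than complete LDPSW encodings.  */

static ATTRIBUTE_UNUSED void
verify_ldpsw_example (void)
{
  aarch64_insn load = (aarch64_insn) 1 << 22;
  aarch64_insn wb = (aarch64_insn) 1 << 23;

  /* Load with Rt == Rt2, e.g. LDPSW x1, x1, [x2, #8]: rejected.  */
  assert (!verify_ldpsw (NULL, load | (1 << 10) | (2 << 5) | 1));
  /* Write-back with Rn == Rt and Rn != 31: rejected.  */
  assert (!verify_ldpsw (NULL, load | wb | (3 << 10) | (1 << 5) | 1));
  /* Write-back to SP (Rn == 31) with distinct Rt and Rt2: accepted.  */
  assert (verify_ldpsw (NULL, load | wb | (3 << 10) | (31 << 5) | 1));
}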
4342
/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */
4346
4347bfd_boolean
4348aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
4349{
4350  int64_t svalue = uvalue;
4351  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
4352
4353  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
4354    return FALSE;
4355  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
4356    {
4357      svalue = (int32_t) uvalue;
4358      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
4359	{
4360	  svalue = (int16_t) uvalue;
4361	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
4362	    return FALSE;
4363	}
4364    }
4365  if ((svalue & 0xff) == 0)
4366    svalue /= 256;
4367  return svalue < -128 || svalue >= 128;
4368}
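
/* Illustration only: two hand-checked examples of the decision above.
   0xfff is not a pattern that DUP can build for any element size (it is not
   a replicated sub-pattern and does not fit the signed 8-bit immediate,
   optionally shifted left by 8), but it is a valid bitmask immediate, so
   DUPM is the right encoding.  A replicated byte such as 0x33...33 is
   handled by DUP Zd.b, #0x33 and so does not need DUPM.  The function name
   below is illustrative and nothing calls it.  */

static ATTRIBUTE_UNUSED void
sve_dupm_mov_immediate_example (void)
{
  assert (aarch64_sve_dupm_mov_immediate_p (0xfff, 8));
  assert (!aarch64_sve_dupm_mov_immediate_p (0x3333333333333333ULL, 8));
}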
4369
4370/* Include the opcode description table as well as the operand description
4371   table.  */
4372#define VERIFIER(x) verify_##x
4373#include "aarch64-tbl.h"
4374