1/* aarch64-opc.c -- AArch64 opcode support.
2   Copyright (C) 2009-2020 Free Software Foundation, Inc.
3   Contributed by ARM Ltd.
4
5   This file is part of the GNU opcodes library.
6
7   This library is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3, or (at your option)
10   any later version.
11
12   It is distributed in the hope that it will be useful, but WITHOUT
13   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15   License for more details.
16
17   You should have received a copy of the GNU General Public License
18   along with this program; see the file COPYING3. If not,
19   see <http://www.gnu.org/licenses/>.  */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include "bfd_stdint.h"
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30#include "libiberty.h"
31
32#include "aarch64-opc.h"
33
34#ifdef DEBUG_AARCH64
35int debug_dump = FALSE;
36#endif /* DEBUG_AARCH64 */
37
38/* The enumeration strings associated with each value of a 5-bit SVE
39   pattern operand.  A null entry indicates a reserved meaning.  */
40const char *const aarch64_sve_pattern_array[32] = {
41  /* 0-7.  */
42  "pow2",
43  "vl1",
44  "vl2",
45  "vl3",
46  "vl4",
47  "vl5",
48  "vl6",
49  "vl7",
50  /* 8-15.  */
51  "vl8",
52  "vl16",
53  "vl32",
54  "vl64",
55  "vl128",
56  "vl256",
57  0,
58  0,
59  /* 16-23.  */
60  0,
61  0,
62  0,
63  0,
64  0,
65  0,
66  0,
67  0,
68  /* 24-31.  */
69  0,
70  0,
71  0,
72  0,
73  0,
74  "mul4",
75  "mul3",
76  "all"
77};
78
79/* The enumeration strings associated with each value of a 4-bit SVE
80   prefetch operand.  A null entry indicates a reserved meaning.  */
81const char *const aarch64_sve_prfop_array[16] = {
82  /* 0-7.  */
83  "pldl1keep",
84  "pldl1strm",
85  "pldl2keep",
86  "pldl2strm",
87  "pldl3keep",
88  "pldl3strm",
89  0,
90  0,
91  /* 8-15.  */
92  "pstl1keep",
93  "pstl1strm",
94  "pstl2keep",
95  "pstl2strm",
96  "pstl3keep",
97  "pstl3strm",
98  0,
99  0
100};
101
/* Helper functions to determine which operand is to be used to encode/decode
   the size:Q fields for AdvSIMD instructions.  */
104
105static inline bfd_boolean
106vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107{
108  return ((qualifier >= AARCH64_OPND_QLF_V_8B
109	  && qualifier <= AARCH64_OPND_QLF_V_1Q) ? TRUE
110	  : FALSE);
111}
112
113static inline bfd_boolean
114fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
115{
116  return ((qualifier >= AARCH64_OPND_QLF_S_B
117	  && qualifier <= AARCH64_OPND_QLF_S_Q) ? TRUE
118	  : FALSE);
119}
120
121enum data_pattern
122{
123  DP_UNKNOWN,
124  DP_VECTOR_3SAME,
125  DP_VECTOR_LONG,
126  DP_VECTOR_WIDE,
127  DP_VECTOR_ACROSS_LANES,
128};
129
130static const char significant_operand_index [] =
131{
132  0,	/* DP_UNKNOWN, by default using operand 0.  */
133  0,	/* DP_VECTOR_3SAME */
134  1,	/* DP_VECTOR_LONG */
135  2,	/* DP_VECTOR_WIDE */
136  1,	/* DP_VECTOR_ACROSS_LANES */
137};
138
139/* Given a sequence of qualifiers in QUALIFIERS, determine and return
140   the data pattern.
141   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
142   corresponds to one of a sequence of operands.  */
143
144static enum data_pattern
145get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
146{
147  if (vector_qualifier_p (qualifiers[0]) == TRUE)
148    {
149      /* e.g. v.4s, v.4s, v.4s
150	   or v.4h, v.4h, v.h[3].  */
151      if (qualifiers[0] == qualifiers[1]
152	  && vector_qualifier_p (qualifiers[2]) == TRUE
153	  && (aarch64_get_qualifier_esize (qualifiers[0])
154	      == aarch64_get_qualifier_esize (qualifiers[1]))
155	  && (aarch64_get_qualifier_esize (qualifiers[0])
156	      == aarch64_get_qualifier_esize (qualifiers[2])))
157	return DP_VECTOR_3SAME;
158      /* e.g. v.8h, v.8b, v.8b.
159           or v.4s, v.4h, v.h[2].
160	   or v.8h, v.16b.  */
161      if (vector_qualifier_p (qualifiers[1]) == TRUE
162	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
163	  && (aarch64_get_qualifier_esize (qualifiers[0])
164	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
165	return DP_VECTOR_LONG;
166      /* e.g. v.8h, v.8h, v.8b.  */
167      if (qualifiers[0] == qualifiers[1]
168	  && vector_qualifier_p (qualifiers[2]) == TRUE
169	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
170	  && (aarch64_get_qualifier_esize (qualifiers[0])
171	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
172	  && (aarch64_get_qualifier_esize (qualifiers[0])
173	      == aarch64_get_qualifier_esize (qualifiers[1])))
174	return DP_VECTOR_WIDE;
175    }
176  else if (fp_qualifier_p (qualifiers[0]) == TRUE)
177    {
178      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
179      if (vector_qualifier_p (qualifiers[1]) == TRUE
180	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
181	return DP_VECTOR_ACROSS_LANES;
182    }
183
184  return DP_UNKNOWN;
185}
186
187/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
188   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that avoids calling
   get_data_pattern each time we need to select an operand.  We could
   either cache the calculated result or generate the data statically;
   however, it is not obvious that the optimization would bring a
   significant benefit.  */
194
195int
196aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
197{
198  return
199    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
200}
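
/* For illustration, with the qualifier sequence v.8h, v.8b, v.8b the data
   pattern is DP_VECTOR_LONG and the size:Q information is carried by
   operand 1, whereas with v.8h, v.8h, v.8b (DP_VECTOR_WIDE) it is carried
   by operand 2, per significant_operand_index above.  */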
201
202const aarch64_field fields[] =
203{
204    {  0,  0 },	/* NIL.  */
205    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
206    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
207    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
208    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
209    {  5, 19 },	/* imm19: e.g. in CBZ.  */
210    {  5, 19 },	/* immhi: e.g. in ADRP.  */
211    { 29,  2 },	/* immlo: e.g. in ADRP.  */
212    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
213    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
214    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
215    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
216    {  0,  5 },	/* Rt: in load/store instructions.  */
217    {  0,  5 },	/* Rd: in many integer instructions.  */
218    {  5,  5 },	/* Rn: in many integer instructions.  */
219    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
220    { 10,  5 },	/* Ra: in fp instructions.  */
221    {  5,  3 },	/* op2: in the system instructions.  */
222    {  8,  4 },	/* CRm: in the system instructions.  */
223    { 12,  4 },	/* CRn: in the system instructions.  */
224    { 16,  3 },	/* op1: in the system instructions.  */
225    { 19,  2 },	/* op0: in the system instructions.  */
226    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
227    { 12,  4 },	/* cond: condition flags as a source operand.  */
228    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
229    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
230    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
231    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
232    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
233    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
234    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
235    { 12,  1 },	/* S: in load/store reg offset instructions.  */
236    { 21,  2 },	/* hw: in move wide constant instructions.  */
237    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
238    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
239    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
240    { 22,  2 },	/* type: floating point type field in fp data inst.  */
241    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
242    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
243    { 15,  6 },	/* imm6_2: in rmif instructions.  */
244    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
245    {  0,  4 },	/* imm4_2: in rmif instructions.  */
246    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
247    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
248    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
249    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
250    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
251    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
252    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
253    {  5, 16 },	/* imm16: in exception instructions.  */
254    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
255    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
256    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
257    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
258    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
259    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
260    { 22,  1 },	/* N: in logical (immediate) instructions.  */
261    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
262    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
263    { 31,  1 },	/* sf: in integer data processing instructions.  */
264    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
265    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
266    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
267    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
268    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
269    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
270    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
271    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
272    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
273    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
274    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
275    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
276    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
277    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
278    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
279    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
280    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
281    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
282    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
283    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
284    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
285    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
286    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
287    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
288    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
289    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
    {  0,  5 }, /* SVE_Zd: SVE vector register, bits [4,0].  */
    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16].  */
293    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
294    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
295    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
296    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
297    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20.  */
300    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
301    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
302    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
303    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
304    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
305    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
306    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
307    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
308    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
309    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
310    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
311    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
312    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
313    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
314    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
315    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
316    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
317    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
318    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
319    { 16,  4 }, /* SVE_tsz: triangular size select.  */
320    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
321    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
322    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
323    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
324    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
325    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
326    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
327    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
328    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
329    { 22,  1 }, /* sz: 1-bit element size select.  */
330};
331
332enum aarch64_operand_class
333aarch64_get_operand_class (enum aarch64_opnd type)
334{
335  return aarch64_operands[type].op_class;
336}
337
338const char *
339aarch64_get_operand_name (enum aarch64_opnd type)
340{
341  return aarch64_operands[type].name;
342}
343
/* Get operand description string.
   This is usually for diagnostic purposes.  */
346const char *
347aarch64_get_operand_desc (enum aarch64_opnd type)
348{
349  return aarch64_operands[type].desc;
350}
351
352/* Table of all conditional affixes.  */
353const aarch64_cond aarch64_conds[16] =
354{
355  {{"eq", "none"}, 0x0},
356  {{"ne", "any"}, 0x1},
357  {{"cs", "hs", "nlast"}, 0x2},
358  {{"cc", "lo", "ul", "last"}, 0x3},
359  {{"mi", "first"}, 0x4},
360  {{"pl", "nfrst"}, 0x5},
361  {{"vs"}, 0x6},
362  {{"vc"}, 0x7},
363  {{"hi", "pmore"}, 0x8},
364  {{"ls", "plast"}, 0x9},
365  {{"ge", "tcont"}, 0xa},
366  {{"lt", "tstop"}, 0xb},
367  {{"gt"}, 0xc},
368  {{"le"}, 0xd},
369  {{"al"}, 0xe},
370  {{"nv"}, 0xf},
371};
372
373const aarch64_cond *
374get_cond_from_value (aarch64_insn value)
375{
376  assert (value < 16);
377  return &aarch64_conds[(unsigned int) value];
378}
379
380const aarch64_cond *
381get_inverted_cond (const aarch64_cond *cond)
382{
383  return &aarch64_conds[cond->value ^ 0x1];
384}
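
/* As an illustration of the table above: inverting "eq" (0x0) gives
   "ne" (0x1) and inverting "hi" (0x8) gives "ls" (0x9); complementary
   conditions sit at adjacent even/odd values, which is why XOR with 1
   suffices.  */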
385
386/* Table describing the operand extension/shifting operators; indexed by
387   enum aarch64_modifier_kind.
388
389   The value column provides the most common values for encoding modifiers,
390   which enables table-driven encoding/decoding for the modifiers.  */
391const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
392{
393    {"none", 0x0},
394    {"msl",  0x0},
395    {"ror",  0x3},
396    {"asr",  0x2},
397    {"lsr",  0x1},
398    {"lsl",  0x0},
399    {"uxtb", 0x0},
400    {"uxth", 0x1},
401    {"uxtw", 0x2},
402    {"uxtx", 0x3},
403    {"sxtb", 0x4},
404    {"sxth", 0x5},
405    {"sxtw", 0x6},
406    {"sxtx", 0x7},
407    {"mul", 0x0},
408    {"mul vl", 0x0},
409    {NULL, 0},
410};
411
412enum aarch64_modifier_kind
413aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
414{
415  return desc - aarch64_operand_modifiers;
416}
417
418aarch64_insn
419aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
420{
421  return aarch64_operand_modifiers[kind].value;
422}
423
424enum aarch64_modifier_kind
425aarch64_get_operand_modifier_from_value (aarch64_insn value,
426					 bfd_boolean extend_p)
427{
428  if (extend_p == TRUE)
429    return AARCH64_MOD_UXTB + value;
430  else
431    return AARCH64_MOD_LSL - value;
432}
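
/* For example (given that enum aarch64_modifier_kind follows the table
   order above), a value of 2 with EXTEND_P yields AARCH64_MOD_UXTW, while
   a value of 1 without it yields AARCH64_MOD_LSR, matching the encoding
   values listed in aarch64_operand_modifiers.  */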
433
434bfd_boolean
435aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
436{
437  return (kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX)
438    ? TRUE : FALSE;
439}
440
441static inline bfd_boolean
442aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
443{
444  return (kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL)
445    ? TRUE : FALSE;
446}
447
448const struct aarch64_name_value_pair aarch64_barrier_options[16] =
449{
450    { "#0x00", 0x0 },
451    { "oshld", 0x1 },
452    { "oshst", 0x2 },
453    { "osh",   0x3 },
454    { "#0x04", 0x4 },
455    { "nshld", 0x5 },
456    { "nshst", 0x6 },
457    { "nsh",   0x7 },
458    { "#0x08", 0x8 },
459    { "ishld", 0x9 },
460    { "ishst", 0xa },
461    { "ish",   0xb },
462    { "#0x0c", 0xc },
463    { "ld",    0xd },
464    { "st",    0xe },
465    { "sy",    0xf },
466};
467
468/* Table describing the operands supported by the aliases of the HINT
469   instruction.
470
471   The name column is the operand that is accepted for the alias.  The value
472   column is the hint number of the alias.  The list of operands is terminated
473   by NULL in the name column.  */
474
475const struct aarch64_name_value_pair aarch64_hint_options[] =
476{
477  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
478  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
479  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
480  { "c",	HINT_OPD_C },		/* BTI C.  */
481  { "j",	HINT_OPD_J },		/* BTI J.  */
482  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
483  { NULL,	HINT_OPD_NULL },
484};
485
486/* op -> op:       load = 0 instruction = 1 store = 2
487   l  -> level:    1-3
488   t  -> temporal: temporal (retained) = 0 non-temporal (streaming) = 1   */
489#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
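/* For example, B (2, 3, 1) is (2 << 3) | ((3 - 1) << 1) | 1 = 0x15, the
   value used for "pstl3strm" below.  */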
490const struct aarch64_name_value_pair aarch64_prfops[32] =
491{
492  { "pldl1keep", B(0, 1, 0) },
493  { "pldl1strm", B(0, 1, 1) },
494  { "pldl2keep", B(0, 2, 0) },
495  { "pldl2strm", B(0, 2, 1) },
496  { "pldl3keep", B(0, 3, 0) },
497  { "pldl3strm", B(0, 3, 1) },
498  { NULL, 0x06 },
499  { NULL, 0x07 },
500  { "plil1keep", B(1, 1, 0) },
501  { "plil1strm", B(1, 1, 1) },
502  { "plil2keep", B(1, 2, 0) },
503  { "plil2strm", B(1, 2, 1) },
504  { "plil3keep", B(1, 3, 0) },
505  { "plil3strm", B(1, 3, 1) },
506  { NULL, 0x0e },
507  { NULL, 0x0f },
508  { "pstl1keep", B(2, 1, 0) },
509  { "pstl1strm", B(2, 1, 1) },
510  { "pstl2keep", B(2, 2, 0) },
511  { "pstl2strm", B(2, 2, 1) },
512  { "pstl3keep", B(2, 3, 0) },
513  { "pstl3strm", B(2, 3, 1) },
514  { NULL, 0x16 },
515  { NULL, 0x17 },
516  { NULL, 0x18 },
517  { NULL, 0x19 },
518  { NULL, 0x1a },
519  { NULL, 0x1b },
520  { NULL, 0x1c },
521  { NULL, 0x1d },
522  { NULL, 0x1e },
523  { NULL, 0x1f },
524};
525#undef B
526
527/* Utilities on value constraint.  */
528
529static inline int
530value_in_range_p (int64_t value, int low, int high)
531{
532  return (value >= low && value <= high) ? 1 : 0;
533}
534
535/* Return true if VALUE is a multiple of ALIGN.  */
536static inline int
537value_aligned_p (int64_t value, int align)
538{
539  return (value % align) == 0;
540}
541
542/* A signed value fits in a field.  */
543static inline int
544value_fit_signed_field_p (int64_t value, unsigned width)
545{
546  assert (width < 32);
547  if (width < sizeof (value) * 8)
548    {
549      int64_t lim = (uint64_t) 1 << (width - 1);
550      if (value >= -lim && value < lim)
551	return 1;
552    }
553  return 0;
554}
555
556/* An unsigned value fits in a field.  */
557static inline int
558value_fit_unsigned_field_p (int64_t value, unsigned width)
559{
560  assert (width < 32);
561  if (width < sizeof (value) * 8)
562    {
563      int64_t lim = (uint64_t) 1 << width;
564      if (value >= 0 && value < lim)
565	return 1;
566    }
567  return 0;
568}
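
/* For example, a signed 6-bit field holds values in [-32, 31] and an
   unsigned 6-bit field holds values in [0, 63], so
   value_fit_signed_field_p (-33, 6) and value_fit_unsigned_field_p (64, 6)
   both return 0.  */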
569
570/* Return 1 if OPERAND is SP or WSP.  */
571int
572aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
573{
574  return ((aarch64_get_operand_class (operand->type)
575	   == AARCH64_OPND_CLASS_INT_REG)
576	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
577	  && operand->reg.regno == 31);
578}
579
/* Return 1 if OPERAND is XZR or WZR.  */
581int
582aarch64_zero_register_p (const aarch64_opnd_info *operand)
583{
584  return ((aarch64_get_operand_class (operand->type)
585	   == AARCH64_OPND_CLASS_INT_REG)
586	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
587	  && operand->reg.regno == 31);
588}
589
590/* Return true if the operand *OPERAND that has the operand code
591   OPERAND->TYPE and been qualified by OPERAND->QUALIFIER can be also
592   qualified by the qualifier TARGET.  */
593
594static inline int
595operand_also_qualified_p (const struct aarch64_opnd_info *operand,
596			  aarch64_opnd_qualifier_t target)
597{
598  switch (operand->qualifier)
599    {
600    case AARCH64_OPND_QLF_W:
601      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
602	return 1;
603      break;
604    case AARCH64_OPND_QLF_X:
605      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
606	return 1;
607      break;
608    case AARCH64_OPND_QLF_WSP:
609      if (target == AARCH64_OPND_QLF_W
610	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
611	return 1;
612      break;
613    case AARCH64_OPND_QLF_SP:
614      if (target == AARCH64_OPND_QLF_X
615	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
616	return 1;
617      break;
618    default:
619      break;
620    }
621
622  return 0;
623}
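
/* For example, an operand whose type allows the stack pointer and which
   names register 31 with qualifier W can also be qualified as WSP, per
   the first case above.  */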
624
625/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
626   for operand KNOWN_IDX, return the expected qualifier for operand IDX.
627
   Return NIL if more than one expected qualifier is found.  */
629
630aarch64_opnd_qualifier_t
631aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
632				int idx,
633				const aarch64_opnd_qualifier_t known_qlf,
634				int known_idx)
635{
636  int i, saved_i;
637
638  /* Special case.
639
640     When the known qualifier is NIL, we have to assume that there is only
641     one qualifier sequence in the *QSEQ_LIST and return the corresponding
642     qualifier directly.  One scenario is that for instruction
643	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
644     which has only one possible valid qualifier sequence
645	NIL, S_D
646     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
647     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
648
649     Because the qualifier NIL has dual roles in the qualifier sequence:
     it can mean no qualifier for the operand, or the qualifier sequence is
651     not in use (when all qualifiers in the sequence are NILs), we have to
652     handle this special case here.  */
653  if (known_qlf == AARCH64_OPND_NIL)
654    {
655      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
656      return qseq_list[0][idx];
657    }
658
659  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
660    {
661      if (qseq_list[i][known_idx] == known_qlf)
662	{
663	  if (saved_i != -1)
	    /* More than one sequence has KNOWN_QLF at
	       KNOWN_IDX.  */
666	    return AARCH64_OPND_NIL;
667	  saved_i = i;
668	}
669    }
670
671  return qseq_list[saved_i][idx];
672}
673
674enum operand_qualifier_kind
675{
676  OQK_NIL,
677  OQK_OPD_VARIANT,
678  OQK_VALUE_IN_RANGE,
679  OQK_MISC,
680};
681
682/* Operand qualifier description.  */
683struct operand_qualifier_data
684{
685  /* The usage of the three data fields depends on the qualifier kind.  */
686  int data0;
687  int data1;
688  int data2;
689  /* Description.  */
690  const char *desc;
691  /* Kind.  */
692  enum operand_qualifier_kind kind;
693};
694
695/* Indexed by the operand qualifier enumerators.  */
696struct operand_qualifier_data aarch64_opnd_qualifiers[] =
697{
698  {0, 0, 0, "NIL", OQK_NIL},
699
700  /* Operand variant qualifiers.
701     First 3 fields:
702     element size, number of elements and common value for encoding.  */
703
704  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
705  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
706  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
707  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
708
709  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
710  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
711  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
712  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
713  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
714  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
715  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
716
717  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
718  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
719  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
720  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
721  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
722  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
723  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
724  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
725  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
726  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
727  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
728
729  {0, 0, 0, "z", OQK_OPD_VARIANT},
730  {0, 0, 0, "m", OQK_OPD_VARIANT},
731
732  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
733  {16, 0, 0, "tag", OQK_OPD_VARIANT},
734
735  /* Qualifiers constraining the value range.
736     First 3 fields:
737     Lower bound, higher bound, unused.  */
738
739  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
740  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
741  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
742  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
743  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
744  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
745  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
746
  /* Qualifiers for miscellaneous purposes.
748     First 3 fields:
749     unused, unused and unused.  */
750
751  {0, 0, 0, "lsl", 0},
752  {0, 0, 0, "msl", 0},
753
754  {0, 0, 0, "retrieving", 0},
755};
756
757static inline bfd_boolean
758operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
759{
760  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT)
761    ? TRUE : FALSE;
762}
763
764static inline bfd_boolean
765qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
766{
767  return (aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE)
768    ? TRUE : FALSE;
769}
770
771const char*
772aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
773{
774  return aarch64_opnd_qualifiers[qualifier].desc;
775}
776
777/* Given an operand qualifier, return the expected data element size
778   of a qualified operand.  */
779unsigned char
780aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
781{
782  assert (operand_variant_qualifier_p (qualifier) == TRUE);
783  return aarch64_opnd_qualifiers[qualifier].data0;
784}
785
786unsigned char
787aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
788{
789  assert (operand_variant_qualifier_p (qualifier) == TRUE);
790  return aarch64_opnd_qualifiers[qualifier].data1;
791}
792
793aarch64_insn
794aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
795{
796  assert (operand_variant_qualifier_p (qualifier) == TRUE);
797  return aarch64_opnd_qualifiers[qualifier].data2;
798}
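
/* For example, for the "4s" vector arrangement the table above records an
   element size of 4 bytes, 4 elements and a standard encoding value of
   0x5, which is what the three accessors above return for that
   qualifier.  */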
799
800static int
801get_lower_bound (aarch64_opnd_qualifier_t qualifier)
802{
803  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
804  return aarch64_opnd_qualifiers[qualifier].data0;
805}
806
807static int
808get_upper_bound (aarch64_opnd_qualifier_t qualifier)
809{
810  assert (qualifier_value_in_range_constraint_p (qualifier) == TRUE);
811  return aarch64_opnd_qualifiers[qualifier].data1;
812}
813
814#ifdef DEBUG_AARCH64
815void
816aarch64_verbose (const char *str, ...)
817{
818  va_list ap;
819  va_start (ap, str);
820  printf ("#### ");
821  vprintf (str, ap);
822  printf ("\n");
823  va_end (ap);
824}
825
826static inline void
827dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
828{
829  int i;
830  printf ("#### \t");
831  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
832    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
833  printf ("\n");
834}
835
836static void
837dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
838		       const aarch64_opnd_qualifier_t *qualifier)
839{
840  int i;
841  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
842
843  aarch64_verbose ("dump_match_qualifiers:");
844  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
845    curr[i] = opnd[i].qualifier;
846  dump_qualifier_sequence (curr);
847  aarch64_verbose ("against");
848  dump_qualifier_sequence (qualifier);
849}
850#endif /* DEBUG_AARCH64 */
851
/* This function checks if the given opcode OPCODE is a destructive
   instruction based on the usage of its register operands.  It does not
   recognize unary destructive instructions.  */
855bfd_boolean
856aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
857{
858  int i = 0;
859  const enum aarch64_opnd *opnds = opcode->operands;
860
861  if (opnds[0] == AARCH64_OPND_NIL)
862    return FALSE;
863
864  while (opnds[++i] != AARCH64_OPND_NIL)
865    if (opnds[i] == opnds[0])
866      return TRUE;
867
868  return FALSE;
869}
870
/* TODO: improve this; we could add an extra field at run time to
   store the number of operands rather than calculating it every time.  */
873
874int
875aarch64_num_of_operands (const aarch64_opcode *opcode)
876{
877  int i = 0;
878  const enum aarch64_opnd *opnds = opcode->operands;
879  while (opnds[i++] != AARCH64_OPND_NIL)
880    ;
881  --i;
882  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
883  return i;
884}
885
886/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
887   If succeeds, fill the found sequence in *RET, return 1; otherwise return 0.
888
   N.B. on entry, it is very likely that only some operands in *INST
   have had their qualifiers established.  */
891
892   If STOP_AT is not -1, the function will only try to match
893   the qualifier sequence for operands before and including the operand
894   of index STOP_AT; and on success *RET will only be filled with the first
895   (STOP_AT+1) qualifiers.
896
   A couple of examples of the matching algorithm:
898
899   X,W,NIL should match
900   X,W,NIL
901
902   NIL,NIL should match
903   X  ,NIL
904
905   Apart from serving the main encoding routine, this can also be called
906   during or after the operand decoding.  */
907
908int
909aarch64_find_best_match (const aarch64_inst *inst,
910			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
911			 int stop_at, aarch64_opnd_qualifier_t *ret)
912{
913  int found = 0;
914  int i, num_opnds;
915  const aarch64_opnd_qualifier_t *qualifiers;
916
917  num_opnds = aarch64_num_of_operands (inst->opcode);
918  if (num_opnds == 0)
919    {
920      DEBUG_TRACE ("SUCCEED: no operand");
921      return 1;
922    }
923
924  if (stop_at < 0 || stop_at >= num_opnds)
925    stop_at = num_opnds - 1;
926
927  /* For each pattern.  */
928  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
929    {
930      int j;
931      qualifiers = *qualifiers_list;
932
933      /* Start as positive.  */
934      found = 1;
935
936      DEBUG_TRACE ("%d", i);
937#ifdef DEBUG_AARCH64
938      if (debug_dump)
939	dump_match_qualifiers (inst->operands, qualifiers);
940#endif
941
      /* Most opcodes have far fewer patterns in the list;
	 the first NIL qualifier indicates the end of the list.  */
944      if (empty_qualifier_sequence_p (qualifiers) == TRUE)
945	{
946	  DEBUG_TRACE_IF (i == 0, "SUCCEED: empty qualifier list");
947	  if (i)
948	    found = 0;
949	  break;
950	}
951
952      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
953	{
954	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL)
955	    {
	      /* Either the operand does not have a qualifier, or the qualifier
		 for the operand needs to be deduced from the qualifier
		 sequence.
		 In the latter case, any constraint checking related to
		 the obtained qualifier should be done later in
		 operand_general_constraint_met_p.  */
962	      continue;
963	    }
964	  else if (*qualifiers != inst->operands[j].qualifier)
965	    {
	      /* Unless the target qualifier can also qualify the operand
		 (which already has a non-nil qualifier), unequal
		 qualifiers generally do not match.  */
969	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
970		continue;
971	      else
972		{
973		  found = 0;
974		  break;
975		}
976	    }
977	  else
978	    continue;	/* Equal qualifiers are certainly matched.  */
979	}
980
981      /* Qualifiers established.  */
982      if (found == 1)
983	break;
984    }
985
986  if (found == 1)
987    {
988      /* Fill the result in *RET.  */
989      int j;
990      qualifiers = *qualifiers_list;
991
992      DEBUG_TRACE ("complete qualifiers using list %d", i);
993#ifdef DEBUG_AARCH64
994      if (debug_dump)
995	dump_qualifier_sequence (qualifiers);
996#endif
997
998      for (j = 0; j <= stop_at; ++j, ++qualifiers)
999	ret[j] = *qualifiers;
1000      for (; j < AARCH64_MAX_OPND_NUM; ++j)
1001	ret[j] = AARCH64_OPND_QLF_NIL;
1002
1003      DEBUG_TRACE ("SUCCESS");
1004      return 1;
1005    }
1006
1007  DEBUG_TRACE ("FAIL");
1008  return 0;
1009}
1010
1011/* Operand qualifier matching and resolving.
1012
1013   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1014   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1015
   If UPDATE_P == TRUE, update the qualifier(s) in *INST after the matching
1017   succeeds.  */
1018
1019static int
1020match_operands_qualifier (aarch64_inst *inst, bfd_boolean update_p)
1021{
1022  int i, nops;
1023  aarch64_opnd_qualifier_seq_t qualifiers;
1024
1025  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1026			       qualifiers))
1027    {
1028      DEBUG_TRACE ("matching FAIL");
1029      return 0;
1030    }
1031
1032  if (inst->opcode->flags & F_STRICT)
1033    {
1034      /* Require an exact qualifier match, even for NIL qualifiers.  */
1035      nops = aarch64_num_of_operands (inst->opcode);
1036      for (i = 0; i < nops; ++i)
1037	if (inst->operands[i].qualifier != qualifiers[i])
1038	  return FALSE;
1039    }
1040
1041  /* Update the qualifiers.  */
1042  if (update_p == TRUE)
1043    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1044      {
1045	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1046	  break;
1047	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1048			"update %s with %s for operand %d",
1049			aarch64_get_qualifier_name (inst->operands[i].qualifier),
1050			aarch64_get_qualifier_name (qualifiers[i]), i);
1051	inst->operands[i].qualifier = qualifiers[i];
1052      }
1053
1054  DEBUG_TRACE ("matching SUCCESS");
1055  return 1;
1056}
1057
1058/* Return TRUE if VALUE is a wide constant that can be moved into a general
1059   register by MOVZ.
1060
1061   IS32 indicates whether value is a 32-bit immediate or not.
1062   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1063   amount will be returned in *SHIFT_AMOUNT.  */
1064
1065bfd_boolean
1066aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1067{
1068  int amount;
1069
1070  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1071
1072  if (is32)
1073    {
1074      /* Allow all zeros or all ones in top 32-bits, so that
1075	 32-bit constant expressions like ~0x80000000 are
1076	 permitted.  */
1077      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1078	/* Immediate out of range.  */
1079	return FALSE;
1080      value &= 0xffffffff;
1081    }
1082
1083  /* first, try movz then movn */
1084  amount = -1;
1085  if ((value & ((uint64_t) 0xffff << 0)) == value)
1086    amount = 0;
1087  else if ((value & ((uint64_t) 0xffff << 16)) == value)
1088    amount = 16;
1089  else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1090    amount = 32;
1091  else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1092    amount = 48;
1093
1094  if (amount == -1)
1095    {
1096      DEBUG_TRACE ("exit FALSE with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1097      return FALSE;
1098    }
1099
1100  if (shift_amount != NULL)
1101    *shift_amount = amount;
1102
1103  DEBUG_TRACE ("exit TRUE with amount %d", amount);
1104
1105  return TRUE;
1106}
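
/* For example, aarch64_wide_constant_p (0xffff0000, 1, &shift) returns TRUE
   with *SHIFT set to 16, whereas 0xffff0001 is rejected because its set
   bits do not fall within a single 16-bit chunk.  */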
1107
1108/* Build the accepted values for immediate logical SIMD instructions.
1109
1110   The standard encodings of the immediate value are:
1111     N      imms     immr         SIMD size  R             S
1112     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
1113     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
1114     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
1115     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
1116     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
1117     0      11110s   00000r       2       UInt(r)       UInt(s)
1118   where all-ones value of S is reserved.
1119
1120   Let's call E the SIMD size.
1121
1122   The immediate value is: S+1 bits '1' rotated to the right by R.
1123
1124   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1125   (remember S != E - 1).  */
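
/* For example, with E = 2, S = 0 and R = 0 the element is a single '1'
   bit, which replicates to 0x5555555555555555; its standard encoding is
   N = 0, imms = 0b111100, immr = 0b000000.  */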
1126
1127#define TOTAL_IMM_NB  5334
1128
1129typedef struct
1130{
1131  uint64_t imm;
1132  aarch64_insn encoding;
1133} simd_imm_encoding;
1134
1135static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1136
1137static int
1138simd_imm_encoding_cmp(const void *i1, const void *i2)
1139{
1140  const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1141  const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1142
1143  if (imm1->imm < imm2->imm)
1144    return -1;
1145  if (imm1->imm > imm2->imm)
1146    return +1;
1147  return 0;
1148}
1149
1150/* immediate bitfield standard encoding
1151   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
1152   1         ssssss     rrrrrr      64        rrrrrr ssssss
1153   0         0sssss     0rrrrr      32        rrrrr  sssss
1154   0         10ssss     00rrrr      16        rrrr   ssss
1155   0         110sss     000rrr      8         rrr    sss
1156   0         1110ss     0000rr      4         rr     ss
1157   0         11110s     00000r      2         r      s  */
1158static inline int
1159encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1160{
1161  return (is64 << 12) | (r << 6) | s;
1162}
1163
1164static void
1165build_immediate_table (void)
1166{
1167  uint32_t log_e, e, s, r, s_mask;
1168  uint64_t mask, imm;
1169  int nb_imms;
1170  int is64;
1171
1172  nb_imms = 0;
1173  for (log_e = 1; log_e <= 6; log_e++)
1174    {
1175      /* Get element size.  */
1176      e = 1u << log_e;
1177      if (log_e == 6)
1178	{
1179	  is64 = 1;
1180	  mask = 0xffffffffffffffffull;
1181	  s_mask = 0;
1182	}
1183      else
1184	{
1185	  is64 = 0;
1186	  mask = (1ull << e) - 1;
1187	  /* log_e  s_mask
1188	     1     ((1 << 4) - 1) << 2 = 111100
1189	     2     ((1 << 3) - 1) << 3 = 111000
1190	     3     ((1 << 2) - 1) << 4 = 110000
1191	     4     ((1 << 1) - 1) << 5 = 100000
1192	     5     ((1 << 0) - 1) << 6 = 000000  */
1193	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1194	}
1195      for (s = 0; s < e - 1; s++)
1196	for (r = 0; r < e; r++)
1197	  {
1198	    /* s+1 consecutive bits to 1 (s < 63) */
1199	    imm = (1ull << (s + 1)) - 1;
1200	    /* rotate right by r */
1201	    if (r != 0)
1202	      imm = (imm >> r) | ((imm << (e - r)) & mask);
1203	    /* replicate the constant depending on SIMD size */
1204	    switch (log_e)
1205	      {
1206	      case 1: imm = (imm <<  2) | imm;
1207		/* Fall through.  */
1208	      case 2: imm = (imm <<  4) | imm;
1209		/* Fall through.  */
1210	      case 3: imm = (imm <<  8) | imm;
1211		/* Fall through.  */
1212	      case 4: imm = (imm << 16) | imm;
1213		/* Fall through.  */
1214	      case 5: imm = (imm << 32) | imm;
1215		/* Fall through.  */
1216	      case 6: break;
1217	      default: abort ();
1218	      }
1219	    simd_immediates[nb_imms].imm = imm;
1220	    simd_immediates[nb_imms].encoding =
1221	      encode_immediate_bitfield(is64, s | s_mask, r);
1222	    nb_imms++;
1223	  }
1224    }
1225  assert (nb_imms == TOTAL_IMM_NB);
1226  qsort(simd_immediates, nb_imms,
1227	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1228}
1229
1230/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1231   be accepted by logical (immediate) instructions
1232   e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1233
1234   ESIZE is the number of bytes in the decoded immediate value.
1235   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1236   VALUE will be returned in *ENCODING.  */
1237
1238bfd_boolean
1239aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1240{
1241  simd_imm_encoding imm_enc;
1242  const simd_imm_encoding *imm_encoding;
1243  static bfd_boolean initialized = FALSE;
1244  uint64_t upper;
1245  int i;
1246
1247  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1248	       value, esize);
1249
1250  if (!initialized)
1251    {
1252      build_immediate_table ();
1253      initialized = TRUE;
1254    }
1255
1256  /* Allow all zeros or all ones in top bits, so that
1257     constant expressions like ~1 are permitted.  */
1258  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1259  if ((value & ~upper) != value && (value | upper) != value)
1260    return FALSE;
1261
1262  /* Replicate to a full 64-bit value.  */
1263  value &= ~upper;
1264  for (i = esize * 8; i < 64; i *= 2)
1265    value |= (value << i);
1266
1267  imm_enc.imm = value;
1268  imm_encoding = (const simd_imm_encoding *)
1269    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1270            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1271  if (imm_encoding == NULL)
1272    {
1273      DEBUG_TRACE ("exit with FALSE");
1274      return FALSE;
1275    }
1276  if (encoding != NULL)
1277    *encoding = imm_encoding->encoding;
1278  DEBUG_TRACE ("exit with TRUE");
1279  return TRUE;
1280}
1281
1282/* If 64-bit immediate IMM is in the format of
1283   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1284   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1285   of value "abcdefgh".  Otherwise return -1.  */
1286int
1287aarch64_shrink_expanded_imm8 (uint64_t imm)
1288{
1289  int i, ret;
1290  uint32_t byte;
1291
1292  ret = 0;
1293  for (i = 0; i < 8; i++)
1294    {
1295      byte = (imm >> (8 * i)) & 0xff;
1296      if (byte == 0xff)
1297	ret |= 1 << i;
1298      else if (byte != 0x00)
1299	return -1;
1300    }
1301  return ret;
1302}
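
/* For example, aarch64_shrink_expanded_imm8 (0xff00ff0000ff00ffULL) returns
   0xa5 (0b10100101); any input containing a byte other than 0x00 or 0xff
   makes it return -1.  */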
1303
1304/* Utility inline functions for operand_general_constraint_met_p.  */
1305
1306static inline void
1307set_error (aarch64_operand_error *mismatch_detail,
1308	   enum aarch64_operand_error_kind kind, int idx,
1309	   const char* error)
1310{
1311  if (mismatch_detail == NULL)
1312    return;
1313  mismatch_detail->kind = kind;
1314  mismatch_detail->index = idx;
1315  mismatch_detail->error = error;
1316}
1317
1318static inline void
1319set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1320		  const char* error)
1321{
1322  if (mismatch_detail == NULL)
1323    return;
1324  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1325}
1326
1327static inline void
1328set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1329			int idx, int lower_bound, int upper_bound,
1330			const char* error)
1331{
1332  if (mismatch_detail == NULL)
1333    return;
1334  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1335  mismatch_detail->data[0] = lower_bound;
1336  mismatch_detail->data[1] = upper_bound;
1337}
1338
1339static inline void
1340set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1341			    int idx, int lower_bound, int upper_bound)
1342{
1343  if (mismatch_detail == NULL)
1344    return;
1345  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1346			  _("immediate value"));
1347}
1348
1349static inline void
1350set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1351			       int idx, int lower_bound, int upper_bound)
1352{
1353  if (mismatch_detail == NULL)
1354    return;
1355  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1356			  _("immediate offset"));
1357}
1358
1359static inline void
1360set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1361			      int idx, int lower_bound, int upper_bound)
1362{
1363  if (mismatch_detail == NULL)
1364    return;
1365  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1366			  _("register number"));
1367}
1368
1369static inline void
1370set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1371				 int idx, int lower_bound, int upper_bound)
1372{
1373  if (mismatch_detail == NULL)
1374    return;
1375  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1376			  _("register element index"));
1377}
1378
1379static inline void
1380set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1381				   int idx, int lower_bound, int upper_bound)
1382{
1383  if (mismatch_detail == NULL)
1384    return;
1385  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1386			  _("shift amount"));
1387}
1388
1389/* Report that the MUL modifier in operand IDX should be in the range
1390   [LOWER_BOUND, UPPER_BOUND].  */
1391static inline void
1392set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1393				   int idx, int lower_bound, int upper_bound)
1394{
1395  if (mismatch_detail == NULL)
1396    return;
1397  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1398			  _("multiplier"));
1399}
1400
1401static inline void
1402set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1403		     int alignment)
1404{
1405  if (mismatch_detail == NULL)
1406    return;
1407  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1408  mismatch_detail->data[0] = alignment;
1409}
1410
1411static inline void
1412set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1413		    int expected_num)
1414{
1415  if (mismatch_detail == NULL)
1416    return;
1417  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1418  mismatch_detail->data[0] = expected_num;
1419}
1420
1421static inline void
1422set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1423		 const char* error)
1424{
1425  if (mismatch_detail == NULL)
1426    return;
1427  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1428}
1429
1430/* General constraint checking based on operand code.
1431
1432   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1433   as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1434
1435   This function has to be called after the qualifiers for all operands
1436   have been resolved.
1437
   A mismatch error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
   messages during disassembly, where they are not wanted.  We avoid the
   dynamic construction of error message strings here (i.e. in libopcodes),
   as it is costly and complicated; instead, we use a combination of an
   error code, a static string and some integer data to represent an
   error.  */
1445
1446static int
1447operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1448				  enum aarch64_opnd type,
1449				  const aarch64_opcode *opcode,
1450				  aarch64_operand_error *mismatch_detail)
1451{
1452  unsigned num, modifiers, shift;
1453  unsigned char size;
1454  int64_t imm, min_value, max_value;
1455  uint64_t uvalue, mask;
1456  const aarch64_opnd_info *opnd = opnds + idx;
1457  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1458
1459  assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1460
1461  switch (aarch64_operands[type].op_class)
1462    {
1463    case AARCH64_OPND_CLASS_INT_REG:
1464      /* Check pair reg constraints for cas* instructions.  */
1465      if (type == AARCH64_OPND_PAIRREG)
1466	{
1467	  assert (idx == 1 || idx == 3);
1468	  if (opnds[idx - 1].reg.regno % 2 != 0)
1469	    {
1470	      set_syntax_error (mismatch_detail, idx - 1,
1471				_("reg pair must start from even reg"));
1472	      return 0;
1473	    }
1474	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1475	    {
1476	      set_syntax_error (mismatch_detail, idx,
1477				_("reg pair must be contiguous"));
1478	      return 0;
1479	    }
1480	  break;
1481	}
1482
1483      /* <Xt> may be optional in some IC and TLBI instructions.  */
1484      if (type == AARCH64_OPND_Rt_SYS)
1485	{
1486	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1487			       == AARCH64_OPND_CLASS_SYSTEM));
1488	  if (opnds[1].present
1489	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1490	    {
1491	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1492	      return 0;
1493	    }
1494	  if (!opnds[1].present
1495	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1496	    {
1497	      set_other_error (mismatch_detail, idx, _("missing register"));
1498	      return 0;
1499	    }
1500	}
1501      switch (qualifier)
1502	{
1503	case AARCH64_OPND_QLF_WSP:
1504	case AARCH64_OPND_QLF_SP:
1505	  if (!aarch64_stack_pointer_p (opnd))
1506	    {
1507	      set_other_error (mismatch_detail, idx,
1508			       _("stack pointer register expected"));
1509	      return 0;
1510	    }
1511	  break;
1512	default:
1513	  break;
1514	}
1515      break;
1516
1517    case AARCH64_OPND_CLASS_SVE_REG:
1518      switch (type)
1519	{
1520	case AARCH64_OPND_SVE_Zm3_INDEX:
1521	case AARCH64_OPND_SVE_Zm3_22_INDEX:
1522	case AARCH64_OPND_SVE_Zm3_11_INDEX:
1523	case AARCH64_OPND_SVE_Zm4_11_INDEX:
1524	case AARCH64_OPND_SVE_Zm4_INDEX:
1525	  size = get_operand_fields_width (get_operand_from_code (type));
1526	  shift = get_operand_specific_data (&aarch64_operands[type]);
1527	  mask = (1 << shift) - 1;
1528	  if (opnd->reg.regno > mask)
1529	    {
1530	      assert (mask == 7 || mask == 15);
1531	      set_other_error (mismatch_detail, idx,
1532			       mask == 15
1533			       ? _("z0-z15 expected")
1534			       : _("z0-z7 expected"));
1535	      return 0;
1536	    }
1537	  mask = (1u << (size - shift)) - 1;
1538	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
1539	    {
1540	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1541	      return 0;
1542	    }
1543	  break;
1544
1545	case AARCH64_OPND_SVE_Zn_INDEX:
1546	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1547	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1548	    {
1549	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
1550					       0, 64 / size - 1);
1551	      return 0;
1552	    }
1553	  break;
1554
1555	case AARCH64_OPND_SVE_ZnxN:
1556	case AARCH64_OPND_SVE_ZtxN:
1557	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1558	    {
1559	      set_other_error (mismatch_detail, idx,
1560			       _("invalid register list"));
1561	      return 0;
1562	    }
1563	  break;
1564
1565	default:
1566	  break;
1567	}
1568      break;
1569
1570    case AARCH64_OPND_CLASS_PRED_REG:
1571      if (opnd->reg.regno >= 8
1572	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
1573	{
1574	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1575	  return 0;
1576	}
1577      break;
1578
1579    case AARCH64_OPND_CLASS_COND:
1580      if (type == AARCH64_OPND_COND1
1581	  && (opnds[idx].cond->value & 0xe) == 0xe)
1582	{
	  /* Do not allow AL or NV.  */
1584	  set_syntax_error (mismatch_detail, idx, NULL);
1585	}
1586      break;
1587
1588    case AARCH64_OPND_CLASS_ADDRESS:
1589      /* Check writeback.  */
1590      switch (opcode->iclass)
1591	{
1592	case ldst_pos:
1593	case ldst_unscaled:
1594	case ldstnapair_offs:
1595	case ldstpair_off:
1596	case ldst_unpriv:
1597	  if (opnd->addr.writeback == 1)
1598	    {
1599	      set_syntax_error (mismatch_detail, idx,
1600				_("unexpected address writeback"));
1601	      return 0;
1602	    }
1603	  break;
1604	case ldst_imm10:
1605	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1606	    {
1607	      set_syntax_error (mismatch_detail, idx,
1608				_("unexpected address writeback"));
1609	      return 0;
1610	    }
1611	  break;
1612	case ldst_imm9:
1613	case ldstpair_indexed:
1614	case asisdlsep:
1615	case asisdlsop:
1616	  if (opnd->addr.writeback == 0)
1617	    {
1618	      set_syntax_error (mismatch_detail, idx,
1619				_("address writeback expected"));
1620	      return 0;
1621	    }
1622	  break;
1623	default:
1624	  assert (opnd->addr.writeback == 0);
1625	  break;
1626	}
1627      switch (type)
1628	{
1629	case AARCH64_OPND_ADDR_SIMM7:
	  /* Scaled signed 7-bit immediate offset.  */
1631	  /* Get the size of the data element that is accessed, which may be
1632	     different from that of the source register size,
1633	     e.g. in strb/ldrb.  */
1634	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1635	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1636	    {
1637	      set_offset_out_of_range_error (mismatch_detail, idx,
1638					     -64 * size, 63 * size);
1639	      return 0;
1640	    }
1641	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1642	    {
1643	      set_unaligned_error (mismatch_detail, idx, size);
1644	      return 0;
1645	    }
1646	  break;
1647	case AARCH64_OPND_ADDR_OFFSET:
1648	case AARCH64_OPND_ADDR_SIMM9:
	  /* Unscaled signed 9-bit immediate offset.  */
1650	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1651	    {
1652	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1653	      return 0;
1654	    }
1655	  break;
1656
1657	case AARCH64_OPND_ADDR_SIMM9_2:
	  /* Unscaled signed 9-bit immediate offset, which has to be negative
1659	     or unaligned.  */
1660	  size = aarch64_get_qualifier_esize (qualifier);
1661	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1662	       && !value_aligned_p (opnd->addr.offset.imm, size))
1663	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1664	    return 1;
1665	  set_other_error (mismatch_detail, idx,
1666			   _("negative or unaligned offset expected"));
1667	  return 0;
1668
1669	case AARCH64_OPND_ADDR_SIMM10:
	  /* Scaled signed 10-bit immediate offset.  */
1671	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1672	    {
1673	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1674	      return 0;
1675	    }
1676	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
1677	    {
1678	      set_unaligned_error (mismatch_detail, idx, 8);
1679	      return 0;
1680	    }
1681	  break;
1682
1683	case AARCH64_OPND_ADDR_SIMM11:
	  /* Signed 11-bit immediate offset (multiple of 16).  */
1685	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1686	    {
1687	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1688	      return 0;
1689	    }
1690
1691	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1692	    {
1693	      set_unaligned_error (mismatch_detail, idx, 16);
1694	      return 0;
1695	    }
1696	  break;
1697
1698	case AARCH64_OPND_ADDR_SIMM13:
	  /* Signed 13-bit immediate offset (multiple of 16).  */
1700	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1701	    {
1702	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1703	      return 0;
1704	    }
1705
1706	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1707	    {
1708	      set_unaligned_error (mismatch_detail, idx, 16);
1709	      return 0;
1710	    }
1711	  break;
1712
1713	case AARCH64_OPND_SIMD_ADDR_POST:
1714	  /* AdvSIMD load/store multiple structures, post-index.  */
1715	  assert (idx == 1);
1716	  if (opnd->addr.offset.is_reg)
1717	    {
1718	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1719		return 1;
1720	      else
1721		{
1722		  set_other_error (mismatch_detail, idx,
1723				   _("invalid register offset"));
1724		  return 0;
1725		}
1726	    }
1727	  else
1728	    {
1729	      const aarch64_opnd_info *prev = &opnds[idx-1];
1730	      unsigned num_bytes; /* total number of bytes transferred.  */
1731	      /* The opcode dependent area stores the number of elements in
1732		 each structure to be loaded/stored.  */
1733	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1734	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1735		/* Special handling of loading a single structure to all lanes.  */
1736		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1737		  * aarch64_get_qualifier_esize (prev->qualifier);
1738	      else
1739		num_bytes = prev->reglist.num_regs
1740		  * aarch64_get_qualifier_esize (prev->qualifier)
1741		  * aarch64_get_qualifier_nelem (prev->qualifier);
1742	      if ((int) num_bytes != opnd->addr.offset.imm)
1743		{
1744		  set_other_error (mismatch_detail, idx,
1745				   _("invalid post-increment amount"));
1746		  return 0;
1747		}
1748	    }
1749	  break;
1750
1751	case AARCH64_OPND_ADDR_REGOFF:
1752	  /* Get the size of the data element that is accessed, which may be
1753	     different from that of the source register,
1754	     e.g. in strb/ldrb.  */
1755	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1756	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1757	  if (opnd->shifter.amount != 0
1758	      && opnd->shifter.amount != (int)get_logsz (size))
1759	    {
1760	      set_other_error (mismatch_detail, idx,
1761			       _("invalid shift amount"));
1762	      return 0;
1763	    }
1764	  /* Only UXTW, LSL, SXTW and SXTX are the accepted extending
1765	     operators.  */
1766	  switch (opnd->shifter.kind)
1767	    {
1768	    case AARCH64_MOD_UXTW:
1769	    case AARCH64_MOD_LSL:
1770	    case AARCH64_MOD_SXTW:
1771	    case AARCH64_MOD_SXTX: break;
1772	    default:
1773	      set_other_error (mismatch_detail, idx,
1774			       _("invalid extend/shift operator"));
1775	      return 0;
1776	    }
1777	  break;
1778
1779	case AARCH64_OPND_ADDR_UIMM12:
1780	  imm = opnd->addr.offset.imm;
1781	  /* Get the size of the data element that is accessed, which may be
1782	     different from that of the source register,
1783	     e.g. in strb/ldrb.  */
1784	  size = aarch64_get_qualifier_esize (qualifier);
1785	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1786	    {
1787	      set_offset_out_of_range_error (mismatch_detail, idx,
1788					     0, 4095 * size);
1789	      return 0;
1790	    }
1791	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1792	    {
1793	      set_unaligned_error (mismatch_detail, idx, size);
1794	      return 0;
1795	    }
1796	  break;
1797
1798	case AARCH64_OPND_ADDR_PCREL14:
1799	case AARCH64_OPND_ADDR_PCREL19:
1800	case AARCH64_OPND_ADDR_PCREL21:
1801	case AARCH64_OPND_ADDR_PCREL26:
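	  /* For example (a summary of the ranges implied by the field widths
	     used below): AARCH64_OPND_ADDR_PCREL19, used by conditional
	     branches and LDR (literal), accepts a 4-byte-aligned offset
	     within about +/-1MiB of PC, while AARCH64_OPND_ADDR_PCREL26
	     (B/BL) accepts about +/-128MiB.  */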
1802	  imm = opnd->imm.value;
1803	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1804	    {
1805	      /* The offset value in a PC-relative branch instruction is always
1806		 4-byte aligned and is encoded without the lowest 2 bits.  */
1807	      if (!value_aligned_p (imm, 4))
1808		{
1809		  set_unaligned_error (mismatch_detail, idx, 4);
1810		  return 0;
1811		}
1812	      /* Right shift by 2 so that we can carry out the following check
1813		 canonically.  */
1814	      imm >>= 2;
1815	    }
1816	  size = get_operand_fields_width (get_operand_from_code (type));
1817	  if (!value_fit_signed_field_p (imm, size))
1818	    {
1819	      set_other_error (mismatch_detail, idx,
1820			       _("immediate out of range"));
1821	      return 0;
1822	    }
1823	  break;
1824
1825	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1826	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1827	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1828	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
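	  /* The immediate is scaled by the vector length.  E.g. for
	     AARCH64_OPND_SVE_ADDR_RI_S4x2xVL (assuming the operand-specific
	     data below encodes the multiplier minus one, i.e. NUM == 2) the
	     offset must be a multiple of 2 in [-16, 14] and is written as
	     "[<Xn|SP>, #<imm>, mul vl]".  */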
1829	  min_value = -8;
1830	  max_value = 7;
1831	sve_imm_offset_vl:
1832	  assert (!opnd->addr.offset.is_reg);
1833	  assert (opnd->addr.preind);
1834	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1835	  min_value *= num;
1836	  max_value *= num;
1837	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1838	      || (opnd->shifter.operator_present
1839		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1840	    {
1841	      set_other_error (mismatch_detail, idx,
1842			       _("invalid addressing mode"));
1843	      return 0;
1844	    }
1845	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1846	    {
1847	      set_offset_out_of_range_error (mismatch_detail, idx,
1848					     min_value, max_value);
1849	      return 0;
1850	    }
1851	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1852	    {
1853	      set_unaligned_error (mismatch_detail, idx, num);
1854	      return 0;
1855	    }
1856	  break;
1857
1858	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1859	  min_value = -32;
1860	  max_value = 31;
1861	  goto sve_imm_offset_vl;
1862
1863	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1864	  min_value = -256;
1865	  max_value = 255;
1866	  goto sve_imm_offset_vl;
1867
1868	case AARCH64_OPND_SVE_ADDR_RI_U6:
1869	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1870	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1871	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1872	  min_value = 0;
1873	  max_value = 63;
1874	sve_imm_offset:
1875	  assert (!opnd->addr.offset.is_reg);
1876	  assert (opnd->addr.preind);
1877	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1878	  min_value *= num;
1879	  max_value *= num;
1880	  if (opnd->shifter.operator_present
1881	      || opnd->shifter.amount_present)
1882	    {
1883	      set_other_error (mismatch_detail, idx,
1884			       _("invalid addressing mode"));
1885	      return 0;
1886	    }
1887	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1888	    {
1889	      set_offset_out_of_range_error (mismatch_detail, idx,
1890					     min_value, max_value);
1891	      return 0;
1892	    }
1893	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1894	    {
1895	      set_unaligned_error (mismatch_detail, idx, num);
1896	      return 0;
1897	    }
1898	  break;
1899
1900	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1901	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1902	  min_value = -8;
1903	  max_value = 7;
1904	  goto sve_imm_offset;
1905
1906	case AARCH64_OPND_SVE_ADDR_ZX:
1907	  /* Everything is already ensured by parse_operands or
1908	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1909	     argument type).  */
1910	  assert (opnd->addr.offset.is_reg);
1911	  assert (opnd->addr.preind);
1912	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1913	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1914	  assert (opnd->shifter.operator_present == 0);
1915	  break;
1916
1917	case AARCH64_OPND_SVE_ADDR_R:
1918	case AARCH64_OPND_SVE_ADDR_RR:
1919	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1920	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1921	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1922	case AARCH64_OPND_SVE_ADDR_RX:
1923	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1924	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1925	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1926	case AARCH64_OPND_SVE_ADDR_RZ:
1927	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1928	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1929	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1930	  modifiers = 1 << AARCH64_MOD_LSL;
1931	sve_rr_operand:
1932	  assert (opnd->addr.offset.is_reg);
1933	  assert (opnd->addr.preind);
1934	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1935	      && opnd->addr.offset.regno == 31)
1936	    {
1937	      set_other_error (mismatch_detail, idx,
1938			       _("index register xzr is not allowed"));
1939	      return 0;
1940	    }
1941	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1942	      || (opnd->shifter.amount
1943		  != get_operand_specific_data (&aarch64_operands[type])))
1944	    {
1945	      set_other_error (mismatch_detail, idx,
1946			       _("invalid addressing mode"));
1947	      return 0;
1948	    }
1949	  break;
1950
1951	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1952	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1953	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1954	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1955	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1956	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1957	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1958	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1959	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1960	  goto sve_rr_operand;
1961
1962	case AARCH64_OPND_SVE_ADDR_ZI_U5:
1963	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1964	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1965	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1966	  min_value = 0;
1967	  max_value = 31;
1968	  goto sve_imm_offset;
1969
1970	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1971	  modifiers = 1 << AARCH64_MOD_LSL;
1972	sve_zz_operand:
1973	  assert (opnd->addr.offset.is_reg);
1974	  assert (opnd->addr.preind);
1975	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1976	      || opnd->shifter.amount < 0
1977	      || opnd->shifter.amount > 3)
1978	    {
1979	      set_other_error (mismatch_detail, idx,
1980			       _("invalid addressing mode"));
1981	      return 0;
1982	    }
1983	  break;
1984
1985	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
1986	  modifiers = (1 << AARCH64_MOD_SXTW);
1987	  goto sve_zz_operand;
1988
1989	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
1990	  modifiers = 1 << AARCH64_MOD_UXTW;
1991	  goto sve_zz_operand;
1992
1993	default:
1994	  break;
1995	}
1996      break;
1997
1998    case AARCH64_OPND_CLASS_SIMD_REGLIST:
1999      if (type == AARCH64_OPND_LEt)
2000	{
2001	  /* Get the upper bound for the element index.  */
2002	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2003	  if (!value_in_range_p (opnd->reglist.index, 0, num))
2004	    {
2005	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2006	      return 0;
2007	    }
2008	}
2009      /* The opcode dependent area stores the number of elements in
2010	 each structure to be loaded/stored.  */
2011      num = get_opcode_dependent_value (opcode);
2012      switch (type)
2013	{
2014	case AARCH64_OPND_LVt:
2015	  assert (num >= 1 && num <= 4);
2016	  /* Except for LD1/ST1, the number of registers should be equal to that
2017	     of the structure elements.  */
2018	  if (num != 1 && opnd->reglist.num_regs != num)
2019	    {
2020	      set_reg_list_error (mismatch_detail, idx, num);
2021	      return 0;
2022	    }
2023	  break;
2024	case AARCH64_OPND_LVt_AL:
2025	case AARCH64_OPND_LEt:
2026	  assert (num >= 1 && num <= 4);
2027	  /* The number of registers should be equal to that of the structure
2028	     elements.  */
2029	  if (opnd->reglist.num_regs != num)
2030	    {
2031	      set_reg_list_error (mismatch_detail, idx, num);
2032	      return 0;
2033	    }
2034	  break;
2035	default:
2036	  break;
2037	}
2038      break;
2039
2040    case AARCH64_OPND_CLASS_IMMEDIATE:
2041      /* Constraint check on immediate operand.  */
2042      imm = opnd->imm.value;
2043      /* E.g. imm_0_31 constrains value to be 0..31.  */
2044      if (qualifier_value_in_range_constraint_p (qualifier)
2045	  && !value_in_range_p (imm, get_lower_bound (qualifier),
2046				get_upper_bound (qualifier)))
2047	{
2048	  set_imm_out_of_range_error (mismatch_detail, idx,
2049				      get_lower_bound (qualifier),
2050				      get_upper_bound (qualifier));
2051	  return 0;
2052	}
2053
2054      switch (type)
2055	{
2056	case AARCH64_OPND_AIMM:
2057	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2058	    {
2059	      set_other_error (mismatch_detail, idx,
2060			       _("invalid shift operator"));
2061	      return 0;
2062	    }
2063	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2064	    {
2065	      set_other_error (mismatch_detail, idx,
2066			       _("shift amount must be 0 or 12"));
2067	      return 0;
2068	    }
2069	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2070	    {
2071	      set_other_error (mismatch_detail, idx,
2072			       _("immediate out of range"));
2073	      return 0;
2074	    }
2075	  break;
2076
2077	case AARCH64_OPND_HALF:
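	  /* The 16-bit immediate of MOVZ/MOVN/MOVK.  E.g. for a Wd
	     destination the LSL amount checked below may be 0 or 16; for an
	     Xd destination it may be 0, 16, 32 or 48.  */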
2078	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2079	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2080	    {
2081	      set_other_error (mismatch_detail, idx,
2082			       _("invalid shift operator"));
2083	      return 0;
2084	    }
2085	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2086	  if (!value_aligned_p (opnd->shifter.amount, 16))
2087	    {
2088	      set_other_error (mismatch_detail, idx,
2089			       _("shift amount must be a multiple of 16"));
2090	      return 0;
2091	    }
2092	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2093	    {
2094	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
2095						 0, size * 8 - 16);
2096	      return 0;
2097	    }
2098	  if (opnd->imm.value < 0)
2099	    {
2100	      set_other_error (mismatch_detail, idx,
2101			       _("negative immediate value not allowed"));
2102	      return 0;
2103	    }
2104	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2105	    {
2106	      set_other_error (mismatch_detail, idx,
2107			       _("immediate out of range"));
2108	      return 0;
2109	    }
2110	  break;
2111
2112	case AARCH64_OPND_IMM_MOV:
2113	    {
2114	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2115	      imm = opnd->imm.value;
2116	      assert (idx == 1);
2117	      switch (opcode->op)
2118		{
2119		case OP_MOV_IMM_WIDEN:
2120		  imm = ~imm;
2121		  /* Fall through.  */
2122		case OP_MOV_IMM_WIDE:
2123		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2124		    {
2125		      set_other_error (mismatch_detail, idx,
2126				       _("immediate out of range"));
2127		      return 0;
2128		    }
2129		  break;
2130		case OP_MOV_IMM_LOG:
2131		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
2132		    {
2133		      set_other_error (mismatch_detail, idx,
2134				       _("immediate out of range"));
2135		      return 0;
2136		    }
2137		  break;
2138		default:
2139		  assert (0);
2140		  return 0;
2141		}
2142	    }
2143	  break;
2144
2145	case AARCH64_OPND_NZCV:
2146	case AARCH64_OPND_CCMP_IMM:
2147	case AARCH64_OPND_EXCEPTION:
2148	case AARCH64_OPND_TME_UIMM16:
2149	case AARCH64_OPND_UIMM4:
2150	case AARCH64_OPND_UIMM4_ADDG:
2151	case AARCH64_OPND_UIMM7:
2152	case AARCH64_OPND_UIMM3_OP1:
2153	case AARCH64_OPND_UIMM3_OP2:
2154	case AARCH64_OPND_SVE_UIMM3:
2155	case AARCH64_OPND_SVE_UIMM7:
2156	case AARCH64_OPND_SVE_UIMM8:
2157	case AARCH64_OPND_SVE_UIMM8_53:
2158	  size = get_operand_fields_width (get_operand_from_code (type));
2159	  assert (size < 32);
2160	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2161	    {
2162	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2163					  (1u << size) - 1);
2164	      return 0;
2165	    }
2166	  break;
2167
2168	case AARCH64_OPND_UIMM10:
2169	  /* Scaled unsigned 10-bit immediate offset.  */
2170	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
2171	    {
2172	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2173	      return 0;
2174	    }
2175
2176	  if (!value_aligned_p (opnd->imm.value, 16))
2177	    {
2178	      set_unaligned_error (mismatch_detail, idx, 16);
2179	      return 0;
2180	    }
2181	  break;
2182
2183	case AARCH64_OPND_SIMM5:
2184	case AARCH64_OPND_SVE_SIMM5:
2185	case AARCH64_OPND_SVE_SIMM5B:
2186	case AARCH64_OPND_SVE_SIMM6:
2187	case AARCH64_OPND_SVE_SIMM8:
2188	  size = get_operand_fields_width (get_operand_from_code (type));
2189	  assert (size < 32);
2190	  if (!value_fit_signed_field_p (opnd->imm.value, size))
2191	    {
2192	      set_imm_out_of_range_error (mismatch_detail, idx,
2193					  -(1 << (size - 1)),
2194					  (1 << (size - 1)) - 1);
2195	      return 0;
2196	    }
2197	  break;
2198
2199	case AARCH64_OPND_WIDTH:
2200	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2201		  && opnds[0].type == AARCH64_OPND_Rd);
2202	  size = get_upper_bound (qualifier);
2203	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
2204	    /* lsb+width <= reg.size  */
2205	    {
2206	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
2207					  size - opnds[idx-1].imm.value);
2208	      return 0;
2209	    }
2210	  break;
2211
2212	case AARCH64_OPND_LIMM:
2213	case AARCH64_OPND_SVE_LIMM:
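	  /* The immediate (inverted first for BIC) must be a valid "logical
	     immediate": a bitmask made of identical 2-, 4-, 8-, 16-, 32- or
	     64-bit elements, each element being a rotated run of ones.
	     E.g. 0x00ff00ff00ff00ff is encodable, whereas 0 and ~0 are not.  */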
2214	  {
2215	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2216	    uint64_t uimm = opnd->imm.value;
2217	    if (opcode->op == OP_BIC)
2218	      uimm = ~uimm;
2219	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2220	      {
2221		set_other_error (mismatch_detail, idx,
2222				 _("immediate out of range"));
2223		return 0;
2224	      }
2225	  }
2226	  break;
2227
2228	case AARCH64_OPND_IMM0:
2229	case AARCH64_OPND_FPIMM0:
2230	  if (opnd->imm.value != 0)
2231	    {
2232	      set_other_error (mismatch_detail, idx,
2233			       _("immediate zero expected"));
2234	      return 0;
2235	    }
2236	  break;
2237
2238	case AARCH64_OPND_IMM_ROT1:
2239	case AARCH64_OPND_IMM_ROT2:
2240	case AARCH64_OPND_SVE_IMM_ROT2:
2241	  if (opnd->imm.value != 0
2242	      && opnd->imm.value != 90
2243	      && opnd->imm.value != 180
2244	      && opnd->imm.value != 270)
2245	    {
2246	      set_other_error (mismatch_detail, idx,
2247			       _("rotate expected to be 0, 90, 180 or 270"));
2248	      return 0;
2249	    }
2250	  break;
2251
2252	case AARCH64_OPND_IMM_ROT3:
2253	case AARCH64_OPND_SVE_IMM_ROT1:
2254	case AARCH64_OPND_SVE_IMM_ROT3:
2255	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
2256	    {
2257	      set_other_error (mismatch_detail, idx,
2258			       _("rotate expected to be 90 or 270"));
2259	      return 0;
2260	    }
2261	  break;
2262
2263	case AARCH64_OPND_SHLL_IMM:
2264	  assert (idx == 2);
2265	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2266	  if (opnd->imm.value != size)
2267	    {
2268	      set_other_error (mismatch_detail, idx,
2269			       _("invalid shift amount"));
2270	      return 0;
2271	    }
2272	  break;
2273
2274	case AARCH64_OPND_IMM_VLSL:
2275	  size = aarch64_get_qualifier_esize (qualifier);
2276	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2277	    {
2278	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2279					  size * 8 - 1);
2280	      return 0;
2281	    }
2282	  break;
2283
2284	case AARCH64_OPND_IMM_VLSR:
2285	  size = aarch64_get_qualifier_esize (qualifier);
2286	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2287	    {
2288	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2289	      return 0;
2290	    }
2291	  break;
2292
2293	case AARCH64_OPND_SIMD_IMM:
2294	case AARCH64_OPND_SIMD_IMM_SFT:
2295	  /* Qualifier check.  */
2296	  switch (qualifier)
2297	    {
2298	    case AARCH64_OPND_QLF_LSL:
2299	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
2300		{
2301		  set_other_error (mismatch_detail, idx,
2302				   _("invalid shift operator"));
2303		  return 0;
2304		}
2305	      break;
2306	    case AARCH64_OPND_QLF_MSL:
2307	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
2308		{
2309		  set_other_error (mismatch_detail, idx,
2310				   _("invalid shift operator"));
2311		  return 0;
2312		}
2313	      break;
2314	    case AARCH64_OPND_QLF_NIL:
2315	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2316		{
2317		  set_other_error (mismatch_detail, idx,
2318				   _("shift is not permitted"));
2319		  return 0;
2320		}
2321	      break;
2322	    default:
2323	      assert (0);
2324	      return 0;
2325	    }
2326	  /* Is the immediate valid?  */
2327	  assert (idx == 1);
2328	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2329	    {
2330	      /* uimm8 or simm8 */
2331	      if (!value_in_range_p (opnd->imm.value, -128, 255))
2332		{
2333		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2334		  return 0;
2335		}
2336	    }
2337	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2338	    {
2339	      /* uimm64 is not
2340		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2341		 ffffffffgggggggghhhhhhhh'.  */
2342	      set_other_error (mismatch_detail, idx,
2343			       _("invalid value for immediate"));
2344	      return 0;
2345	    }
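	  /* E.g. in the 64-bit immediate form (MOVI <Dd>|<Vd>.2D),
	     0xff00ff00ff00ff00 is accepted because every byte is either
	     0x00 or 0xff, whereas 0x0123456789abcdef is rejected by the
	     check above.  */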
2346	  /* Is the shift amount valid?  */
2347	  switch (opnd->shifter.kind)
2348	    {
2349	    case AARCH64_MOD_LSL:
2350	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2351	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2352		{
2353		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2354						     (size - 1) * 8);
2355		  return 0;
2356		}
2357	      if (!value_aligned_p (opnd->shifter.amount, 8))
2358		{
2359		  set_unaligned_error (mismatch_detail, idx, 8);
2360		  return 0;
2361		}
2362	      break;
2363	    case AARCH64_MOD_MSL:
2364	      /* Only 8 and 16 are valid shift amounts.  */
2365	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2366		{
2367		  set_other_error (mismatch_detail, idx,
2368				   _("shift amount must be 8 or 16"));
2369		  return 0;
2370		}
2371	      break;
2372	    default:
2373	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2374		{
2375		  set_other_error (mismatch_detail, idx,
2376				   _("invalid shift operator"));
2377		  return 0;
2378		}
2379	      break;
2380	    }
2381	  break;
2382
2383	case AARCH64_OPND_FPIMM:
2384	case AARCH64_OPND_SIMD_FPIMM:
2385	case AARCH64_OPND_SVE_FPIMM8:
2386	  if (opnd->imm.is_fp == 0)
2387	    {
2388	      set_other_error (mismatch_detail, idx,
2389			       _("floating-point immediate expected"));
2390	      return 0;
2391	    }
2392	  /* The value is expected to be an 8-bit floating-point constant with
2393	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
2394	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2395	     instruction).  */
2396	  if (!value_in_range_p (opnd->imm.value, 0, 255))
2397	    {
2398	      set_other_error (mismatch_detail, idx,
2399			       _("immediate out of range"));
2400	      return 0;
2401	    }
2402	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
2403	    {
2404	      set_other_error (mismatch_detail, idx,
2405			       _("invalid shift operator"));
2406	      return 0;
2407	    }
2408	  break;
2409
2410	case AARCH64_OPND_SVE_AIMM:
2411	  min_value = 0;
2412	sve_aimm:
2413	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2414	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2415	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
2416	  uvalue = opnd->imm.value;
2417	  shift = opnd->shifter.amount;
2418	  if (size == 1)
2419	    {
2420	      if (shift != 0)
2421		{
2422		  set_other_error (mismatch_detail, idx,
2423				   _("no shift amount allowed for"
2424				     " 8-bit constants"));
2425		  return 0;
2426		}
2427	    }
2428	  else
2429	    {
2430	      if (shift != 0 && shift != 8)
2431		{
2432		  set_other_error (mismatch_detail, idx,
2433				   _("shift amount must be 0 or 8"));
2434		  return 0;
2435		}
2436	      if (shift == 0 && (uvalue & 0xff) == 0)
2437		{
2438		  shift = 8;
2439		  uvalue = (int64_t) uvalue / 256;
2440		}
2441	    }
2442	  mask >>= shift;
2443	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2444	    {
2445	      set_other_error (mismatch_detail, idx,
2446			       _("immediate too big for element size"));
2447	      return 0;
2448	    }
2449	  uvalue = (uvalue - min_value) & mask;
2450	  if (uvalue > 0xff)
2451	    {
2452	      set_other_error (mismatch_detail, idx,
2453			       _("invalid arithmetic immediate"));
2454	      return 0;
2455	    }
2456	  break;
2457
2458	case AARCH64_OPND_SVE_ASIMM:
2459	  min_value = -128;
2460	  goto sve_aimm;
2461
2462	case AARCH64_OPND_SVE_I1_HALF_ONE:
2463	  assert (opnd->imm.is_fp);
2464	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2465	    {
2466	      set_other_error (mismatch_detail, idx,
2467			       _("floating-point value must be 0.5 or 1.0"));
2468	      return 0;
2469	    }
2470	  break;
2471
2472	case AARCH64_OPND_SVE_I1_HALF_TWO:
2473	  assert (opnd->imm.is_fp);
2474	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2475	    {
2476	      set_other_error (mismatch_detail, idx,
2477			       _("floating-point value must be 0.5 or 2.0"));
2478	      return 0;
2479	    }
2480	  break;
2481
2482	case AARCH64_OPND_SVE_I1_ZERO_ONE:
2483	  assert (opnd->imm.is_fp);
2484	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2485	    {
2486	      set_other_error (mismatch_detail, idx,
2487			       _("floating-point value must be 0.0 or 1.0"));
2488	      return 0;
2489	    }
2490	  break;
2491
2492	case AARCH64_OPND_SVE_INV_LIMM:
2493	  {
2494	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2495	    uint64_t uimm = ~opnd->imm.value;
2496	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2497	      {
2498		set_other_error (mismatch_detail, idx,
2499				 _("immediate out of range"));
2500		return 0;
2501	      }
2502	  }
2503	  break;
2504
2505	case AARCH64_OPND_SVE_LIMM_MOV:
2506	  {
2507	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2508	    uint64_t uimm = opnd->imm.value;
2509	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2510	      {
2511		set_other_error (mismatch_detail, idx,
2512				 _("immediate out of range"));
2513		return 0;
2514	      }
2515	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2516	      {
2517		set_other_error (mismatch_detail, idx,
2518				 _("invalid replicated MOV immediate"));
2519		return 0;
2520	      }
2521	  }
2522	  break;
2523
2524	case AARCH64_OPND_SVE_PATTERN_SCALED:
2525	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2526	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2527	    {
2528	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2529	      return 0;
2530	    }
2531	  break;
2532
2533	case AARCH64_OPND_SVE_SHLIMM_PRED:
2534	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2535	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2536	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2537	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2538	    {
2539	      set_imm_out_of_range_error (mismatch_detail, idx,
2540					  0, 8 * size - 1);
2541	      return 0;
2542	    }
2543	  break;
2544
2545	case AARCH64_OPND_SVE_SHRIMM_PRED:
2546	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2547	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2548	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2549	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2550	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2551	    {
2552	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2553	      return 0;
2554	    }
2555	  break;
2556
2557	default:
2558	  break;
2559	}
2560      break;
2561
2562    case AARCH64_OPND_CLASS_SYSTEM:
2563      switch (type)
2564	{
2565	case AARCH64_OPND_PSTATEFIELD:
2566	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2567	  /* MSR UAO, #uimm4
2568	     MSR PAN, #uimm4
2569	     MSR SSBS,#uimm4
2570	     The immediate must be #0 or #1.  */
2571	  if ((opnd->pstatefield == 0x03	/* UAO.  */
2572	       || opnd->pstatefield == 0x04	/* PAN.  */
2573	       || opnd->pstatefield == 0x19     /* SSBS.  */
2574	       || opnd->pstatefield == 0x1a)	/* DIT.  */
2575	      && opnds[1].imm.value > 1)
2576	    {
2577	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2578	      return 0;
2579	    }
2580	  /* MSR SPSel, #uimm4
2581	     Uses uimm4 as a control value to select the stack pointer: if
2582	     bit 0 is set it selects the current exception level's stack
2583	     pointer; if bit 0 is clear it selects the shared EL0 stack pointer.
2584	     Bits 1 to 3 of uimm4 are reserved and should be zero.  */
2585	  if (opnd->pstatefield == 0x05 /* spsel */ && opnds[1].imm.value > 1)
2586	    {
2587	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1);
2588	      return 0;
2589	    }
2590	  break;
2591	default:
2592	  break;
2593	}
2594      break;
2595
2596    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2597      /* Get the upper bound for the element index.  */
2598      if (opcode->op == OP_FCMLA_ELEM)
2599	/* FCMLA index range depends on the vector size of other operands
2600	   and is halved because complex numbers take two elements.  */
2601	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2602	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2603      else
2604	num = 16;
2605      num = num / aarch64_get_qualifier_esize (qualifier) - 1;
2606      assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2607
2608      /* Index out-of-range.  */
2609      if (!value_in_range_p (opnd->reglane.index, 0, num))
2610	{
2611	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2612	  return 0;
2613	}
2614      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2615	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
2616	 number is encoded in "size:M:Rm":
2617	 size	<Vm>
2618	 00		RESERVED
2619	 01		0:Rm
2620	 10		M:Rm
2621	 11		RESERVED  */
2622      if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2623	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
2624	{
2625	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2626	  return 0;
2627	}
2628      break;
2629
2630    case AARCH64_OPND_CLASS_MODIFIED_REG:
2631      assert (idx == 1 || idx == 2);
2632      switch (type)
2633	{
2634	case AARCH64_OPND_Rm_EXT:
2635	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
2636	      && opnd->shifter.kind != AARCH64_MOD_LSL)
2637	    {
2638	      set_other_error (mismatch_detail, idx,
2639			       _("extend operator expected"));
2640	      return 0;
2641	    }
2642	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2643	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
2644	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
2645	     case.  */
2646	  if (!aarch64_stack_pointer_p (opnds + 0)
2647	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2648	    {
2649	      if (!opnd->shifter.operator_present)
2650		{
2651		  set_other_error (mismatch_detail, idx,
2652				   _("missing extend operator"));
2653		  return 0;
2654		}
2655	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2656		{
2657		  set_other_error (mismatch_detail, idx,
2658				   _("'LSL' operator not allowed"));
2659		  return 0;
2660		}
2661	    }
2662	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
2663		  || opnd->shifter.kind == AARCH64_MOD_LSL);
2664	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2665	    {
2666	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2667	      return 0;
2668	    }
2669	  /* In the 64-bit form, the final register operand is written as Wm
2670	     for all but the (possibly omitted) UXTX/LSL and SXTX
2671	     operators.
2672	     N.B. GAS allows X register to be used with any operator as a
2673	     programming convenience.  */
2674	  if (qualifier == AARCH64_OPND_QLF_X
2675	      && opnd->shifter.kind != AARCH64_MOD_LSL
2676	      && opnd->shifter.kind != AARCH64_MOD_UXTX
2677	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
2678	    {
2679	      set_other_error (mismatch_detail, idx, _("W register expected"));
2680	      return 0;
2681	    }
2682	  break;
2683
2684	case AARCH64_OPND_Rm_SFT:
2685	  /* ROR is not available to the shifted register operand in
2686	     arithmetic instructions.  */
2687	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
2688	    {
2689	      set_other_error (mismatch_detail, idx,
2690			       _("shift operator expected"));
2691	      return 0;
2692	    }
2693	  if (opnd->shifter.kind == AARCH64_MOD_ROR
2694	      && opcode->iclass != log_shift)
2695	    {
2696	      set_other_error (mismatch_detail, idx,
2697			       _("'ROR' operator not allowed"));
2698	      return 0;
2699	    }
2700	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2701	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
2702	    {
2703	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2704	      return 0;
2705	    }
2706	  break;
2707
2708	default:
2709	  break;
2710	}
2711      break;
2712
2713    default:
2714      break;
2715    }
2716
2717  return 1;
2718}
2719
2720/* Main entrypoint for the operand constraint checking.
2721
2722   Return 1 if operands of *INST meet the constraint applied by the operand
2723   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2724   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2725   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2726   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2727   error kind when it is notified that an instruction does not pass the check).
2728
2729   Un-determined operand qualifiers may get established during the process.  */
2730
2731int
2732aarch64_match_operands_constraint (aarch64_inst *inst,
2733				   aarch64_operand_error *mismatch_detail)
2734{
2735  int i;
2736
2737  DEBUG_TRACE ("enter");
2738
2739  /* Check for cases where a source register needs to be the same as the
2740     destination register.  Do this before matching qualifiers since if
2741     an instruction has both invalid tying and invalid qualifiers,
2742     the error about qualifiers would suggest several alternative
2743     instructions that also have invalid tying.  */
2744  i = inst->opcode->tied_operand;
2745  if (i > 0 && (inst->operands[0].reg.regno != inst->operands[i].reg.regno))
2746    {
2747      if (mismatch_detail)
2748	{
2749	  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2750	  mismatch_detail->index = i;
2751	  mismatch_detail->error = NULL;
2752	}
2753      return 0;
2754    }
2755
2756  /* Match operands' qualifier.
2757     *INST has already had qualifiers established for some, if not all, of
2758     its operands; we need to find out whether these established
2759     qualifiers match one of the qualifier sequences in
2760     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2761     with the corresponding qualifier in such a sequence.
2762     Only basic operand constraint checking is done here; the more thorough
2763     constraint checking will be carried out by operand_general_constraint_met_p,
2764     which has to be called after this in order to get all of the operands'
2765     qualifiers established.  */
2766  if (match_operands_qualifier (inst, TRUE /* update_p */) == 0)
2767    {
2768      DEBUG_TRACE ("FAIL on operand qualifier matching");
2769      if (mismatch_detail)
2770	{
2771	  /* Return an error type to indicate that it is a qualifier
2772	     matching failure; we don't care about which operand as there
2773	     is enough information in the opcode table to reproduce it.  */
2774	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2775	  mismatch_detail->index = -1;
2776	  mismatch_detail->error = NULL;
2777	}
2778      return 0;
2779    }
2780
2781  /* Match operands' constraint.  */
2782  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2783    {
2784      enum aarch64_opnd type = inst->opcode->operands[i];
2785      if (type == AARCH64_OPND_NIL)
2786	break;
2787      if (inst->operands[i].skip)
2788	{
2789	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2790	  continue;
2791	}
2792      if (operand_general_constraint_met_p (inst->operands, i, type,
2793					    inst->opcode, mismatch_detail) == 0)
2794	{
2795	  DEBUG_TRACE ("FAIL on operand %d", i);
2796	  return 0;
2797	}
2798    }
2799
2800  DEBUG_TRACE ("PASS");
2801
2802  return 1;
2803}
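
/* A minimal sketch of how a client might drive the checker above;
   report_error is a hypothetical helper, not part of this library:

     aarch64_operand_error detail;
     if (!aarch64_match_operands_constraint (inst, &detail))
       report_error (detail.kind, detail.index, detail.error);  */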
2804
2805/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2806   Also updates the TYPE of each INST->OPERANDS with the corresponding
2807   value of OPCODE->OPERANDS.
2808
2809   Note that some operand qualifiers may need to be manually cleared by
2810   the caller before it further calls aarch64_opcode_encode; by
2811   doing this, it helps the qualifier matching facilities work
2812   properly.  */
2813
2814const aarch64_opcode*
2815aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2816{
2817  int i;
2818  const aarch64_opcode *old = inst->opcode;
2819
2820  inst->opcode = opcode;
2821
2822  /* Update the operand types.  */
2823  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2824    {
2825      inst->operands[i].type = opcode->operands[i];
2826      if (opcode->operands[i] == AARCH64_OPND_NIL)
2827	break;
2828    }
2829
2830  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2831
2832  return old;
2833}
2834
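/* Return the index of OPERAND in the operand list OPERANDS, or -1 if it
   is not found before the AARCH64_OPND_NIL terminator.  */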
2835int
2836aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2837{
2838  int i;
2839  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2840    if (operands[i] == operand)
2841      return i;
2842    else if (operands[i] == AARCH64_OPND_NIL)
2843      break;
2844  return -1;
2845}
2846
2847/* R0...R30, followed by FOR31.  */
2848#define BANK(R, FOR31) \
2849  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
2850    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2851    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2852    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
2853/* [0][0]  32-bit integer regs with sp   Wn
2854   [0][1]  64-bit integer regs with sp   Xn  sf=1
2855   [1][0]  32-bit integer regs with #0   Wn
2856   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
2857static const char *int_reg[2][2][32] = {
2858#define R32(X) "w" #X
2859#define R64(X) "x" #X
2860  { BANK (R32, "wsp"), BANK (R64, "sp") },
2861  { BANK (R32, "wzr"), BANK (R64, "xzr") }
2862#undef R64
2863#undef R32
2864};
2865
2866/* Names of the SVE vector registers, first with .S suffixes,
2867   then with .D suffixes.  */
2868
2869static const char *sve_reg[2][32] = {
2870#define ZS(X) "z" #X ".s"
2871#define ZD(X) "z" #X ".d"
2872  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2873#undef ZD
2874#undef ZS
2875};
2876#undef BANK
2877
2878/* Return the integer register name.
2879   If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2880
2881static inline const char *
2882get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2883{
2884  const int has_zr = sp_reg_p ? 0 : 1;
2885  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2886  return int_reg[has_zr][is_64][regno];
2887}
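
/* For example, get_int_reg_name (31, AARCH64_OPND_QLF_X, 1) returns "sp",
   while get_int_reg_name (31, AARCH64_OPND_QLF_W, 0) returns "wzr".  */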
2888
2889/* Like get_int_reg_name, but IS_64 is always 1.  */
2890
2891static inline const char *
2892get_64bit_int_reg_name (int regno, int sp_reg_p)
2893{
2894  const int has_zr = sp_reg_p ? 0 : 1;
2895  return int_reg[has_zr][1][regno];
2896}
2897
2898/* Get the name of the integer offset register in OPND, using the shift type
2899   to decide whether it's a word or doubleword.  */
2900
2901static inline const char *
2902get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2903{
2904  switch (opnd->shifter.kind)
2905    {
2906    case AARCH64_MOD_UXTW:
2907    case AARCH64_MOD_SXTW:
2908      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2909
2910    case AARCH64_MOD_LSL:
2911    case AARCH64_MOD_SXTX:
2912      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2913
2914    default:
2915      abort ();
2916    }
2917}
2918
2919/* Get the name of the SVE vector offset register in OPND, using the operand
2920   qualifier to decide whether the suffix should be .S or .D.  */
2921
2922static inline const char *
2923get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2924{
2925  assert (qualifier == AARCH64_OPND_QLF_S_S
2926	  || qualifier == AARCH64_OPND_QLF_S_D);
2927  return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2928}
2929
2930/* Types for expanding an encoded 8-bit value to a floating-point value.  */
2931
2932typedef union
2933{
2934  uint64_t i;
2935  double   d;
2936} double_conv_t;
2937
2938typedef union
2939{
2940  uint32_t i;
2941  float    f;
2942} single_conv_t;
2943
2944typedef union
2945{
2946  uint32_t i;
2947  float    f;
2948} half_conv_t;
2949
2950/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2951   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2952   (depending on the type of the instruction).  IMM8 will be expanded to a
2953   single-precision floating-point value (SIZE == 4) or a double-precision
2954   floating-point value (SIZE == 8).  A half-precision floating-point value
2955   (SIZE == 2) is expanded to a single-precision floating-point value.  The
2956   expanded value is returned.  */
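/* A worked example: IMM8 == 0x70 expands to 0x3f800000 (1.0f) when
   SIZE == 4 and to 0x3ff0000000000000 (1.0) when SIZE == 8.  */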
2957
2958static uint64_t
2959expand_fp_imm (int size, uint32_t imm8)
2960{
2961  uint64_t imm = 0;
2962  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
2963
2964  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
2965  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
2966  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
2967  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
2968    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
2969  if (size == 8)
2970    {
2971      imm = (imm8_7 << (63-32))		/* imm8<7>  */
2972	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
2973	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
2974	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
2975	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
2976      imm <<= 32;
2977    }
2978  else if (size == 4 || size == 2)
2979    {
2980      imm = (imm8_7 << 31)	/* imm8<7>              */
2981	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
2982	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
2983	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
2984    }
2985  else
2986    {
2987      /* An unsupported size.  */
2988      assert (0);
2989    }
2990
2991  return imm;
2992}
2993
2994/* Produce the string representation of the register list operand *OPND
2995   in the buffer pointed by BUF of size SIZE.  PREFIX is the part of
2996   the register name that comes before the register number, such as "v".  */
2997static void
2998print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
2999		     const char *prefix)
3000{
3001  const int num_regs = opnd->reglist.num_regs;
3002  const int first_reg = opnd->reglist.first_regno;
3003  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3004  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3005  char tb[8];	/* Temporary buffer.  */
3006
3007  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3008  assert (num_regs >= 1 && num_regs <= 4);
3009
3010  /* Prepare the index if any.  */
3011  if (opnd->reglist.has_index)
3012    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
3013    snprintf (tb, 8, "[%" PRIi64 "]", (opnd->reglist.index % 100));
3014  else
3015    tb[0] = '\0';
3016
3017  /* The hyphenated form is preferred for disassembly if there are
3018     more than two registers in the list, and the register numbers
3019     are monotonically increasing in increments of one.  */
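  /* For example, a four-register list starting at v4 with the 4S qualifier
     is printed as {v4.4s-v7.4s}, whereas {v31.4s, v0.4s, v1.4s} wraps
     around and so uses the long form below.  */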
3020  if (num_regs > 2 && last_reg > first_reg)
3021    snprintf (buf, size, "{%s%d.%s-%s%d.%s}%s", prefix, first_reg, qlf_name,
3022	      prefix, last_reg, qlf_name, tb);
3023  else
3024    {
3025      const int reg0 = first_reg;
3026      const int reg1 = (first_reg + 1) & 0x1f;
3027      const int reg2 = (first_reg + 2) & 0x1f;
3028      const int reg3 = (first_reg + 3) & 0x1f;
3029
3030      switch (num_regs)
3031	{
3032	case 1:
3033	  snprintf (buf, size, "{%s%d.%s}%s", prefix, reg0, qlf_name, tb);
3034	  break;
3035	case 2:
3036	  snprintf (buf, size, "{%s%d.%s, %s%d.%s}%s", prefix, reg0, qlf_name,
3037		    prefix, reg1, qlf_name, tb);
3038	  break;
3039	case 3:
3040	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s}%s",
3041		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3042		    prefix, reg2, qlf_name, tb);
3043	  break;
3044	case 4:
3045	  snprintf (buf, size, "{%s%d.%s, %s%d.%s, %s%d.%s, %s%d.%s}%s",
3046		    prefix, reg0, qlf_name, prefix, reg1, qlf_name,
3047		    prefix, reg2, qlf_name, prefix, reg3, qlf_name, tb);
3048	  break;
3049	}
3050    }
3051}
3052
3053/* Print the register+immediate address in OPND to BUF, which has SIZE
3054   characters.  BASE is the name of the base register.  */
3055
3056static void
3057print_immediate_offset_address (char *buf, size_t size,
3058				const aarch64_opnd_info *opnd,
3059				const char *base)
3060{
3061  if (opnd->addr.writeback)
3062    {
3063      if (opnd->addr.preind)
3064        {
3065	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3066            snprintf (buf, size, "[%s]!", base);
3067          else
3068	    snprintf (buf, size, "[%s, #%d]!", base, opnd->addr.offset.imm);
3069        }
3070      else
3071	snprintf (buf, size, "[%s], #%d", base, opnd->addr.offset.imm);
3072    }
3073  else
3074    {
3075      if (opnd->shifter.operator_present)
3076	{
3077	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3078	  snprintf (buf, size, "[%s, #%d, mul vl]",
3079		    base, opnd->addr.offset.imm);
3080	}
3081      else if (opnd->addr.offset.imm)
3082	snprintf (buf, size, "[%s, #%d]", base, opnd->addr.offset.imm);
3083      else
3084	snprintf (buf, size, "[%s]", base);
3085    }
3086}
3087
3088/* Produce the string representation of the register offset address operand
3089   *OPND in the buffer pointed by BUF of size SIZE.  BASE and OFFSET are
3090   the names of the base and offset registers.  */
3091static void
3092print_register_offset_address (char *buf, size_t size,
3093			       const aarch64_opnd_info *opnd,
3094			       const char *base, const char *offset)
3095{
3096  char tb[16];			/* Temporary buffer.  */
3097  bfd_boolean print_extend_p = TRUE;
3098  bfd_boolean print_amount_p = TRUE;
3099  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3100
3101  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3102				|| !opnd->shifter.amount_present))
3103    {
3104      /* Don't print the shift/extend amount when the amount is zero and
3105         it is not the special case of an 8-bit load/store instruction.  */
3106      print_amount_p = FALSE;
3107      /* Likewise, no need to print the shift operator LSL in such a
3108	 situation.  */
3109      if (opnd->shifter.kind == AARCH64_MOD_LSL)
3110	print_extend_p = FALSE;
3111    }
3112
3113  /* Prepare for the extend/shift.  */
3114  if (print_extend_p)
3115    {
3116      if (print_amount_p)
3117	snprintf (tb, sizeof (tb), ", %s #%" PRIi64, shift_name,
3118  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
3119		  (opnd->shifter.amount % 100));
3120      else
3121	snprintf (tb, sizeof (tb), ", %s", shift_name);
3122    }
3123  else
3124    tb[0] = '\0';
3125
3126  snprintf (buf, size, "[%s, %s%s]", base, offset, tb);
3127}
3128
3129/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3130   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
3131   PC, PCREL_P and ADDRESS are used to pass in and return information about
3132   the PC-relative address calculation, where the PC value is passed in
3133   PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P is non-NULL)
3134   will return 1 and *ADDRESS (if ADDRESS is non-NULL) will return the
3135   calculated address; otherwise, *PCREL_P (if PCREL_P is non-NULL) returns 0.
3136
3137   The function serves both the disassembler and the assembler diagnostics
3138   issuer, which is the reason why it lives in this file.  */
3139
3140void
3141aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3142		       const aarch64_opcode *opcode,
3143		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3144		       bfd_vma *address, char** notes)
3145{
3146  unsigned int i, num_conds;
3147  const char *name = NULL;
3148  const aarch64_opnd_info *opnd = opnds + idx;
3149  enum aarch64_modifier_kind kind;
3150  uint64_t addr, enum_value;
3151
3152  buf[0] = '\0';
3153  if (pcrel_p)
3154    *pcrel_p = 0;
3155
3156  switch (opnd->type)
3157    {
3158    case AARCH64_OPND_Rd:
3159    case AARCH64_OPND_Rn:
3160    case AARCH64_OPND_Rm:
3161    case AARCH64_OPND_Rt:
3162    case AARCH64_OPND_Rt2:
3163    case AARCH64_OPND_Rs:
3164    case AARCH64_OPND_Ra:
3165    case AARCH64_OPND_Rt_SYS:
3166    case AARCH64_OPND_PAIRREG:
3167    case AARCH64_OPND_SVE_Rm:
3168      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3169	 the <ic_op>, therefore we use opnd->present to override the
3170	 generic optional-ness information.  */
3171      if (opnd->type == AARCH64_OPND_Rt_SYS)
3172	{
3173	  if (!opnd->present)
3174	    break;
3175	}
3176      /* Omit the operand, e.g. RET.  */
3177      else if (optional_operand_p (opcode, idx)
3178	       && (opnd->reg.regno
3179		   == get_optional_operand_default_value (opcode)))
3180	break;
3181      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3182	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3183      snprintf (buf, size, "%s",
3184		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3185      break;
3186
3187    case AARCH64_OPND_Rd_SP:
3188    case AARCH64_OPND_Rn_SP:
3189    case AARCH64_OPND_Rt_SP:
3190    case AARCH64_OPND_SVE_Rn_SP:
3191    case AARCH64_OPND_Rm_SP:
3192      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3193	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
3194	      || opnd->qualifier == AARCH64_OPND_QLF_X
3195	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
3196      snprintf (buf, size, "%s",
3197		get_int_reg_name (opnd->reg.regno, opnd->qualifier, 1));
3198      break;
3199
3200    case AARCH64_OPND_Rm_EXT:
3201      kind = opnd->shifter.kind;
3202      assert (idx == 1 || idx == 2);
3203      if ((aarch64_stack_pointer_p (opnds)
3204	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3205	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
3206	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
3207	       && kind == AARCH64_MOD_UXTW)
3208	      || (opnd->qualifier == AARCH64_OPND_QLF_X
3209		  && kind == AARCH64_MOD_UXTX)))
3210	{
3211	  /* 'LSL' is the preferred form in this case.  */
3212	  kind = AARCH64_MOD_LSL;
3213	  if (opnd->shifter.amount == 0)
3214	    {
3215	      /* Shifter omitted.  */
3216	      snprintf (buf, size, "%s",
3217			get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3218	      break;
3219	    }
3220	}
3221      if (opnd->shifter.amount)
3222	snprintf (buf, size, "%s, %s #%" PRIi64,
3223		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3224		  aarch64_operand_modifiers[kind].name,
3225		  opnd->shifter.amount);
3226      else
3227	snprintf (buf, size, "%s, %s",
3228		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3229		  aarch64_operand_modifiers[kind].name);
3230      break;
3231
3232    case AARCH64_OPND_Rm_SFT:
3233      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3234	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3235      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3236	snprintf (buf, size, "%s",
3237		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0));
3238      else
3239	snprintf (buf, size, "%s, %s #%" PRIi64,
3240		  get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0),
3241		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3242		  opnd->shifter.amount);
3243      break;
3244
3245    case AARCH64_OPND_Fd:
3246    case AARCH64_OPND_Fn:
3247    case AARCH64_OPND_Fm:
3248    case AARCH64_OPND_Fa:
3249    case AARCH64_OPND_Ft:
3250    case AARCH64_OPND_Ft2:
3251    case AARCH64_OPND_Sd:
3252    case AARCH64_OPND_Sn:
3253    case AARCH64_OPND_Sm:
3254    case AARCH64_OPND_SVE_VZn:
3255    case AARCH64_OPND_SVE_Vd:
3256    case AARCH64_OPND_SVE_Vm:
3257    case AARCH64_OPND_SVE_Vn:
3258      snprintf (buf, size, "%s%d", aarch64_get_qualifier_name (opnd->qualifier),
3259		opnd->reg.regno);
3260      break;
3261
3262    case AARCH64_OPND_Va:
3263    case AARCH64_OPND_Vd:
3264    case AARCH64_OPND_Vn:
3265    case AARCH64_OPND_Vm:
3266      snprintf (buf, size, "v%d.%s", opnd->reg.regno,
3267		aarch64_get_qualifier_name (opnd->qualifier));
3268      break;
3269
3270    case AARCH64_OPND_Ed:
3271    case AARCH64_OPND_En:
3272    case AARCH64_OPND_Em:
3273    case AARCH64_OPND_Em16:
3274    case AARCH64_OPND_SM3_IMM2:
3275      snprintf (buf, size, "v%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3276		aarch64_get_qualifier_name (opnd->qualifier),
3277		opnd->reglane.index);
3278      break;
3279
3280    case AARCH64_OPND_VdD1:
3281    case AARCH64_OPND_VnD1:
3282      snprintf (buf, size, "v%d.d[1]", opnd->reg.regno);
3283      break;
3284
3285    case AARCH64_OPND_LVn:
3286    case AARCH64_OPND_LVt:
3287    case AARCH64_OPND_LVt_AL:
3288    case AARCH64_OPND_LEt:
3289      print_register_list (buf, size, opnd, "v");
3290      break;
3291
3292    case AARCH64_OPND_SVE_Pd:
3293    case AARCH64_OPND_SVE_Pg3:
3294    case AARCH64_OPND_SVE_Pg4_5:
3295    case AARCH64_OPND_SVE_Pg4_10:
3296    case AARCH64_OPND_SVE_Pg4_16:
3297    case AARCH64_OPND_SVE_Pm:
3298    case AARCH64_OPND_SVE_Pn:
3299    case AARCH64_OPND_SVE_Pt:
3300      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3301	snprintf (buf, size, "p%d", opnd->reg.regno);
3302      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3303	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3304	snprintf (buf, size, "p%d/%s", opnd->reg.regno,
3305		  aarch64_get_qualifier_name (opnd->qualifier));
3306      else
3307	snprintf (buf, size, "p%d.%s", opnd->reg.regno,
3308		  aarch64_get_qualifier_name (opnd->qualifier));
3309      break;
3310
3311    case AARCH64_OPND_SVE_Za_5:
3312    case AARCH64_OPND_SVE_Za_16:
3313    case AARCH64_OPND_SVE_Zd:
3314    case AARCH64_OPND_SVE_Zm_5:
3315    case AARCH64_OPND_SVE_Zm_16:
3316    case AARCH64_OPND_SVE_Zn:
3317    case AARCH64_OPND_SVE_Zt:
3318      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3319	snprintf (buf, size, "z%d", opnd->reg.regno);
3320      else
3321	snprintf (buf, size, "z%d.%s", opnd->reg.regno,
3322		  aarch64_get_qualifier_name (opnd->qualifier));
3323      break;
3324
3325    case AARCH64_OPND_SVE_ZnxN:
3326    case AARCH64_OPND_SVE_ZtxN:
3327      print_register_list (buf, size, opnd, "z");
3328      break;
3329
3330    case AARCH64_OPND_SVE_Zm3_INDEX:
3331    case AARCH64_OPND_SVE_Zm3_22_INDEX:
3332    case AARCH64_OPND_SVE_Zm3_11_INDEX:
3333    case AARCH64_OPND_SVE_Zm4_11_INDEX:
3334    case AARCH64_OPND_SVE_Zm4_INDEX:
3335    case AARCH64_OPND_SVE_Zn_INDEX:
3336      snprintf (buf, size, "z%d.%s[%" PRIi64 "]", opnd->reglane.regno,
3337		aarch64_get_qualifier_name (opnd->qualifier),
3338		opnd->reglane.index);
3339      break;
3340
3341    case AARCH64_OPND_CRn:
3342    case AARCH64_OPND_CRm:
3343      snprintf (buf, size, "C%" PRIi64, opnd->imm.value);
3344      break;
3345
3346    case AARCH64_OPND_IDX:
3347    case AARCH64_OPND_MASK:
3348    case AARCH64_OPND_IMM:
3349    case AARCH64_OPND_IMM_2:
3350    case AARCH64_OPND_WIDTH:
3351    case AARCH64_OPND_UIMM3_OP1:
3352    case AARCH64_OPND_UIMM3_OP2:
3353    case AARCH64_OPND_BIT_NUM:
3354    case AARCH64_OPND_IMM_VLSL:
3355    case AARCH64_OPND_IMM_VLSR:
3356    case AARCH64_OPND_SHLL_IMM:
3357    case AARCH64_OPND_IMM0:
3358    case AARCH64_OPND_IMMR:
3359    case AARCH64_OPND_IMMS:
3360    case AARCH64_OPND_FBITS:
3361    case AARCH64_OPND_TME_UIMM16:
3362    case AARCH64_OPND_SIMM5:
3363    case AARCH64_OPND_SVE_SHLIMM_PRED:
3364    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3365    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3366    case AARCH64_OPND_SVE_SHRIMM_PRED:
3367    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3368    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3369    case AARCH64_OPND_SVE_SIMM5:
3370    case AARCH64_OPND_SVE_SIMM5B:
3371    case AARCH64_OPND_SVE_SIMM6:
3372    case AARCH64_OPND_SVE_SIMM8:
3373    case AARCH64_OPND_SVE_UIMM3:
3374    case AARCH64_OPND_SVE_UIMM7:
3375    case AARCH64_OPND_SVE_UIMM8:
3376    case AARCH64_OPND_SVE_UIMM8_53:
3377    case AARCH64_OPND_IMM_ROT1:
3378    case AARCH64_OPND_IMM_ROT2:
3379    case AARCH64_OPND_IMM_ROT3:
3380    case AARCH64_OPND_SVE_IMM_ROT1:
3381    case AARCH64_OPND_SVE_IMM_ROT2:
3382    case AARCH64_OPND_SVE_IMM_ROT3:
3383      snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3384      break;
3385
3386    case AARCH64_OPND_SVE_I1_HALF_ONE:
3387    case AARCH64_OPND_SVE_I1_HALF_TWO:
3388    case AARCH64_OPND_SVE_I1_ZERO_ONE:
3389      {
3390	single_conv_t c;
3391	c.i = opnd->imm.value;
3392	snprintf (buf, size, "#%.1f", c.f);
3393	break;
3394      }
3395
3396    case AARCH64_OPND_SVE_PATTERN:
3397      if (optional_operand_p (opcode, idx)
3398	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3399	break;
3400      enum_value = opnd->imm.value;
3401      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3402      if (aarch64_sve_pattern_array[enum_value])
3403	snprintf (buf, size, "%s", aarch64_sve_pattern_array[enum_value]);
3404      else
3405	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3406      break;
3407
3408    case AARCH64_OPND_SVE_PATTERN_SCALED:
3409      if (optional_operand_p (opcode, idx)
3410	  && !opnd->shifter.operator_present
3411	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3412	break;
3413      enum_value = opnd->imm.value;
3414      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3415      if (aarch64_sve_pattern_array[opnd->imm.value])
3416	snprintf (buf, size, "%s", aarch64_sve_pattern_array[opnd->imm.value]);
3417      else
3418	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3419      if (opnd->shifter.operator_present)
3420	{
3421	  size_t len = strlen (buf);
3422	  snprintf (buf + len, size - len, ", %s #%" PRIi64,
3423		    aarch64_operand_modifiers[opnd->shifter.kind].name,
3424		    opnd->shifter.amount);
3425	}
3426      break;
3427
3428    case AARCH64_OPND_SVE_PRFOP:
3429      enum_value = opnd->imm.value;
3430      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3431      if (aarch64_sve_prfop_array[enum_value])
3432	snprintf (buf, size, "%s", aarch64_sve_prfop_array[enum_value]);
3433      else
3434	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3435      break;
3436
3437    case AARCH64_OPND_IMM_MOV:
3438      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3439	{
3440	case 4:	/* e.g. MOV Wd, #<imm32>.  */
3441	    {
3442	      int imm32 = opnd->imm.value;
3443	      snprintf (buf, size, "#0x%-20x\t// #%d", imm32, imm32);
3444	    }
3445	  break;
3446	case 8:	/* e.g. MOV Xd, #<imm64>.  */
3447	  snprintf (buf, size, "#0x%-20" PRIx64 "\t// #%" PRIi64,
3448		    opnd->imm.value, opnd->imm.value);
3449	  break;
3450	default: assert (0);
3451	}
3452      break;
3453
3454    case AARCH64_OPND_FPIMM0:
3455      snprintf (buf, size, "#0.0");
3456      break;
3457
3458    case AARCH64_OPND_LIMM:
3459    case AARCH64_OPND_AIMM:
3460    case AARCH64_OPND_HALF:
3461    case AARCH64_OPND_SVE_INV_LIMM:
3462    case AARCH64_OPND_SVE_LIMM:
3463    case AARCH64_OPND_SVE_LIMM_MOV:
3464      if (opnd->shifter.amount)
3465	snprintf (buf, size, "#0x%" PRIx64 ", lsl #%" PRIi64, opnd->imm.value,
3466		  opnd->shifter.amount);
3467      else
3468	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3469      break;
3470
3471    case AARCH64_OPND_SIMD_IMM:
3472    case AARCH64_OPND_SIMD_IMM_SFT:
3473      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3474	  || opnd->shifter.kind == AARCH64_MOD_NONE)
3475	snprintf (buf, size, "#0x%" PRIx64, opnd->imm.value);
3476      else
3477	snprintf (buf, size, "#0x%" PRIx64 ", %s #%" PRIi64, opnd->imm.value,
3478		  aarch64_operand_modifiers[opnd->shifter.kind].name,
3479		  opnd->shifter.amount);
3480      break;
3481
3482    case AARCH64_OPND_SVE_AIMM:
3483    case AARCH64_OPND_SVE_ASIMM:
3484      if (opnd->shifter.amount)
3485	snprintf (buf, size, "#%" PRIi64 ", lsl #%" PRIi64, opnd->imm.value,
3486		  opnd->shifter.amount);
3487      else
3488	snprintf (buf, size, "#%" PRIi64, opnd->imm.value);
3489      break;
3490
3491    case AARCH64_OPND_FPIMM:
3492    case AARCH64_OPND_SIMD_FPIMM:
3493    case AARCH64_OPND_SVE_FPIMM8:
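      /* The 8-bit encoded floating-point immediate is expanded by
	 expand_fp_imm to the IEEE bit pattern for the element size and then
	 printed in full precision.  */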
3494      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3495	{
3496	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
3497	    {
3498	      half_conv_t c;
3499	      c.i = expand_fp_imm (2, opnd->imm.value);
3500	      snprintf (buf, size,  "#%.18e", c.f);
3501	    }
3502	  break;
3503	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
3504	    {
3505	      single_conv_t c;
3506	      c.i = expand_fp_imm (4, opnd->imm.value);
3507	      snprintf (buf, size,  "#%.18e", c.f);
3508	    }
3509	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
3511	    {
3512	      double_conv_t c;
3513	      c.i = expand_fp_imm (8, opnd->imm.value);
3514	      snprintf (buf, size,  "#%.18e", c.d);
3515	    }
3516	  break;
3517	default: assert (0);
3518	}
3519      break;
3520
3521    case AARCH64_OPND_CCMP_IMM:
3522    case AARCH64_OPND_NZCV:
3523    case AARCH64_OPND_EXCEPTION:
3524    case AARCH64_OPND_UIMM4:
3525    case AARCH64_OPND_UIMM4_ADDG:
3526    case AARCH64_OPND_UIMM7:
3527    case AARCH64_OPND_UIMM10:
3528      if (optional_operand_p (opcode, idx) == TRUE
3529	  && (opnd->imm.value ==
3530	      (int64_t) get_optional_operand_default_value (opcode)))
3531	/* Omit the operand, e.g. DCPS1.  */
3532	break;
3533      snprintf (buf, size, "#0x%x", (unsigned int)opnd->imm.value);
3534      break;
3535
3536    case AARCH64_OPND_COND:
3537    case AARCH64_OPND_COND1:
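      /* Print the primary condition name and append any synonyms as a
	 trailing comment, e.g. "cs  // cs = hs" (illustrative output).  */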
3538      snprintf (buf, size, "%s", opnd->cond->names[0]);
3539      num_conds = ARRAY_SIZE (opnd->cond->names);
3540      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3541	{
3542	  size_t len = strlen (buf);
3543	  if (i == 1)
3544	    snprintf (buf + len, size - len, "  // %s = %s",
3545		      opnd->cond->names[0], opnd->cond->names[i]);
3546	  else
3547	    snprintf (buf + len, size - len, ", %s",
3548		      opnd->cond->names[i]);
3549	}
3550      break;
3551
3552    case AARCH64_OPND_ADDR_ADRP:
3553      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3554	+ opnd->imm.value;
3555      if (pcrel_p)
3556	*pcrel_p = 1;
3557      if (address)
3558	*address = addr;
      /* This is not necessary during disassembly, as print_address_func in
	 the disassemble_info will take care of the printing.  But some other
	 callers may still be interested in getting the string in *STR, so
	 here we do snprintf regardless.  */
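      /* Illustrative example: with pc + AARCH64_PCREL_OFFSET = 0x1234 and
	 imm.value = 0x2000, ADDR is (0x1234 & ~0xfff) + 0x2000 = 0x3000 and
	 the operand is printed as "#0x3000".  */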
3563      snprintf (buf, size, "#0x%" PRIx64, addr);
3564      break;
3565
3566    case AARCH64_OPND_ADDR_PCREL14:
3567    case AARCH64_OPND_ADDR_PCREL19:
3568    case AARCH64_OPND_ADDR_PCREL21:
3569    case AARCH64_OPND_ADDR_PCREL26:
3570      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3571      if (pcrel_p)
3572	*pcrel_p = 1;
3573      if (address)
3574	*address = addr;
      /* This is not necessary during disassembly, as print_address_func in
	 the disassemble_info will take care of the printing.  But some other
	 callers may still be interested in getting the string in *STR, so
	 here we do snprintf regardless.  */
3579      snprintf (buf, size, "#0x%" PRIx64, addr);
3580      break;
3581
3582    case AARCH64_OPND_ADDR_SIMPLE:
3583    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3584    case AARCH64_OPND_SIMD_ADDR_POST:
3585      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3586      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3587	{
3588	  if (opnd->addr.offset.is_reg)
3589	    snprintf (buf, size, "[%s], x%d", name, opnd->addr.offset.regno);
3590	  else
3591	    snprintf (buf, size, "[%s], #%d", name, opnd->addr.offset.imm);
3592	}
3593      else
3594	snprintf (buf, size, "[%s]", name);
3595      break;
3596
3597    case AARCH64_OPND_ADDR_REGOFF:
3598    case AARCH64_OPND_SVE_ADDR_R:
3599    case AARCH64_OPND_SVE_ADDR_RR:
3600    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3601    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3602    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3603    case AARCH64_OPND_SVE_ADDR_RX:
3604    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3605    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3606    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3607      print_register_offset_address
3608	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3609	 get_offset_int_reg_name (opnd));
3610      break;
3611
3612    case AARCH64_OPND_SVE_ADDR_ZX:
3613      print_register_offset_address
3614	(buf, size, opnd,
3615	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3616	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0));
3617      break;
3618
3619    case AARCH64_OPND_SVE_ADDR_RZ:
3620    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3621    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3622    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3623    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3624    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3625    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3626    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3627    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3628    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3629    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3630    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3631      print_register_offset_address
3632	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3633	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3634      break;
3635
3636    case AARCH64_OPND_ADDR_SIMM7:
3637    case AARCH64_OPND_ADDR_SIMM9:
3638    case AARCH64_OPND_ADDR_SIMM9_2:
3639    case AARCH64_OPND_ADDR_SIMM10:
3640    case AARCH64_OPND_ADDR_SIMM11:
3641    case AARCH64_OPND_ADDR_SIMM13:
3642    case AARCH64_OPND_ADDR_OFFSET:
3643    case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3644    case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3645    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3646    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3647    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3648    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3649    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3650    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3651    case AARCH64_OPND_SVE_ADDR_RI_U6:
3652    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3653    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3654    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3655      print_immediate_offset_address
3656	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1));
3657      break;
3658
3659    case AARCH64_OPND_SVE_ADDR_ZI_U5:
3660    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3661    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3662    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3663      print_immediate_offset_address
3664	(buf, size, opnd,
3665	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier));
3666      break;
3667
3668    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3669    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3670    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3671      print_register_offset_address
3672	(buf, size, opnd,
3673	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3674	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier));
3675      break;
3676
3677    case AARCH64_OPND_ADDR_UIMM12:
3678      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3679      if (opnd->addr.offset.imm)
3680	snprintf (buf, size, "[%s, #%d]", name, opnd->addr.offset.imm);
3681      else
3682	snprintf (buf, size, "[%s]", name);
3683      break;
3684
3685    case AARCH64_OPND_SYSREG:
3686      for (i = 0; aarch64_sys_regs[i].name; ++i)
3687	{
3688	  bfd_boolean exact_match
3689	    = (aarch64_sys_regs[i].flags & opnd->sysreg.flags)
3690	       == opnd->sysreg.flags;
3691
	  /* Try to find an exact match, but if that fails, return the first
	     partial match that was found.  */
3694	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
3695	      && ! aarch64_sys_reg_deprecated_p (&aarch64_sys_regs[i])
3696	      && (name == NULL || exact_match))
3697	    {
3698	      name = aarch64_sys_regs[i].name;
3699	      if (exact_match)
3700		{
3701		  if (notes)
3702		    *notes = NULL;
3703		  break;
3704		}
3705
	      /* If we didn't match exactly, that means the presence of a flag
		 indicates what we didn't want for this instruction.  E.g. if
		 F_REG_READ is there, that means we were looking for a write
		 register.  See aarch64_ext_sysreg.  */
3710	      if (aarch64_sys_regs[i].flags & F_REG_WRITE)
3711		*notes = _("reading from a write-only register");
3712	      else if (aarch64_sys_regs[i].flags & F_REG_READ)
3713		*notes = _("writing to a read-only register");
3714	    }
3715	}
3716
3717      if (name)
3718	snprintf (buf, size, "%s", name);
3719      else
3720	{
3721	  /* Implementation defined system register.  */
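	  /* Decode the op0:op1:CRn:CRm:op2 fields of the CPENC-style value
	     (see the CPENC macro below) and print the generic
	     s<op0>_<op1>_c<CRn>_c<CRm>_<op2> form, e.g. "s3_7_c15_c2_0"
	     (illustrative value only).  */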
3722	  unsigned int value = opnd->sysreg.value;
3723	  snprintf (buf, size, "s%u_%u_c%u_c%u_%u", (value >> 14) & 0x3,
3724		    (value >> 11) & 0x7, (value >> 7) & 0xf, (value >> 3) & 0xf,
3725		    value & 0x7);
3726	}
3727      break;
3728
3729    case AARCH64_OPND_PSTATEFIELD:
3730      for (i = 0; aarch64_pstatefields[i].name; ++i)
3731	if (aarch64_pstatefields[i].value == opnd->pstatefield)
3732	  break;
3733      assert (aarch64_pstatefields[i].name);
3734      snprintf (buf, size, "%s", aarch64_pstatefields[i].name);
3735      break;
3736
3737    case AARCH64_OPND_SYSREG_AT:
3738    case AARCH64_OPND_SYSREG_DC:
3739    case AARCH64_OPND_SYSREG_IC:
3740    case AARCH64_OPND_SYSREG_TLBI:
3741    case AARCH64_OPND_SYSREG_SR:
3742      snprintf (buf, size, "%s", opnd->sysins_op->name);
3743      break;
3744
3745    case AARCH64_OPND_BARRIER:
3746      snprintf (buf, size, "%s", opnd->barrier->name);
3747      break;
3748
3749    case AARCH64_OPND_BARRIER_ISB:
      /* Operand can be omitted, e.g. in ISB.  */
3751      if (! optional_operand_p (opcode, idx)
3752	  || (opnd->barrier->value
3753	      != get_optional_operand_default_value (opcode)))
3754	snprintf (buf, size, "#0x%x", opnd->barrier->value);
3755      break;
3756
3757    case AARCH64_OPND_PRFOP:
3758      if (opnd->prfop->name != NULL)
3759	snprintf (buf, size, "%s", opnd->prfop->name);
3760      else
3761	snprintf (buf, size, "#0x%02x", opnd->prfop->value);
3762      break;
3763
3764    case AARCH64_OPND_BARRIER_PSB:
3765    case AARCH64_OPND_BTI_TARGET:
3766      if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
3767	snprintf (buf, size, "%s", opnd->hint_option->name);
3768      break;
3769
3770    default:
3771      assert (0);
3772    }
3773}
3774
3775#define CPENC(op0,op1,crn,crm,op2) \
3776  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
  /* For 3.9.3 Instructions for Accessing Special Purpose Registers.  */
3778#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
  /* For 3.9.10 System Instructions.  */
3780#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
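
/* Worked example (illustrative only): the "pmcr_el0" entry below is
   CPENC (3, 3, C9, C12, 0), i.e.
     ((3 << 19) | (3 << 16) | (9 << 12) | (12 << 8) | (0 << 5)) >> 5 = 0xdce0,
   which packs the op0:op1:CRn:CRm:op2 fields used by MRS/MSR, with op0 in
   bits [15:14] and op2 in bits [2:0].  CPENS simply fixes op0 to 1 for the
   system instructions (IC, DC, AT, TLBI) defined further below.  */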
3781
3782#define C0  0
3783#define C1  1
3784#define C2  2
3785#define C3  3
3786#define C4  4
3787#define C5  5
3788#define C6  6
3789#define C7  7
3790#define C8  8
3791#define C9  9
3792#define C10 10
3793#define C11 11
3794#define C12 12
3795#define C13 13
3796#define C14 14
3797#define C15 15
3798
/* TODO: one more issue needs to be resolved:
   1. handle cpu-implementation-defined system registers.  */
3801const aarch64_sys_reg aarch64_sys_regs [] =
3802{
3803  { "spsr_el1",         CPEN_(0,C0,0),	0 }, /* = spsr_svc */
3804  { "spsr_el12",	CPEN_ (5, C0, 0), F_ARCHEXT },
3805  { "elr_el1",          CPEN_(0,C0,1),	0 },
3806  { "elr_el12",	CPEN_ (5, C0, 1), F_ARCHEXT },
3807  { "sp_el0",           CPEN_(0,C1,0),	0 },
3808  { "spsel",            CPEN_(0,C2,0),	0 },
3809  { "daif",             CPEN_(3,C2,1),	0 },
3810  { "currentel",        CPEN_(0,C2,2),	F_REG_READ }, /* RO */
3811  { "pan",		CPEN_(0,C2,3),	F_ARCHEXT },
3812  { "uao",		CPEN_ (0, C2, 4), F_ARCHEXT },
3813  { "nzcv",             CPEN_(3,C2,0),	0 },
3814  { "ssbs",		CPEN_(3,C2,6),  F_ARCHEXT },
3815  { "fpcr",             CPEN_(3,C4,0),	0 },
3816  { "fpsr",             CPEN_(3,C4,1),	0 },
3817  { "dspsr_el0",        CPEN_(3,C5,0),	0 },
3818  { "dlr_el0",          CPEN_(3,C5,1),	0 },
3819  { "spsr_el2",         CPEN_(4,C0,0),	0 }, /* = spsr_hyp */
3820  { "elr_el2",          CPEN_(4,C0,1),	0 },
3821  { "sp_el1",           CPEN_(4,C1,0),	0 },
3822  { "spsr_irq",         CPEN_(4,C3,0),	0 },
3823  { "spsr_abt",         CPEN_(4,C3,1),	0 },
3824  { "spsr_und",         CPEN_(4,C3,2),	0 },
3825  { "spsr_fiq",         CPEN_(4,C3,3),	0 },
3826  { "spsr_el3",         CPEN_(6,C0,0),	0 },
3827  { "elr_el3",          CPEN_(6,C0,1),	0 },
3828  { "sp_el2",           CPEN_(6,C1,0),	0 },
3829  { "spsr_svc",         CPEN_(0,C0,0),	F_DEPRECATED }, /* = spsr_el1 */
3830  { "spsr_hyp",         CPEN_(4,C0,0),	F_DEPRECATED }, /* = spsr_el2 */
3831  { "midr_el1",         CPENC(3,0,C0,C0,0),	F_REG_READ }, /* RO */
3832  { "ctr_el0",          CPENC(3,3,C0,C0,1),	F_REG_READ }, /* RO */
3833  { "mpidr_el1",        CPENC(3,0,C0,C0,5),	F_REG_READ }, /* RO */
3834  { "revidr_el1",       CPENC(3,0,C0,C0,6),	F_REG_READ }, /* RO */
3835  { "aidr_el1",         CPENC(3,1,C0,C0,7),	F_REG_READ }, /* RO */
3836  { "dczid_el0",        CPENC(3,3,C0,C0,7),	F_REG_READ }, /* RO */
3837  { "id_dfr0_el1",      CPENC(3,0,C0,C1,2),	F_REG_READ }, /* RO */
3838  { "id_pfr0_el1",      CPENC(3,0,C0,C1,0),	F_REG_READ }, /* RO */
3839  { "id_pfr1_el1",      CPENC(3,0,C0,C1,1),	F_REG_READ }, /* RO */
3840  { "id_pfr2_el1",      CPENC(3,0,C0,C3,4),	F_ARCHEXT | F_REG_READ}, /* RO */
3841  { "id_afr0_el1",      CPENC(3,0,C0,C1,3),	F_REG_READ }, /* RO */
3842  { "id_mmfr0_el1",     CPENC(3,0,C0,C1,4),	F_REG_READ }, /* RO */
3843  { "id_mmfr1_el1",     CPENC(3,0,C0,C1,5),	F_REG_READ }, /* RO */
3844  { "id_mmfr2_el1",     CPENC(3,0,C0,C1,6),	F_REG_READ }, /* RO */
3845  { "id_mmfr3_el1",     CPENC(3,0,C0,C1,7),	F_REG_READ }, /* RO */
3846  { "id_mmfr4_el1",     CPENC(3,0,C0,C2,6),	F_REG_READ }, /* RO */
3847  { "id_isar0_el1",     CPENC(3,0,C0,C2,0),	F_REG_READ }, /* RO */
3848  { "id_isar1_el1",     CPENC(3,0,C0,C2,1),	F_REG_READ }, /* RO */
3849  { "id_isar2_el1",     CPENC(3,0,C0,C2,2),	F_REG_READ }, /* RO */
3850  { "id_isar3_el1",     CPENC(3,0,C0,C2,3),	F_REG_READ }, /* RO */
3851  { "id_isar4_el1",     CPENC(3,0,C0,C2,4),	F_REG_READ }, /* RO */
3852  { "id_isar5_el1",     CPENC(3,0,C0,C2,5),	F_REG_READ }, /* RO */
3853  { "mvfr0_el1",        CPENC(3,0,C0,C3,0),	F_REG_READ }, /* RO */
3854  { "mvfr1_el1",        CPENC(3,0,C0,C3,1),	F_REG_READ }, /* RO */
3855  { "mvfr2_el1",        CPENC(3,0,C0,C3,2),	F_REG_READ }, /* RO */
3856  { "ccsidr_el1",       CPENC(3,1,C0,C0,0),	F_REG_READ }, /* RO */
3857  { "id_aa64pfr0_el1",  CPENC(3,0,C0,C4,0),	F_REG_READ }, /* RO */
3858  { "id_aa64pfr1_el1",  CPENC(3,0,C0,C4,1),	F_REG_READ }, /* RO */
3859  { "id_aa64dfr0_el1",  CPENC(3,0,C0,C5,0),	F_REG_READ }, /* RO */
3860  { "id_aa64dfr1_el1",  CPENC(3,0,C0,C5,1),	F_REG_READ }, /* RO */
3861  { "id_aa64isar0_el1", CPENC(3,0,C0,C6,0),	F_REG_READ }, /* RO */
3862  { "id_aa64isar1_el1", CPENC(3,0,C0,C6,1),	F_REG_READ }, /* RO */
3863  { "id_aa64mmfr0_el1", CPENC(3,0,C0,C7,0),	F_REG_READ }, /* RO */
3864  { "id_aa64mmfr1_el1", CPENC(3,0,C0,C7,1),	F_REG_READ }, /* RO */
3865  { "id_aa64mmfr2_el1", CPENC (3, 0, C0, C7, 2), F_ARCHEXT | F_REG_READ }, /* RO */
3866  { "id_aa64afr0_el1",  CPENC(3,0,C0,C5,4),	F_REG_READ }, /* RO */
3867  { "id_aa64afr1_el1",  CPENC(3,0,C0,C5,5),	F_REG_READ }, /* RO */
3868  { "id_aa64zfr0_el1",  CPENC (3, 0, C0, C4, 4), F_ARCHEXT | F_REG_READ }, /* RO */
3869  { "clidr_el1",        CPENC(3,1,C0,C0,1),	F_REG_READ }, /* RO */
3870  { "csselr_el1",       CPENC(3,2,C0,C0,0),	0 },
3871  { "vpidr_el2",        CPENC(3,4,C0,C0,0),	0 },
3872  { "vmpidr_el2",       CPENC(3,4,C0,C0,5),	0 },
3873  { "sctlr_el1",        CPENC(3,0,C1,C0,0),	0 },
3874  { "sctlr_el2",        CPENC(3,4,C1,C0,0),	0 },
3875  { "sctlr_el3",        CPENC(3,6,C1,C0,0),	0 },
3876  { "sctlr_el12",	CPENC (3, 5, C1, C0, 0), F_ARCHEXT },
3877  { "actlr_el1",        CPENC(3,0,C1,C0,1),	0 },
3878  { "actlr_el2",        CPENC(3,4,C1,C0,1),	0 },
3879  { "actlr_el3",        CPENC(3,6,C1,C0,1),	0 },
3880  { "cpacr_el1",        CPENC(3,0,C1,C0,2),	0 },
3881  { "cpacr_el12",	CPENC (3, 5, C1, C0, 2), F_ARCHEXT },
3882  { "cptr_el2",         CPENC(3,4,C1,C1,2),	0 },
3883  { "cptr_el3",         CPENC(3,6,C1,C1,2),	0 },
3884  { "scr_el3",          CPENC(3,6,C1,C1,0),	0 },
3885  { "hcr_el2",          CPENC(3,4,C1,C1,0),	0 },
3886  { "mdcr_el2",         CPENC(3,4,C1,C1,1),	0 },
3887  { "mdcr_el3",         CPENC(3,6,C1,C3,1),	0 },
3888  { "hstr_el2",         CPENC(3,4,C1,C1,3),	0 },
3889  { "hacr_el2",         CPENC(3,4,C1,C1,7),	0 },
3890  { "zcr_el1",          CPENC (3, 0, C1, C2, 0), F_ARCHEXT },
3891  { "zcr_el12",         CPENC (3, 5, C1, C2, 0), F_ARCHEXT },
3892  { "zcr_el2",          CPENC (3, 4, C1, C2, 0), F_ARCHEXT },
3893  { "zcr_el3",          CPENC (3, 6, C1, C2, 0), F_ARCHEXT },
3894  { "zidr_el1",         CPENC (3, 0, C0, C0, 7), F_ARCHEXT },
3895  { "ttbr0_el1",        CPENC(3,0,C2,C0,0),	0 },
3896  { "ttbr1_el1",        CPENC(3,0,C2,C0,1),	0 },
3897  { "ttbr0_el2",        CPENC(3,4,C2,C0,0),	0 },
3898  { "ttbr1_el2",	CPENC (3, 4, C2, C0, 1), F_ARCHEXT },
3899  { "ttbr0_el3",        CPENC(3,6,C2,C0,0),	0 },
3900  { "ttbr0_el12",	CPENC (3, 5, C2, C0, 0), F_ARCHEXT },
3901  { "ttbr1_el12",	CPENC (3, 5, C2, C0, 1), F_ARCHEXT },
3902  { "vttbr_el2",        CPENC(3,4,C2,C1,0),	0 },
3903  { "tcr_el1",          CPENC(3,0,C2,C0,2),	0 },
3904  { "tcr_el2",          CPENC(3,4,C2,C0,2),	0 },
3905  { "tcr_el3",          CPENC(3,6,C2,C0,2),	0 },
3906  { "tcr_el12",		CPENC (3, 5, C2, C0, 2), F_ARCHEXT },
3907  { "vtcr_el2",         CPENC(3,4,C2,C1,2),	0 },
3908  { "apiakeylo_el1",	CPENC (3, 0, C2, C1, 0), F_ARCHEXT },
3909  { "apiakeyhi_el1",	CPENC (3, 0, C2, C1, 1), F_ARCHEXT },
3910  { "apibkeylo_el1",	CPENC (3, 0, C2, C1, 2), F_ARCHEXT },
3911  { "apibkeyhi_el1",	CPENC (3, 0, C2, C1, 3), F_ARCHEXT },
3912  { "apdakeylo_el1",	CPENC (3, 0, C2, C2, 0), F_ARCHEXT },
3913  { "apdakeyhi_el1",	CPENC (3, 0, C2, C2, 1), F_ARCHEXT },
3914  { "apdbkeylo_el1",	CPENC (3, 0, C2, C2, 2), F_ARCHEXT },
3915  { "apdbkeyhi_el1",	CPENC (3, 0, C2, C2, 3), F_ARCHEXT },
3916  { "apgakeylo_el1",	CPENC (3, 0, C2, C3, 0), F_ARCHEXT },
3917  { "apgakeyhi_el1",	CPENC (3, 0, C2, C3, 1), F_ARCHEXT },
3918  { "afsr0_el1",        CPENC(3,0,C5,C1,0),	0 },
3919  { "afsr1_el1",        CPENC(3,0,C5,C1,1),	0 },
3920  { "afsr0_el2",        CPENC(3,4,C5,C1,0),	0 },
3921  { "afsr1_el2",        CPENC(3,4,C5,C1,1),	0 },
3922  { "afsr0_el3",        CPENC(3,6,C5,C1,0),	0 },
3923  { "afsr0_el12",	CPENC (3, 5, C5, C1, 0), F_ARCHEXT },
3924  { "afsr1_el3",        CPENC(3,6,C5,C1,1),	0 },
3925  { "afsr1_el12",	CPENC (3, 5, C5, C1, 1), F_ARCHEXT },
3926  { "esr_el1",          CPENC(3,0,C5,C2,0),	0 },
3927  { "esr_el2",          CPENC(3,4,C5,C2,0),	0 },
3928  { "esr_el3",          CPENC(3,6,C5,C2,0),	0 },
3929  { "esr_el12",		CPENC (3, 5, C5, C2, 0), F_ARCHEXT },
3930  { "vsesr_el2",	CPENC (3, 4, C5, C2, 3), F_ARCHEXT },
3931  { "fpexc32_el2",      CPENC(3,4,C5,C3,0),	0 },
3932  { "erridr_el1",	CPENC (3, 0, C5, C3, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3933  { "errselr_el1",	CPENC (3, 0, C5, C3, 1), F_ARCHEXT },
3934  { "erxfr_el1",	CPENC (3, 0, C5, C4, 0), F_ARCHEXT | F_REG_READ }, /* RO */
3935  { "erxctlr_el1",	CPENC (3, 0, C5, C4, 1), F_ARCHEXT },
3936  { "erxstatus_el1",	CPENC (3, 0, C5, C4, 2), F_ARCHEXT },
3937  { "erxaddr_el1",	CPENC (3, 0, C5, C4, 3), F_ARCHEXT },
3938  { "erxmisc0_el1",	CPENC (3, 0, C5, C5, 0), F_ARCHEXT },
3939  { "erxmisc1_el1",	CPENC (3, 0, C5, C5, 1), F_ARCHEXT },
3940  { "far_el1",          CPENC(3,0,C6,C0,0),	0 },
3941  { "far_el2",          CPENC(3,4,C6,C0,0),	0 },
3942  { "far_el3",          CPENC(3,6,C6,C0,0),	0 },
3943  { "far_el12",		CPENC (3, 5, C6, C0, 0), F_ARCHEXT },
3944  { "hpfar_el2",        CPENC(3,4,C6,C0,4),	0 },
3945  { "par_el1",          CPENC(3,0,C7,C4,0),	0 },
3946  { "mair_el1",         CPENC(3,0,C10,C2,0),	0 },
3947  { "mair_el2",         CPENC(3,4,C10,C2,0),	0 },
3948  { "mair_el3",         CPENC(3,6,C10,C2,0),	0 },
3949  { "mair_el12",	CPENC (3, 5, C10, C2, 0), F_ARCHEXT },
3950  { "amair_el1",        CPENC(3,0,C10,C3,0),	0 },
3951  { "amair_el2",        CPENC(3,4,C10,C3,0),	0 },
3952  { "amair_el3",        CPENC(3,6,C10,C3,0),	0 },
3953  { "amair_el12",	CPENC (3, 5, C10, C3, 0), F_ARCHEXT },
3954  { "vbar_el1",         CPENC(3,0,C12,C0,0),	0 },
3955  { "vbar_el2",         CPENC(3,4,C12,C0,0),	0 },
3956  { "vbar_el3",         CPENC(3,6,C12,C0,0),	0 },
3957  { "vbar_el12",	CPENC (3, 5, C12, C0, 0), F_ARCHEXT },
3958  { "rvbar_el1",        CPENC(3,0,C12,C0,1),	F_REG_READ }, /* RO */
3959  { "rvbar_el2",        CPENC(3,4,C12,C0,1),	F_REG_READ }, /* RO */
3960  { "rvbar_el3",        CPENC(3,6,C12,C0,1),	F_REG_READ }, /* RO */
3961  { "rmr_el1",          CPENC(3,0,C12,C0,2),	0 },
3962  { "rmr_el2",          CPENC(3,4,C12,C0,2),	0 },
3963  { "rmr_el3",          CPENC(3,6,C12,C0,2),	0 },
3964  { "isr_el1",          CPENC(3,0,C12,C1,0),	F_REG_READ }, /* RO */
3965  { "disr_el1",		CPENC (3, 0, C12, C1, 1), F_ARCHEXT },
3966  { "vdisr_el2",	CPENC (3, 4, C12, C1, 1), F_ARCHEXT },
3967  { "contextidr_el1",   CPENC(3,0,C13,C0,1),	0 },
3968  { "contextidr_el2",	CPENC (3, 4, C13, C0, 1), F_ARCHEXT },
3969  { "contextidr_el12",	CPENC (3, 5, C13, C0, 1), F_ARCHEXT },
3970  { "rndr",		CPENC(3,3,C2,C4,0), F_ARCHEXT | F_REG_READ }, /* RO */
3971  { "rndrrs",		CPENC(3,3,C2,C4,1), F_ARCHEXT | F_REG_READ }, /* RO */
3972  { "tco",		CPENC(3,3,C4,C2,7), F_ARCHEXT },
3973  { "tfsre0_el1",	CPENC(3,0,C5,C6,1), F_ARCHEXT },
3974  { "tfsr_el1",		CPENC(3,0,C5,C6,0), F_ARCHEXT },
3975  { "tfsr_el2",		CPENC(3,4,C5,C6,0), F_ARCHEXT },
3976  { "tfsr_el3",		CPENC(3,6,C5,C6,0), F_ARCHEXT },
3977  { "tfsr_el12",	CPENC(3,5,C5,C6,0), F_ARCHEXT },
3978  { "rgsr_el1",		CPENC(3,0,C1,C0,5), F_ARCHEXT },
3979  { "gcr_el1",		CPENC(3,0,C1,C0,6), F_ARCHEXT },
3980  { "gmid_el1",		CPENC(3,1,C0,C0,4), F_ARCHEXT | F_REG_READ }, /* RO */
3981  { "tpidr_el0",        CPENC(3,3,C13,C0,2),	0 },
3982  { "tpidrro_el0",      CPENC(3,3,C13,C0,3),	0 }, /* RW */
3983  { "tpidr_el1",        CPENC(3,0,C13,C0,4),	0 },
3984  { "tpidr_el2",        CPENC(3,4,C13,C0,2),	0 },
3985  { "tpidr_el3",        CPENC(3,6,C13,C0,2),	0 },
3986  { "scxtnum_el0",      CPENC(3,3,C13,C0,7), F_ARCHEXT },
3987  { "scxtnum_el1",      CPENC(3,0,C13,C0,7), F_ARCHEXT },
3988  { "scxtnum_el2",      CPENC(3,4,C13,C0,7), F_ARCHEXT },
3989  { "scxtnum_el12",     CPENC(3,5,C13,C0,7), F_ARCHEXT },
3990  { "scxtnum_el3",      CPENC(3,6,C13,C0,7), F_ARCHEXT },
3991  { "teecr32_el1",      CPENC(2,2,C0, C0,0),	0 }, /* See section 3.9.7.1 */
3992  { "cntfrq_el0",       CPENC(3,3,C14,C0,0),	0 }, /* RW */
3993  { "cntpct_el0",       CPENC(3,3,C14,C0,1),	F_REG_READ }, /* RO */
3994  { "cntvct_el0",       CPENC(3,3,C14,C0,2),	F_REG_READ }, /* RO */
3995  { "cntvoff_el2",      CPENC(3,4,C14,C0,3),	0 },
3996  { "cntkctl_el1",      CPENC(3,0,C14,C1,0),	0 },
3997  { "cntkctl_el12",	CPENC (3, 5, C14, C1, 0), F_ARCHEXT },
3998  { "cnthctl_el2",      CPENC(3,4,C14,C1,0),	0 },
3999  { "cntp_tval_el0",    CPENC(3,3,C14,C2,0),	0 },
4000  { "cntp_tval_el02",	CPENC (3, 5, C14, C2, 0), F_ARCHEXT },
4001  { "cntp_ctl_el0",     CPENC(3,3,C14,C2,1),	0 },
4002  { "cntp_ctl_el02",	CPENC (3, 5, C14, C2, 1), F_ARCHEXT },
4003  { "cntp_cval_el0",    CPENC(3,3,C14,C2,2),	0 },
4004  { "cntp_cval_el02",	CPENC (3, 5, C14, C2, 2), F_ARCHEXT },
4005  { "cntv_tval_el0",    CPENC(3,3,C14,C3,0),	0 },
4006  { "cntv_tval_el02",	CPENC (3, 5, C14, C3, 0), F_ARCHEXT },
4007  { "cntv_ctl_el0",     CPENC(3,3,C14,C3,1),	0 },
4008  { "cntv_ctl_el02",	CPENC (3, 5, C14, C3, 1), F_ARCHEXT },
4009  { "cntv_cval_el0",    CPENC(3,3,C14,C3,2),	0 },
4010  { "cntv_cval_el02",	CPENC (3, 5, C14, C3, 2), F_ARCHEXT },
4011  { "cnthp_tval_el2",   CPENC(3,4,C14,C2,0),	0 },
4012  { "cnthp_ctl_el2",    CPENC(3,4,C14,C2,1),	0 },
4013  { "cnthp_cval_el2",   CPENC(3,4,C14,C2,2),	0 },
4014  { "cntps_tval_el1",   CPENC(3,7,C14,C2,0),	0 },
4015  { "cntps_ctl_el1",    CPENC(3,7,C14,C2,1),	0 },
4016  { "cntps_cval_el1",   CPENC(3,7,C14,C2,2),	0 },
4017  { "cnthv_tval_el2",	CPENC (3, 4, C14, C3, 0), F_ARCHEXT },
4018  { "cnthv_ctl_el2",	CPENC (3, 4, C14, C3, 1), F_ARCHEXT },
4019  { "cnthv_cval_el2",	CPENC (3, 4, C14, C3, 2), F_ARCHEXT },
4020  { "dacr32_el2",       CPENC(3,4,C3,C0,0),	0 },
4021  { "ifsr32_el2",       CPENC(3,4,C5,C0,1),	0 },
4022  { "teehbr32_el1",     CPENC(2,2,C1,C0,0),	0 },
4023  { "sder32_el3",       CPENC(3,6,C1,C1,1),	0 },
4024  { "mdscr_el1",         CPENC(2,0,C0, C2, 2),	0 },
4025  { "mdccsr_el0",        CPENC(2,3,C0, C1, 0),	F_REG_READ  },  /* r */
4026  { "mdccint_el1",       CPENC(2,0,C0, C2, 0),	0 },
4027  { "dbgdtr_el0",        CPENC(2,3,C0, C4, 0),	0 },
4028  { "dbgdtrrx_el0",      CPENC(2,3,C0, C5, 0),	F_REG_READ  },  /* r */
4029  { "dbgdtrtx_el0",      CPENC(2,3,C0, C5, 0),	F_REG_WRITE },  /* w */
4030  { "osdtrrx_el1",       CPENC(2,0,C0, C0, 2),	0 },
4031  { "osdtrtx_el1",       CPENC(2,0,C0, C3, 2),	0 },
4032  { "oseccr_el1",        CPENC(2,0,C0, C6, 2),	0 },
4033  { "dbgvcr32_el2",      CPENC(2,4,C0, C7, 0),	0 },
4034  { "dbgbvr0_el1",       CPENC(2,0,C0, C0, 4),	0 },
4035  { "dbgbvr1_el1",       CPENC(2,0,C0, C1, 4),	0 },
4036  { "dbgbvr2_el1",       CPENC(2,0,C0, C2, 4),	0 },
4037  { "dbgbvr3_el1",       CPENC(2,0,C0, C3, 4),	0 },
4038  { "dbgbvr4_el1",       CPENC(2,0,C0, C4, 4),	0 },
4039  { "dbgbvr5_el1",       CPENC(2,0,C0, C5, 4),	0 },
4040  { "dbgbvr6_el1",       CPENC(2,0,C0, C6, 4),	0 },
4041  { "dbgbvr7_el1",       CPENC(2,0,C0, C7, 4),	0 },
4042  { "dbgbvr8_el1",       CPENC(2,0,C0, C8, 4),	0 },
4043  { "dbgbvr9_el1",       CPENC(2,0,C0, C9, 4),	0 },
4044  { "dbgbvr10_el1",      CPENC(2,0,C0, C10,4),	0 },
4045  { "dbgbvr11_el1",      CPENC(2,0,C0, C11,4),	0 },
4046  { "dbgbvr12_el1",      CPENC(2,0,C0, C12,4),	0 },
4047  { "dbgbvr13_el1",      CPENC(2,0,C0, C13,4),	0 },
4048  { "dbgbvr14_el1",      CPENC(2,0,C0, C14,4),	0 },
4049  { "dbgbvr15_el1",      CPENC(2,0,C0, C15,4),	0 },
4050  { "dbgbcr0_el1",       CPENC(2,0,C0, C0, 5),	0 },
4051  { "dbgbcr1_el1",       CPENC(2,0,C0, C1, 5),	0 },
4052  { "dbgbcr2_el1",       CPENC(2,0,C0, C2, 5),	0 },
4053  { "dbgbcr3_el1",       CPENC(2,0,C0, C3, 5),	0 },
4054  { "dbgbcr4_el1",       CPENC(2,0,C0, C4, 5),	0 },
4055  { "dbgbcr5_el1",       CPENC(2,0,C0, C5, 5),	0 },
4056  { "dbgbcr6_el1",       CPENC(2,0,C0, C6, 5),	0 },
4057  { "dbgbcr7_el1",       CPENC(2,0,C0, C7, 5),	0 },
4058  { "dbgbcr8_el1",       CPENC(2,0,C0, C8, 5),	0 },
4059  { "dbgbcr9_el1",       CPENC(2,0,C0, C9, 5),	0 },
4060  { "dbgbcr10_el1",      CPENC(2,0,C0, C10,5),	0 },
4061  { "dbgbcr11_el1",      CPENC(2,0,C0, C11,5),	0 },
4062  { "dbgbcr12_el1",      CPENC(2,0,C0, C12,5),	0 },
4063  { "dbgbcr13_el1",      CPENC(2,0,C0, C13,5),	0 },
4064  { "dbgbcr14_el1",      CPENC(2,0,C0, C14,5),	0 },
4065  { "dbgbcr15_el1",      CPENC(2,0,C0, C15,5),	0 },
4066  { "dbgwvr0_el1",       CPENC(2,0,C0, C0, 6),	0 },
4067  { "dbgwvr1_el1",       CPENC(2,0,C0, C1, 6),	0 },
4068  { "dbgwvr2_el1",       CPENC(2,0,C0, C2, 6),	0 },
4069  { "dbgwvr3_el1",       CPENC(2,0,C0, C3, 6),	0 },
4070  { "dbgwvr4_el1",       CPENC(2,0,C0, C4, 6),	0 },
4071  { "dbgwvr5_el1",       CPENC(2,0,C0, C5, 6),	0 },
4072  { "dbgwvr6_el1",       CPENC(2,0,C0, C6, 6),	0 },
4073  { "dbgwvr7_el1",       CPENC(2,0,C0, C7, 6),	0 },
4074  { "dbgwvr8_el1",       CPENC(2,0,C0, C8, 6),	0 },
4075  { "dbgwvr9_el1",       CPENC(2,0,C0, C9, 6),	0 },
4076  { "dbgwvr10_el1",      CPENC(2,0,C0, C10,6),	0 },
4077  { "dbgwvr11_el1",      CPENC(2,0,C0, C11,6),	0 },
4078  { "dbgwvr12_el1",      CPENC(2,0,C0, C12,6),	0 },
4079  { "dbgwvr13_el1",      CPENC(2,0,C0, C13,6),	0 },
4080  { "dbgwvr14_el1",      CPENC(2,0,C0, C14,6),	0 },
4081  { "dbgwvr15_el1",      CPENC(2,0,C0, C15,6),	0 },
4082  { "dbgwcr0_el1",       CPENC(2,0,C0, C0, 7),	0 },
4083  { "dbgwcr1_el1",       CPENC(2,0,C0, C1, 7),	0 },
4084  { "dbgwcr2_el1",       CPENC(2,0,C0, C2, 7),	0 },
4085  { "dbgwcr3_el1",       CPENC(2,0,C0, C3, 7),	0 },
4086  { "dbgwcr4_el1",       CPENC(2,0,C0, C4, 7),	0 },
4087  { "dbgwcr5_el1",       CPENC(2,0,C0, C5, 7),	0 },
4088  { "dbgwcr6_el1",       CPENC(2,0,C0, C6, 7),	0 },
4089  { "dbgwcr7_el1",       CPENC(2,0,C0, C7, 7),	0 },
4090  { "dbgwcr8_el1",       CPENC(2,0,C0, C8, 7),	0 },
4091  { "dbgwcr9_el1",       CPENC(2,0,C0, C9, 7),	0 },
4092  { "dbgwcr10_el1",      CPENC(2,0,C0, C10,7),	0 },
4093  { "dbgwcr11_el1",      CPENC(2,0,C0, C11,7),	0 },
4094  { "dbgwcr12_el1",      CPENC(2,0,C0, C12,7),	0 },
4095  { "dbgwcr13_el1",      CPENC(2,0,C0, C13,7),	0 },
4096  { "dbgwcr14_el1",      CPENC(2,0,C0, C14,7),	0 },
4097  { "dbgwcr15_el1",      CPENC(2,0,C0, C15,7),	0 },
4098  { "mdrar_el1",         CPENC(2,0,C1, C0, 0),	F_REG_READ  },  /* r */
4099  { "oslar_el1",         CPENC(2,0,C1, C0, 4),	F_REG_WRITE },  /* w */
4100  { "oslsr_el1",         CPENC(2,0,C1, C1, 4),	F_REG_READ  },  /* r */
4101  { "osdlr_el1",         CPENC(2,0,C1, C3, 4),	0 },
4102  { "dbgprcr_el1",       CPENC(2,0,C1, C4, 4),	0 },
4103  { "dbgclaimset_el1",   CPENC(2,0,C7, C8, 6),	0 },
4104  { "dbgclaimclr_el1",   CPENC(2,0,C7, C9, 6),	0 },
4105  { "dbgauthstatus_el1", CPENC(2,0,C7, C14,6),	F_REG_READ  },  /* r */
4106  { "pmblimitr_el1",	 CPENC (3, 0, C9, C10, 0), F_ARCHEXT },  /* rw */
4107  { "pmbptr_el1",	 CPENC (3, 0, C9, C10, 1), F_ARCHEXT },  /* rw */
4108  { "pmbsr_el1",	 CPENC (3, 0, C9, C10, 3), F_ARCHEXT },  /* rw */
4109  { "pmbidr_el1",	 CPENC (3, 0, C9, C10, 7), F_ARCHEXT | F_REG_READ },  /* ro */
4110  { "pmscr_el1",	 CPENC (3, 0, C9, C9, 0),  F_ARCHEXT },  /* rw */
4111  { "pmsicr_el1",	 CPENC (3, 0, C9, C9, 2),  F_ARCHEXT },  /* rw */
4112  { "pmsirr_el1",	 CPENC (3, 0, C9, C9, 3),  F_ARCHEXT },  /* rw */
4113  { "pmsfcr_el1",	 CPENC (3, 0, C9, C9, 4),  F_ARCHEXT },  /* rw */
4114  { "pmsevfr_el1",	 CPENC (3, 0, C9, C9, 5),  F_ARCHEXT },  /* rw */
4115  { "pmslatfr_el1",	 CPENC (3, 0, C9, C9, 6),  F_ARCHEXT },  /* rw */
4116  { "pmsidr_el1",	 CPENC (3, 0, C9, C9, 7),  F_ARCHEXT },  /* rw */
4117  { "pmscr_el2",	 CPENC (3, 4, C9, C9, 0),  F_ARCHEXT },  /* rw */
4118  { "pmscr_el12",	 CPENC (3, 5, C9, C9, 0),  F_ARCHEXT },  /* rw */
4119  { "pmcr_el0",          CPENC(3,3,C9,C12, 0),	0 },
4120  { "pmcntenset_el0",    CPENC(3,3,C9,C12, 1),	0 },
4121  { "pmcntenclr_el0",    CPENC(3,3,C9,C12, 2),	0 },
4122  { "pmovsclr_el0",      CPENC(3,3,C9,C12, 3),	0 },
4123  { "pmswinc_el0",       CPENC(3,3,C9,C12, 4),	F_REG_WRITE },  /* w */
4124  { "pmselr_el0",        CPENC(3,3,C9,C12, 5),	0 },
4125  { "pmceid0_el0",       CPENC(3,3,C9,C12, 6),	F_REG_READ  },  /* r */
4126  { "pmceid1_el0",       CPENC(3,3,C9,C12, 7),	F_REG_READ  },  /* r */
4127  { "pmccntr_el0",       CPENC(3,3,C9,C13, 0),	0 },
4128  { "pmxevtyper_el0",    CPENC(3,3,C9,C13, 1),	0 },
4129  { "pmxevcntr_el0",     CPENC(3,3,C9,C13, 2),	0 },
4130  { "pmuserenr_el0",     CPENC(3,3,C9,C14, 0),	0 },
4131  { "pmintenset_el1",    CPENC(3,0,C9,C14, 1),	0 },
4132  { "pmintenclr_el1",    CPENC(3,0,C9,C14, 2),	0 },
4133  { "pmovsset_el0",      CPENC(3,3,C9,C14, 3),	0 },
4134  { "pmevcntr0_el0",     CPENC(3,3,C14,C8, 0),	0 },
4135  { "pmevcntr1_el0",     CPENC(3,3,C14,C8, 1),	0 },
4136  { "pmevcntr2_el0",     CPENC(3,3,C14,C8, 2),	0 },
4137  { "pmevcntr3_el0",     CPENC(3,3,C14,C8, 3),	0 },
4138  { "pmevcntr4_el0",     CPENC(3,3,C14,C8, 4),	0 },
4139  { "pmevcntr5_el0",     CPENC(3,3,C14,C8, 5),	0 },
4140  { "pmevcntr6_el0",     CPENC(3,3,C14,C8, 6),	0 },
4141  { "pmevcntr7_el0",     CPENC(3,3,C14,C8, 7),	0 },
4142  { "pmevcntr8_el0",     CPENC(3,3,C14,C9, 0),	0 },
4143  { "pmevcntr9_el0",     CPENC(3,3,C14,C9, 1),	0 },
4144  { "pmevcntr10_el0",    CPENC(3,3,C14,C9, 2),	0 },
4145  { "pmevcntr11_el0",    CPENC(3,3,C14,C9, 3),	0 },
4146  { "pmevcntr12_el0",    CPENC(3,3,C14,C9, 4),	0 },
4147  { "pmevcntr13_el0",    CPENC(3,3,C14,C9, 5),	0 },
4148  { "pmevcntr14_el0",    CPENC(3,3,C14,C9, 6),	0 },
4149  { "pmevcntr15_el0",    CPENC(3,3,C14,C9, 7),	0 },
4150  { "pmevcntr16_el0",    CPENC(3,3,C14,C10,0),	0 },
4151  { "pmevcntr17_el0",    CPENC(3,3,C14,C10,1),	0 },
4152  { "pmevcntr18_el0",    CPENC(3,3,C14,C10,2),	0 },
4153  { "pmevcntr19_el0",    CPENC(3,3,C14,C10,3),	0 },
4154  { "pmevcntr20_el0",    CPENC(3,3,C14,C10,4),	0 },
4155  { "pmevcntr21_el0",    CPENC(3,3,C14,C10,5),	0 },
4156  { "pmevcntr22_el0",    CPENC(3,3,C14,C10,6),	0 },
4157  { "pmevcntr23_el0",    CPENC(3,3,C14,C10,7),	0 },
4158  { "pmevcntr24_el0",    CPENC(3,3,C14,C11,0),	0 },
4159  { "pmevcntr25_el0",    CPENC(3,3,C14,C11,1),	0 },
4160  { "pmevcntr26_el0",    CPENC(3,3,C14,C11,2),	0 },
4161  { "pmevcntr27_el0",    CPENC(3,3,C14,C11,3),	0 },
4162  { "pmevcntr28_el0",    CPENC(3,3,C14,C11,4),	0 },
4163  { "pmevcntr29_el0",    CPENC(3,3,C14,C11,5),	0 },
4164  { "pmevcntr30_el0",    CPENC(3,3,C14,C11,6),	0 },
4165  { "pmevtyper0_el0",    CPENC(3,3,C14,C12,0),	0 },
4166  { "pmevtyper1_el0",    CPENC(3,3,C14,C12,1),	0 },
4167  { "pmevtyper2_el0",    CPENC(3,3,C14,C12,2),	0 },
4168  { "pmevtyper3_el0",    CPENC(3,3,C14,C12,3),	0 },
4169  { "pmevtyper4_el0",    CPENC(3,3,C14,C12,4),	0 },
4170  { "pmevtyper5_el0",    CPENC(3,3,C14,C12,5),	0 },
4171  { "pmevtyper6_el0",    CPENC(3,3,C14,C12,6),	0 },
4172  { "pmevtyper7_el0",    CPENC(3,3,C14,C12,7),	0 },
4173  { "pmevtyper8_el0",    CPENC(3,3,C14,C13,0),	0 },
4174  { "pmevtyper9_el0",    CPENC(3,3,C14,C13,1),	0 },
4175  { "pmevtyper10_el0",   CPENC(3,3,C14,C13,2),	0 },
4176  { "pmevtyper11_el0",   CPENC(3,3,C14,C13,3),	0 },
4177  { "pmevtyper12_el0",   CPENC(3,3,C14,C13,4),	0 },
4178  { "pmevtyper13_el0",   CPENC(3,3,C14,C13,5),	0 },
4179  { "pmevtyper14_el0",   CPENC(3,3,C14,C13,6),	0 },
4180  { "pmevtyper15_el0",   CPENC(3,3,C14,C13,7),	0 },
4181  { "pmevtyper16_el0",   CPENC(3,3,C14,C14,0),	0 },
4182  { "pmevtyper17_el0",   CPENC(3,3,C14,C14,1),	0 },
4183  { "pmevtyper18_el0",   CPENC(3,3,C14,C14,2),	0 },
4184  { "pmevtyper19_el0",   CPENC(3,3,C14,C14,3),	0 },
4185  { "pmevtyper20_el0",   CPENC(3,3,C14,C14,4),	0 },
4186  { "pmevtyper21_el0",   CPENC(3,3,C14,C14,5),	0 },
4187  { "pmevtyper22_el0",   CPENC(3,3,C14,C14,6),	0 },
4188  { "pmevtyper23_el0",   CPENC(3,3,C14,C14,7),	0 },
4189  { "pmevtyper24_el0",   CPENC(3,3,C14,C15,0),	0 },
4190  { "pmevtyper25_el0",   CPENC(3,3,C14,C15,1),	0 },
4191  { "pmevtyper26_el0",   CPENC(3,3,C14,C15,2),	0 },
4192  { "pmevtyper27_el0",   CPENC(3,3,C14,C15,3),	0 },
4193  { "pmevtyper28_el0",   CPENC(3,3,C14,C15,4),	0 },
4194  { "pmevtyper29_el0",   CPENC(3,3,C14,C15,5),	0 },
4195  { "pmevtyper30_el0",   CPENC(3,3,C14,C15,6),	0 },
4196  { "pmccfiltr_el0",     CPENC(3,3,C14,C15,7),	0 },
4197
4198  { "dit",		 CPEN_ (3, C2, 5), F_ARCHEXT },
4199  { "vstcr_el2",	 CPENC(3, 4, C2, C6, 2), F_ARCHEXT },
4200  { "vsttbr_el2",	 CPENC(3, 4, C2, C6, 0), F_ARCHEXT },
4201  { "cnthvs_tval_el2",	 CPENC(3, 4, C14, C4, 0), F_ARCHEXT },
4202  { "cnthvs_cval_el2",	 CPENC(3, 4, C14, C4, 2), F_ARCHEXT },
4203  { "cnthvs_ctl_el2",	 CPENC(3, 4, C14, C4, 1), F_ARCHEXT },
4204  { "cnthps_tval_el2",	 CPENC(3, 4, C14, C5, 0), F_ARCHEXT },
4205  { "cnthps_cval_el2",	 CPENC(3, 4, C14, C5, 2), F_ARCHEXT },
4206  { "cnthps_ctl_el2",	 CPENC(3, 4, C14, C5, 1), F_ARCHEXT },
4207  { "sder32_el2",	 CPENC(3, 4, C1, C3, 1), F_ARCHEXT },
4208  { "vncr_el2",		 CPENC(3, 4, C2, C2, 0), F_ARCHEXT },
4209  { 0,          CPENC(0,0,0,0,0),	0 },
4210};
4211
4212bfd_boolean
4213aarch64_sys_reg_deprecated_p (const aarch64_sys_reg *reg)
4214{
4215  return (reg->flags & F_DEPRECATED) != 0;
4216}
4217
4218bfd_boolean
4219aarch64_sys_reg_supported_p (const aarch64_feature_set features,
4220			     const aarch64_sys_reg *reg)
4221{
4222  if (!(reg->flags & F_ARCHEXT))
4223    return TRUE;
4224
4225  /* PAN.  Values are from aarch64_sys_regs.  */
4226  if (reg->value == CPEN_(0,C2,3)
4227      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4228    return FALSE;
4229
4230  /* SCXTNUM_ELx registers.  */
4231  if ((reg->value == CPENC (3, 3, C13, C0, 7)
4232       || reg->value == CPENC (3, 0, C13, C0, 7)
4233       || reg->value == CPENC (3, 4, C13, C0, 7)
4234       || reg->value == CPENC (3, 6, C13, C0, 7)
4235       || reg->value == CPENC (3, 5, C13, C0, 7))
4236      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SCXTNUM))
    return FALSE;
4238
4239  /* ID_PFR2_EL1 register.  */
4240  if (reg->value == CPENC(3, 0, C0, C3, 4)
4241      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_ID_PFR2))
4242    return FALSE;
4243
4244  /* SSBS.  Values are from aarch64_sys_regs.  */
4245  if (reg->value == CPEN_(3,C2,6)
4246      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4247    return FALSE;
4248
4249  /* Virtualization host extensions: system registers.  */
4250  if ((reg->value == CPENC (3, 4, C2, C0, 1)
4251       || reg->value == CPENC (3, 4, C13, C0, 1)
4252       || reg->value == CPENC (3, 4, C14, C3, 0)
4253       || reg->value == CPENC (3, 4, C14, C3, 1)
4254       || reg->value == CPENC (3, 4, C14, C3, 2))
4255      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
    return FALSE;
4257
4258  /* Virtualization host extensions: *_el12 names of *_el1 registers.  */
4259  if ((reg->value == CPEN_ (5, C0, 0)
4260       || reg->value == CPEN_ (5, C0, 1)
4261       || reg->value == CPENC (3, 5, C1, C0, 0)
4262       || reg->value == CPENC (3, 5, C1, C0, 2)
4263       || reg->value == CPENC (3, 5, C2, C0, 0)
4264       || reg->value == CPENC (3, 5, C2, C0, 1)
4265       || reg->value == CPENC (3, 5, C2, C0, 2)
4266       || reg->value == CPENC (3, 5, C5, C1, 0)
4267       || reg->value == CPENC (3, 5, C5, C1, 1)
4268       || reg->value == CPENC (3, 5, C5, C2, 0)
4269       || reg->value == CPENC (3, 5, C6, C0, 0)
4270       || reg->value == CPENC (3, 5, C10, C2, 0)
4271       || reg->value == CPENC (3, 5, C10, C3, 0)
4272       || reg->value == CPENC (3, 5, C12, C0, 0)
4273       || reg->value == CPENC (3, 5, C13, C0, 1)
4274       || reg->value == CPENC (3, 5, C14, C1, 0))
4275      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4276    return FALSE;
4277
4278  /* Virtualization host extensions: *_el02 names of *_el0 registers.  */
4279  if ((reg->value == CPENC (3, 5, C14, C2, 0)
4280       || reg->value == CPENC (3, 5, C14, C2, 1)
4281       || reg->value == CPENC (3, 5, C14, C2, 2)
4282       || reg->value == CPENC (3, 5, C14, C3, 0)
4283       || reg->value == CPENC (3, 5, C14, C3, 1)
4284       || reg->value == CPENC (3, 5, C14, C3, 2))
4285      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_1))
4286    return FALSE;
4287
4288  /* ARMv8.2 features.  */
4289
4290  /* ID_AA64MMFR2_EL1.  */
4291  if (reg->value == CPENC (3, 0, C0, C7, 2)
4292      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4293    return FALSE;
4294
4295  /* PSTATE.UAO.  */
4296  if (reg->value == CPEN_ (0, C2, 4)
4297      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4298    return FALSE;
4299
4300  /* RAS extension.  */
4301
  /* ERRIDR_EL1, ERRSELR_EL1, ERXFR_EL1, ERXCTLR_EL1, ERXSTATUS_EL1,
     ERXADDR_EL1, ERXMISC0_EL1 and ERXMISC1_EL1.  */
4304  if ((reg->value == CPENC (3, 0, C5, C3, 0)
4305       || reg->value == CPENC (3, 0, C5, C3, 1)
4306       || reg->value == CPENC (3, 0, C5, C3, 2)
4307       || reg->value == CPENC (3, 0, C5, C3, 3)
4308       || reg->value == CPENC (3, 0, C5, C4, 0)
4309       || reg->value == CPENC (3, 0, C5, C4, 1)
4310       || reg->value == CPENC (3, 0, C5, C4, 2)
4311       || reg->value == CPENC (3, 0, C5, C4, 3)
4312       || reg->value == CPENC (3, 0, C5, C5, 0)
4313       || reg->value == CPENC (3, 0, C5, C5, 1))
4314      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4315    return FALSE;
4316
4317  /* VSESR_EL2, DISR_EL1 and VDISR_EL2.  */
4318  if ((reg->value == CPENC (3, 4, C5, C2, 3)
4319       || reg->value == CPENC (3, 0, C12, C1, 1)
4320       || reg->value == CPENC (3, 4, C12, C1, 1))
4321      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RAS))
4322    return FALSE;
4323
4324  /* Statistical Profiling extension.  */
4325  if ((reg->value == CPENC (3, 0, C9, C10, 0)
4326       || reg->value == CPENC (3, 0, C9, C10, 1)
4327       || reg->value == CPENC (3, 0, C9, C10, 3)
4328       || reg->value == CPENC (3, 0, C9, C10, 7)
4329       || reg->value == CPENC (3, 0, C9, C9, 0)
4330       || reg->value == CPENC (3, 0, C9, C9, 2)
4331       || reg->value == CPENC (3, 0, C9, C9, 3)
4332       || reg->value == CPENC (3, 0, C9, C9, 4)
4333       || reg->value == CPENC (3, 0, C9, C9, 5)
4334       || reg->value == CPENC (3, 0, C9, C9, 6)
4335       || reg->value == CPENC (3, 0, C9, C9, 7)
4336       || reg->value == CPENC (3, 4, C9, C9, 0)
4337       || reg->value == CPENC (3, 5, C9, C9, 0))
4338      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PROFILE))
4339    return FALSE;
4340
4341  /* ARMv8.3 Pointer authentication keys.  */
4342  if ((reg->value == CPENC (3, 0, C2, C1, 0)
4343       || reg->value == CPENC (3, 0, C2, C1, 1)
4344       || reg->value == CPENC (3, 0, C2, C1, 2)
4345       || reg->value == CPENC (3, 0, C2, C1, 3)
4346       || reg->value == CPENC (3, 0, C2, C2, 0)
4347       || reg->value == CPENC (3, 0, C2, C2, 1)
4348       || reg->value == CPENC (3, 0, C2, C2, 2)
4349       || reg->value == CPENC (3, 0, C2, C2, 3)
4350       || reg->value == CPENC (3, 0, C2, C3, 0)
4351       || reg->value == CPENC (3, 0, C2, C3, 1))
4352      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_3))
4353    return FALSE;
4354
4355  /* SVE.  */
4356  if ((reg->value == CPENC (3, 0, C0, C4, 4)
4357       || reg->value == CPENC (3, 0, C1, C2, 0)
4358       || reg->value == CPENC (3, 4, C1, C2, 0)
4359       || reg->value == CPENC (3, 6, C1, C2, 0)
4360       || reg->value == CPENC (3, 5, C1, C2, 0)
4361       || reg->value == CPENC (3, 0, C0, C0, 7))
4362      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SVE))
4363    return FALSE;
4364
4365  /* ARMv8.4 features.  */
4366
4367  /* PSTATE.DIT.  */
4368  if (reg->value == CPEN_ (3, C2, 5)
4369      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4370    return FALSE;
4371
4372  /* Virtualization extensions.  */
4373  if ((reg->value == CPENC(3, 4, C2, C6, 2)
4374       || reg->value == CPENC(3, 4, C2, C6, 0)
4375       || reg->value == CPENC(3, 4, C14, C4, 0)
4376       || reg->value == CPENC(3, 4, C14, C4, 2)
4377       || reg->value == CPENC(3, 4, C14, C4, 1)
4378       || reg->value == CPENC(3, 4, C14, C5, 0)
4379       || reg->value == CPENC(3, 4, C14, C5, 2)
4380       || reg->value == CPENC(3, 4, C14, C5, 1)
4381       || reg->value == CPENC(3, 4, C1, C3, 1)
4382       || reg->value == CPENC(3, 4, C2, C2, 0))
4383      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4384    return FALSE;
4385
4386  /* ARMv8.4 TLB instructions.  */
4387  if ((reg->value == CPENS (0, C8, C1, 0)
4388       || reg->value == CPENS (0, C8, C1, 1)
4389       || reg->value == CPENS (0, C8, C1, 2)
4390       || reg->value == CPENS (0, C8, C1, 3)
4391       || reg->value == CPENS (0, C8, C1, 5)
4392       || reg->value == CPENS (0, C8, C1, 7)
4393       || reg->value == CPENS (4, C8, C4, 0)
4394       || reg->value == CPENS (4, C8, C4, 4)
4395       || reg->value == CPENS (4, C8, C1, 1)
4396       || reg->value == CPENS (4, C8, C1, 5)
4397       || reg->value == CPENS (4, C8, C1, 6)
4398       || reg->value == CPENS (6, C8, C1, 1)
4399       || reg->value == CPENS (6, C8, C1, 5)
4400       || reg->value == CPENS (4, C8, C1, 0)
4401       || reg->value == CPENS (4, C8, C1, 4)
4402       || reg->value == CPENS (6, C8, C1, 0)
4403       || reg->value == CPENS (0, C8, C6, 1)
4404       || reg->value == CPENS (0, C8, C6, 3)
4405       || reg->value == CPENS (0, C8, C6, 5)
4406       || reg->value == CPENS (0, C8, C6, 7)
4407       || reg->value == CPENS (0, C8, C2, 1)
4408       || reg->value == CPENS (0, C8, C2, 3)
4409       || reg->value == CPENS (0, C8, C2, 5)
4410       || reg->value == CPENS (0, C8, C2, 7)
4411       || reg->value == CPENS (0, C8, C5, 1)
4412       || reg->value == CPENS (0, C8, C5, 3)
4413       || reg->value == CPENS (0, C8, C5, 5)
4414       || reg->value == CPENS (0, C8, C5, 7)
4415       || reg->value == CPENS (4, C8, C0, 2)
4416       || reg->value == CPENS (4, C8, C0, 6)
4417       || reg->value == CPENS (4, C8, C4, 2)
4418       || reg->value == CPENS (4, C8, C4, 6)
4419       || reg->value == CPENS (4, C8, C4, 3)
4420       || reg->value == CPENS (4, C8, C4, 7)
4421       || reg->value == CPENS (4, C8, C6, 1)
4422       || reg->value == CPENS (4, C8, C6, 5)
4423       || reg->value == CPENS (4, C8, C2, 1)
4424       || reg->value == CPENS (4, C8, C2, 5)
4425       || reg->value == CPENS (4, C8, C5, 1)
4426       || reg->value == CPENS (4, C8, C5, 5)
4427       || reg->value == CPENS (6, C8, C6, 1)
4428       || reg->value == CPENS (6, C8, C6, 5)
4429       || reg->value == CPENS (6, C8, C2, 1)
4430       || reg->value == CPENS (6, C8, C2, 5)
4431       || reg->value == CPENS (6, C8, C5, 1)
4432       || reg->value == CPENS (6, C8, C5, 5))
4433      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4434    return FALSE;
4435
4436  /* Random Number Instructions.  For now they are available
4437     (and optional) only with ARMv8.5-A.  */
4438  if ((reg->value == CPENC (3, 3, C2, C4, 0)
4439       || reg->value == CPENC (3, 3, C2, C4, 1))
4440      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_RNG)
4441	   && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_5)))
4442    return FALSE;
4443
4444  /* System Registers in ARMv8.5-A with AARCH64_FEATURE_MEMTAG.  */
4445  if ((reg->value == CPENC (3, 3, C4, C2, 7)
4446       || reg->value == CPENC (3, 0, C5, C6, 1)
4447       || reg->value == CPENC (3, 0, C5, C6, 0)
4448       || reg->value == CPENC (3, 4, C5, C6, 0)
4449       || reg->value == CPENC (3, 6, C5, C6, 0)
4450       || reg->value == CPENC (3, 5, C5, C6, 0)
4451       || reg->value == CPENC (3, 0, C1, C0, 5)
4452       || reg->value == CPENC (3, 0, C1, C0, 6)
4453       || reg->value == CPENC (3, 1, C0, C0, 4))
4454      && !(AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG)))
4455    return FALSE;
4456
4457  return TRUE;
4458}
4459
/* N.B. the values below are not in CPENC form; they are in op1:op2 form,
   with op1 in the upper three bits and op2 in the lower three.  The fields
   are encoded by ins_pstatefield, which just shifts the value by the width
   of the fields in a loop.  So if you CPENC them, only the first value will
   be set; the rest are masked out to 0.  As an example, with op1 = 3 and
   op2 = 2, CPENC would produce a value of 0b110000000001000000 (0x30040),
   while what you want is 0b011010 (0x1a).  */
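/* Illustrative check of the form described above: DAIFSet has op1 = 3 and
   op2 = 6, so its field value is (3 << 3) | 6 = 0x1e, matching the
   "daifset" entry below.  */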
4467const aarch64_sys_reg aarch64_pstatefields [] =
4468{
4469  { "spsel",            0x05,	0 },
4470  { "daifset",          0x1e,	0 },
4471  { "daifclr",          0x1f,	0 },
4472  { "pan",		0x04,	F_ARCHEXT },
4473  { "uao",		0x03,	F_ARCHEXT },
4474  { "ssbs",		0x19,   F_ARCHEXT },
4475  { "dit",		0x1a,	F_ARCHEXT },
4476  { "tco",		0x1c,	F_ARCHEXT },
4477  { 0,          CPENC(0,0,0,0,0), 0 },
4478};
4479
4480bfd_boolean
4481aarch64_pstatefield_supported_p (const aarch64_feature_set features,
4482				 const aarch64_sys_reg *reg)
4483{
4484  if (!(reg->flags & F_ARCHEXT))
4485    return TRUE;
4486
4487  /* PAN.  Values are from aarch64_pstatefields.  */
4488  if (reg->value == 0x04
4489      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PAN))
4490    return FALSE;
4491
4492  /* UAO.  Values are from aarch64_pstatefields.  */
4493  if (reg->value == 0x03
4494      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
4495    return FALSE;
4496
4497  /* SSBS.  Values are from aarch64_pstatefields.  */
4498  if (reg->value == 0x19
4499      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_SSBS))
4500    return FALSE;
4501
4502  /* DIT.  Values are from aarch64_pstatefields.  */
4503  if (reg->value == 0x1a
4504      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
4505    return FALSE;
4506
4507  /* TCO.  Values are from aarch64_pstatefields.  */
4508  if (reg->value == 0x1c
4509      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
4510    return FALSE;
4511
4512  return TRUE;
4513}
4514
4515const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
4516{
4517    { "ialluis", CPENS(0,C7,C1,0), 0 },
4518    { "iallu",   CPENS(0,C7,C5,0), 0 },
4519    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
4520    { 0, CPENS(0,0,0,0), 0 }
4521};
4522
4523const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
4524{
4525    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
4526    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
4527    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
4528    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
4529    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
4530    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
4531    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
4532    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
4533    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
4534    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
4535    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
4536    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
4537    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
4538    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
4539    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
4540    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
4541    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
4542    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
4543    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
4544    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
4545    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
4546    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
4547    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
4548    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
4549    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
4550    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
4551    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
4552    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
4553    { 0,       CPENS(0,0,0,0), 0 }
4554};
4555
4556const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
4557{
4558    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
4559    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
4560    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
4561    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
4562    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
4563    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
4564    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
4565    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
4566    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
4567    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
4568    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
4569    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
4570    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
4571    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
4572    { 0,       CPENS(0,0,0,0), 0 }
4573};
4574
const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
{
    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
    { "alle2",     CPENS(4,C8,C7,0), 0 },
    { "alle2is",   CPENS(4,C8,C3,0), 0 },
    { "alle1",     CPENS(4,C8,C7,4), 0 },
    { "alle1is",   CPENS(4,C8,C3,4), 0 },
    { "alle3",     CPENS(6,C8,C7,0), 0 },
    { "alle3is",   CPENS(6,C8,C3,0), 0 },
    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },

    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },

    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },

    { 0,       CPENS(0,0,0,0), 0 }
};

const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
{
    /* RCTX is somewhat unusual in that its op2 value differs depending on
       the instruction in which it is used (cfp/dvp/cpp).  Thus op2 is
       masked out here and instead encoded directly in the
       aarch64_opcode_table entries for the respective instructions.  */
    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */

    { 0,       CPENS(0,0,0,0), 0 }
};

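/* For reference only (values taken from the Armv8.5-A prediction restriction
   instructions, kept here purely as an illustrative note): CFP RCTX uses
   op2 == 4, DVP RCTX uses op2 == 5 and CPP RCTX uses op2 == 7, which is why
   the table entry above deliberately leaves op2 as 0.  */
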
bfd_boolean
aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
{
  return (sys_ins_reg->flags & F_HASXT) != 0;
}

extern bfd_boolean
aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
				 const aarch64_sys_ins_reg *reg)
{
  if (!(reg->flags & F_ARCHEXT))
    return TRUE;

  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C12, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
  if (reg->value == CPENS (3, C7, C13, 1)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
    return FALSE;

  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
  if ((reg->value == CPENS (0, C7, C6, 3)
       || reg->value == CPENS (0, C7, C6, 4)
       || reg->value == CPENS (0, C7, C10, 4)
       || reg->value == CPENS (0, C7, C14, 4)
       || reg->value == CPENS (3, C7, C10, 3)
       || reg->value == CPENS (3, C7, C12, 3)
       || reg->value == CPENS (3, C7, C13, 3)
       || reg->value == CPENS (3, C7, C14, 3)
       || reg->value == CPENS (3, C7, C4, 3)
       || reg->value == CPENS (0, C7, C6, 5)
       || reg->value == CPENS (0, C7, C6, 6)
       || reg->value == CPENS (0, C7, C10, 6)
       || reg->value == CPENS (0, C7, C14, 6)
       || reg->value == CPENS (3, C7, C10, 5)
       || reg->value == CPENS (3, C7, C12, 5)
       || reg->value == CPENS (3, C7, C13, 5)
       || reg->value == CPENS (3, C7, C14, 5)
       || reg->value == CPENS (3, C7, C4, 4))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
    return FALSE;

  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
  if ((reg->value == CPENS (0, C7, C9, 0)
       || reg->value == CPENS (0, C7, C9, 1))
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
    return FALSE;

  /* CFP/DVP/CPP RCTX.  Values are from aarch64_sys_regs_sr.  */
  if (reg->value == CPENS (3, C7, C3, 0)
      && !AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
    return FALSE;

  return TRUE;
}

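/* Illustrative examples of the checks above: "dc cvap, x0" is only accepted
   when the target has ARMv8.2 (the CPENS (3, C7, C12, 1) case), while
   "dc cigvac, x0" (CPENS (3, C7, C14, 3), one of the values in the MEMTAG
   list) additionally requires the ARMv8.5-A Memory Tagging Extension.  */
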
#undef C0
#undef C1
#undef C2
#undef C3
#undef C4
#undef C5
#undef C6
#undef C7
#undef C8
#undef C9
#undef C10
#undef C11
#undef C12
#undef C13
#undef C14
#undef C15

#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))

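/* For example (illustrative values only):

     BITS (0x123, 7, 4) == 0x2   -- bits [7:4] of 0x123
     BIT  (0x123, 1)    == 1     -- bit 1 of 0x123

   verify_ldpsw below uses these helpers to pull the Rt, Rn and Rt2 fields
   out of a raw instruction word.  */
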
static enum err_type
verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
	      bfd_boolean encoding ATTRIBUTE_UNUSED,
	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  int t  = BITS (insn, 4, 0);
  int n  = BITS (insn, 9, 5);
  int t2 = BITS (insn, 14, 10);

  if (BIT (insn, 23))
    {
      /* Write back enabled.  */
      if ((t == n || t2 == n) && n != 31)
	return ERR_UND;
    }

  if (BIT (insn, 22))
    {
      /* Load.  */
      if (t == t2)
	return ERR_UND;
    }

  return ERR_OK;
}

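/* Illustrative examples of the register constraints checked above (not an
   exhaustive list):

     ldpsw x1, x1, [x2]        <-- ERR_UND: Rt == Rt2 on a load.
     ldpsw x1, x3, [x1], #8    <-- ERR_UND: write-back with Rn == Rt.
     ldpsw x1, x3, [x2], #8    <-- ERR_OK.  */
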
/* Verifier for vector-by-element instructions with 3 operands, where the
   condition `if sz:L == 11 then UNDEFINED` holds.  */

static enum err_type
verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
		bfd_vma pc ATTRIBUTE_UNUSED, bfd_boolean encoding,
		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
{
  const aarch64_insn undef_pattern = 0x3;
  aarch64_insn value;

  assert (inst->opcode);
  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
  value = encoding ? inst->value : insn;
  assert (value);

  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
    return ERR_UND;

  return ERR_OK;
}

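/* As an illustration, take the FMLA (by element) encodings: the
   single-precision forms always have sz == 0, and the double-precision form
   only allows element index 0, with the index held in H and L required to be
   0.  An encoding with sz == 1 and L == 1 therefore has no valid meaning,
   and verify_elem_sd reports it as ERR_UND.  */
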
/* Initialize an instruction sequence INSN_SEQUENCE with the instruction INST.
   If INST is NULL the given insn_sequence is simply cleared and no new
   sequence is started.  */

void
init_insn_sequence (const struct aarch64_inst *inst,
		    aarch64_instr_sequence *insn_sequence)
{
  int num_req_entries = 0;
  insn_sequence->next_insn = 0;
  insn_sequence->num_insns = num_req_entries;
  if (insn_sequence->instr)
    XDELETE (insn_sequence->instr);
  insn_sequence->instr = NULL;

  if (inst)
    {
      insn_sequence->instr = XNEW (aarch64_inst);
      memcpy (insn_sequence->instr, inst, sizeof (aarch64_inst));
    }

  /* Handle all the cases here.  May need to think of something smarter than
     a giant if/else chain if this grows.  At that time, a lookup table may be
     best.  */
  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
    num_req_entries = 1;

  if (insn_sequence->current_insns)
    XDELETEVEC (insn_sequence->current_insns);
  insn_sequence->current_insns = NULL;

  if (num_req_entries != 0)
    {
      size_t size = num_req_entries * sizeof (aarch64_inst);
      insn_sequence->current_insns
	= (aarch64_inst**) XNEWVEC (aarch64_inst, num_req_entries);
      memset (insn_sequence->current_insns, 0, size);
    }
}


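/* A minimal usage sketch (illustrative only; the real callers live in the
   assembler and disassembler):

     aarch64_instr_sequence seq;
     memset (&seq, 0, sizeof (seq));
     init_insn_sequence (NULL, &seq);           <-- start with an empty sequence
     ...
     init_insn_sequence (&movprfx_inst, &seq);  <-- open a MOVPRFX sequence

   where movprfx_inst is a hypothetical, fully decoded aarch64_inst whose
   opcode carries the C_SCAN_MOVPRFX constraint.  */
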
/*  This function verifies that the instruction INST adheres to its specified
    constraints.  If it does then ERR_OK is returned, if not then ERR_VFI is
    returned and MISMATCH_DETAIL contains the reason why verification failed.

    The function is called both during assembly and disassembly.  If assembling
    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
    and will contain the address of the current instruction relative to the
    section.

    If ENCODING is FALSE and PC is 0 then we are at the start of a section.
    The constraints are verified against the given state INSN_SEQUENCE, which
    is updated as it transitions through the verification.  */

enum err_type
verify_constraints (const struct aarch64_inst *inst,
		    const aarch64_insn insn ATTRIBUTE_UNUSED,
		    bfd_vma pc,
		    bfd_boolean encoding,
		    aarch64_operand_error *mismatch_detail,
		    aarch64_instr_sequence *insn_sequence)
{
  assert (inst);
  assert (inst->opcode);

  const struct aarch64_opcode *opcode = inst->opcode;
  if (!opcode->constraints && !insn_sequence->instr)
    return ERR_OK;

  assert (insn_sequence);

  enum err_type res = ERR_OK;

  /* This instruction puts a constraint on the insn_sequence.  */
  if (opcode->flags & F_SCAN)
    {
      if (insn_sequence->instr)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("instruction opens new dependency "
				     "sequence without ending previous one");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	}

      init_insn_sequence (inst, insn_sequence);
      return res;
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with an open sequence then we haven't
	 closed a previous one that we should have.  */
      if (!encoding && pc == 0)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = TRUE;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  TODO: move this to a lookup
	 table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check whether the MOVPRFX is followed by an SVE instruction at
	     all, so that we can give a more precise error message.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check whether the SVE instruction that follows the MOVPRFX is
	     one that is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check the usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bfd_boolean predicated = FALSE;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine whether the movprfx instruction is predicated.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = TRUE;
	      blk_pred = insn_sequence->instr->operands[1];
	    }
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If the movprfx is predicated, do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same predicate register must be used in the
		 instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = TRUE;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations use the destination register as an input
	     as well, so by definition they are allowed one extra use of the
	     same register.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know the register is used; check exactly where it is
	     used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Operand used more than allowed for the specific opcode type.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifier checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = TRUE;
	      res = ERR_VFI;
	      goto done;
	    }
	}

done:
      /* Add the new instruction to the sequence.  */
      memcpy (insn_sequence->current_insns + insn_sequence->next_insn++,
	      inst, sizeof (aarch64_inst));

      /* Check if sequence is now full.  */
      if (insn_sequence->next_insn >= insn_sequence->num_insns)
	{
	  /* Sequence is full, but we don't have anything special to do for now,
	     so clear and reset it.  */
	  init_insn_sequence (NULL, insn_sequence);
	}
    }

  return res;
}

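/* For reference, an illustrative (not exhaustive) set of sequences that the
   C_SCAN_MOVPRFX checks above accept and reject:

     movprfx z0, z1
     add     z0.d, z0.d, #1              <-- OK: destructive form, z0 is the
                                              destination.

     movprfx z0.d, p0/m, z1.d
     add     z0.d, p0/m, z0.d, z2.d      <-- OK: same merging predicate.

     movprfx z0, z1
     add     z2.d, z2.d, #1              <-- rejected: z0 is not written.

     movprfx z0.d, p0/m, z1.d
     add     z0.d, p1/m, z0.d, z2.d      <-- rejected: predicate differs.  */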

/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bfd_boolean
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return FALSE;
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return FALSE;
	}
    }
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}

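/* Worked examples (illustrative only), all with ESIZE == 2 (.h elements):

     UVALUE 0x5555 -> FALSE: every byte is 0x55, so DUP with .b elements
                      can materialise it.
     UVALUE 0x0100 -> FALSE: equal to 1 << 8, so DUP .h with the 8-bit
                      immediate shifted by 8 can materialise it.
     UVALUE 0x1234 -> TRUE:  not a replicated byte and not an 8-bit
                      immediate optionally shifted by 8, so DUPM is
                      required.  */
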
/* Include the opcode description table as well as the operand description
   table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"
