1/* aarch64-opc.c -- AArch64 opcode support.
2   Copyright (C) 2009-2022 Free Software Foundation, Inc.
3   Contributed by ARM Ltd.
4
5   This file is part of the GNU opcodes library.
6
7   This library is free software; you can redistribute it and/or modify
8   it under the terms of the GNU General Public License as published by
9   the Free Software Foundation; either version 3, or (at your option)
10   any later version.
11
12   It is distributed in the hope that it will be useful, but WITHOUT
13   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
14   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
15   License for more details.
16
17   You should have received a copy of the GNU General Public License
18   along with this program; see the file COPYING3. If not,
19   see <http://www.gnu.org/licenses/>.  */
20
21#include "sysdep.h"
22#include <assert.h>
23#include <stdlib.h>
24#include <stdio.h>
25#include <stdint.h>
26#include <stdarg.h>
27#include <inttypes.h>
28
29#include "opintl.h"
30#include "libiberty.h"
31
32#include "aarch64-opc.h"
33
34#ifdef DEBUG_AARCH64
35int debug_dump = false;
36#endif /* DEBUG_AARCH64 */
37
38/* The enumeration strings associated with each value of a 5-bit SVE
39   pattern operand.  A null entry indicates a reserved meaning.  */
40const char *const aarch64_sve_pattern_array[32] = {
41  /* 0-7.  */
42  "pow2",
43  "vl1",
44  "vl2",
45  "vl3",
46  "vl4",
47  "vl5",
48  "vl6",
49  "vl7",
50  /* 8-15.  */
51  "vl8",
52  "vl16",
53  "vl32",
54  "vl64",
55  "vl128",
56  "vl256",
57  0,
58  0,
59  /* 16-23.  */
60  0,
61  0,
62  0,
63  0,
64  0,
65  0,
66  0,
67  0,
68  /* 24-31.  */
69  0,
70  0,
71  0,
72  0,
73  0,
74  "mul4",
75  "mul3",
76  "all"
77};
78
79/* The enumeration strings associated with each value of a 4-bit SVE
80   prefetch operand.  A null entry indicates a reserved meaning.  */
81const char *const aarch64_sve_prfop_array[16] = {
82  /* 0-7.  */
83  "pldl1keep",
84  "pldl1strm",
85  "pldl2keep",
86  "pldl2strm",
87  "pldl3keep",
88  "pldl3strm",
89  0,
90  0,
91  /* 8-15.  */
92  "pstl1keep",
93  "pstl1strm",
94  "pstl2keep",
95  "pstl2strm",
96  "pstl3keep",
97  "pstl3strm",
98  0,
99  0
100};
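
/* Illustrative sketch (added for this edit, not part of the original
   sources): how a raw 4-bit prefetch-operation field could be turned into
   text using the table above.  The guard macro and function name are
   hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static const char *
example_sve_prfop_name (unsigned int raw4)
{
  /* A null entry means the encoding is reserved.  */
  const char *name = aarch64_sve_prfop_array[raw4 & 0xf];
  return name != NULL ? name : "(reserved)";
}
#endif /* AARCH64_OPC_EXAMPLES */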
101
/* Helper functions to determine which operand to use to encode/decode
   the size:Q fields for AdvSIMD instructions.  */
104
105static inline bool
106vector_qualifier_p (enum aarch64_opnd_qualifier qualifier)
107{
108  return (qualifier >= AARCH64_OPND_QLF_V_8B
109	  && qualifier <= AARCH64_OPND_QLF_V_1Q);
110}
111
112static inline bool
113fp_qualifier_p (enum aarch64_opnd_qualifier qualifier)
114{
115  return (qualifier >= AARCH64_OPND_QLF_S_B
116	  && qualifier <= AARCH64_OPND_QLF_S_Q);
117}
118
119enum data_pattern
120{
121  DP_UNKNOWN,
122  DP_VECTOR_3SAME,
123  DP_VECTOR_LONG,
124  DP_VECTOR_WIDE,
125  DP_VECTOR_ACROSS_LANES,
126};
127
128static const char significant_operand_index [] =
129{
130  0,	/* DP_UNKNOWN, by default using operand 0.  */
131  0,	/* DP_VECTOR_3SAME */
132  1,	/* DP_VECTOR_LONG */
133  2,	/* DP_VECTOR_WIDE */
134  1,	/* DP_VECTOR_ACROSS_LANES */
135};
136
137/* Given a sequence of qualifiers in QUALIFIERS, determine and return
138   the data pattern.
139   N.B. QUALIFIERS is a possible sequence of qualifiers each of which
140   corresponds to one of a sequence of operands.  */
141
142static enum data_pattern
143get_data_pattern (const aarch64_opnd_qualifier_seq_t qualifiers)
144{
145  if (vector_qualifier_p (qualifiers[0]))
146    {
147      /* e.g. v.4s, v.4s, v.4s
148	   or v.4h, v.4h, v.h[3].  */
149      if (qualifiers[0] == qualifiers[1]
150	  && vector_qualifier_p (qualifiers[2])
151	  && (aarch64_get_qualifier_esize (qualifiers[0])
152	      == aarch64_get_qualifier_esize (qualifiers[1]))
153	  && (aarch64_get_qualifier_esize (qualifiers[0])
154	      == aarch64_get_qualifier_esize (qualifiers[2])))
155	return DP_VECTOR_3SAME;
156      /* e.g. v.8h, v.8b, v.8b.
157           or v.4s, v.4h, v.h[2].
158	   or v.8h, v.16b.  */
159      if (vector_qualifier_p (qualifiers[1])
160	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
161	  && (aarch64_get_qualifier_esize (qualifiers[0])
162	      == aarch64_get_qualifier_esize (qualifiers[1]) << 1))
163	return DP_VECTOR_LONG;
164      /* e.g. v.8h, v.8h, v.8b.  */
165      if (qualifiers[0] == qualifiers[1]
166	  && vector_qualifier_p (qualifiers[2])
167	  && aarch64_get_qualifier_esize (qualifiers[0]) != 0
168	  && (aarch64_get_qualifier_esize (qualifiers[0])
169	      == aarch64_get_qualifier_esize (qualifiers[2]) << 1)
170	  && (aarch64_get_qualifier_esize (qualifiers[0])
171	      == aarch64_get_qualifier_esize (qualifiers[1])))
172	return DP_VECTOR_WIDE;
173    }
174  else if (fp_qualifier_p (qualifiers[0]))
175    {
176      /* e.g. SADDLV <V><d>, <Vn>.<T>.  */
177      if (vector_qualifier_p (qualifiers[1])
178	  && qualifiers[2] == AARCH64_OPND_QLF_NIL)
179	return DP_VECTOR_ACROSS_LANES;
180    }
181
182  return DP_UNKNOWN;
183}
184
185/* Select the operand to do the encoding/decoding of the 'size:Q' fields in
186   the AdvSIMD instructions.  */
/* N.B. it is possible to do some optimization that doesn't call
   get_data_pattern each time an operand needs to be selected; we could
   either cache the calculated result or statically generate the data.
   However, it is not obvious that the optimization would bring a
   significant benefit.  */
192
193int
194aarch64_select_operand_for_sizeq_field_coding (const aarch64_opcode *opcode)
195{
196  return
197    significant_operand_index [get_data_pattern (opcode->qualifiers_list[0])];
198}
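
/* Worked example (added for clarity): for a widening form such as
   SADDL <Vd>.8H, <Vn>.8B, <Vm>.8B the qualifier sequence is
   { V_8H, V_8B, V_8B }, which get_data_pattern classifies as
   DP_VECTOR_LONG, so the function above returns
   significant_operand_index[DP_VECTOR_LONG] == 1, i.e. the size:Q fields
   are encoded/decoded using the first source operand.  */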
199
/* Instruction bit-fields.
   Keep synced with 'enum aarch64_field_kind'.  */
202const aarch64_field fields[] =
203{
204    {  0,  0 },	/* NIL.  */
205    {  0,  4 },	/* cond2: condition in truly conditional-executed inst.  */
206    {  0,  4 },	/* nzcv: flag bit specifier, encoded in the "nzcv" field.  */
207    {  5,  5 },	/* defgh: d:e:f:g:h bits in AdvSIMD modified immediate.  */
208    { 16,  3 },	/* abc: a:b:c bits in AdvSIMD modified immediate.  */
209    {  5, 19 },	/* imm19: e.g. in CBZ.  */
210    {  5, 19 },	/* immhi: e.g. in ADRP.  */
211    { 29,  2 },	/* immlo: e.g. in ADRP.  */
212    { 22,  2 },	/* size: in most AdvSIMD and floating-point instructions.  */
213    { 10,  2 },	/* vldst_size: size field in the AdvSIMD load/store inst.  */
214    { 29,  1 },	/* op: in AdvSIMD modified immediate instructions.  */
215    { 30,  1 },	/* Q: in most AdvSIMD instructions.  */
216    {  0,  5 },	/* Rt: in load/store instructions.  */
217    {  0,  5 },	/* Rd: in many integer instructions.  */
218    {  5,  5 },	/* Rn: in many integer instructions.  */
219    { 10,  5 },	/* Rt2: in load/store pair instructions.  */
220    { 10,  5 },	/* Ra: in fp instructions.  */
221    {  5,  3 },	/* op2: in the system instructions.  */
222    {  8,  4 },	/* CRm: in the system instructions.  */
223    { 12,  4 },	/* CRn: in the system instructions.  */
224    { 16,  3 },	/* op1: in the system instructions.  */
225    { 19,  2 },	/* op0: in the system instructions.  */
226    { 10,  3 },	/* imm3: in add/sub extended reg instructions.  */
227    { 12,  4 },	/* cond: condition flags as a source operand.  */
228    { 12,  4 },	/* opcode: in advsimd load/store instructions.  */
229    { 12,  4 },	/* cmode: in advsimd modified immediate instructions.  */
230    { 13,  3 },	/* asisdlso_opcode: opcode in advsimd ld/st single element.  */
231    { 13,  2 },	/* len: in advsimd tbl/tbx instructions.  */
232    { 16,  5 },	/* Rm: in ld/st reg offset and some integer inst.  */
233    { 16,  5 },	/* Rs: in load/store exclusive instructions.  */
234    { 13,  3 },	/* option: in ld/st reg offset + add/sub extended reg inst.  */
235    { 12,  1 },	/* S: in load/store reg offset instructions.  */
236    { 21,  2 },	/* hw: in move wide constant instructions.  */
237    { 22,  2 },	/* opc: in load/store reg offset instructions.  */
238    { 23,  1 },	/* opc1: in load/store reg offset instructions.  */
239    { 22,  2 },	/* shift: in add/sub reg/imm shifted instructions.  */
240    { 22,  2 },	/* type: floating point type field in fp data inst.  */
241    { 30,  2 },	/* ldst_size: size field in ld/st reg offset inst.  */
242    { 10,  6 },	/* imm6: in add/sub reg shifted instructions.  */
243    { 15,  6 },	/* imm6_2: in rmif instructions.  */
244    { 11,  4 },	/* imm4: in advsimd ext and advsimd ins instructions.  */
245    {  0,  4 },	/* imm4_2: in rmif instructions.  */
246    { 10,  4 },	/* imm4_3: in adddg/subg instructions.  */
247    {  5,  4 }, /* imm4_5: in SME instructions.  */
248    { 16,  5 },	/* imm5: in conditional compare (immediate) instructions.  */
249    { 15,  7 },	/* imm7: in load/store pair pre/post index instructions.  */
250    { 13,  8 },	/* imm8: in floating-point scalar move immediate inst.  */
251    { 12,  9 },	/* imm9: in load/store pre/post index instructions.  */
252    { 10, 12 },	/* imm12: in ld/st unsigned imm or add/sub shifted inst.  */
253    {  5, 14 },	/* imm14: in test bit and branch instructions.  */
254    {  5, 16 },	/* imm16: in exception instructions.  */
255    {  0, 16 },	/* imm16_2: in udf instruction. */
256    {  0, 26 },	/* imm26: in unconditional branch instructions.  */
257    { 10,  6 },	/* imms: in bitfield and logical immediate instructions.  */
258    { 16,  6 },	/* immr: in bitfield and logical immediate instructions.  */
259    { 16,  3 },	/* immb: in advsimd shift by immediate instructions.  */
260    { 19,  4 },	/* immh: in advsimd shift by immediate instructions.  */
261    { 22,  1 },	/* S: in LDRAA and LDRAB instructions.  */
262    { 22,  1 },	/* N: in logical (immediate) instructions.  */
263    { 11,  1 },	/* index: in ld/st inst deciding the pre/post-index.  */
264    { 24,  1 },	/* index2: in ld/st pair inst deciding the pre/post-index.  */
265    { 31,  1 },	/* sf: in integer data processing instructions.  */
266    { 30,  1 },	/* lse_size: in LSE extension atomic instructions.  */
267    { 11,  1 },	/* H: in advsimd scalar x indexed element instructions.  */
268    { 21,  1 },	/* L: in advsimd scalar x indexed element instructions.  */
269    { 20,  1 },	/* M: in advsimd scalar x indexed element instructions.  */
270    { 31,  1 },	/* b5: in the test bit and branch instructions.  */
271    { 19,  5 },	/* b40: in the test bit and branch instructions.  */
272    { 10,  6 },	/* scale: in the fixed-point scalar to fp converting inst.  */
273    {  4,  1 }, /* SVE_M_4: Merge/zero select, bit 4.  */
274    { 14,  1 }, /* SVE_M_14: Merge/zero select, bit 14.  */
275    { 16,  1 }, /* SVE_M_16: Merge/zero select, bit 16.  */
276    { 17,  1 }, /* SVE_N: SVE equivalent of N.  */
277    {  0,  4 }, /* SVE_Pd: p0-p15, bits [3,0].  */
278    { 10,  3 }, /* SVE_Pg3: p0-p7, bits [12,10].  */
279    {  5,  4 }, /* SVE_Pg4_5: p0-p15, bits [8,5].  */
280    { 10,  4 }, /* SVE_Pg4_10: p0-p15, bits [13,10].  */
281    { 16,  4 }, /* SVE_Pg4_16: p0-p15, bits [19,16].  */
282    { 16,  4 }, /* SVE_Pm: p0-p15, bits [19,16].  */
283    {  5,  4 }, /* SVE_Pn: p0-p15, bits [8,5].  */
284    {  0,  4 }, /* SVE_Pt: p0-p15, bits [3,0].  */
285    {  5,  5 }, /* SVE_Rm: SVE alternative position for Rm.  */
286    { 16,  5 }, /* SVE_Rn: SVE alternative position for Rn.  */
287    {  0,  5 }, /* SVE_Vd: Scalar SIMD&FP register, bits [4,0].  */
288    {  5,  5 }, /* SVE_Vm: Scalar SIMD&FP register, bits [9,5].  */
289    {  5,  5 }, /* SVE_Vn: Scalar SIMD&FP register, bits [9,5].  */
290    {  5,  5 }, /* SVE_Za_5: SVE vector register, bits [9,5].  */
291    { 16,  5 }, /* SVE_Za_16: SVE vector register, bits [20,16].  */
292    {  0,  5 }, /* SVE_Zd: SVE vector register. bits [4,0].  */
293    {  5,  5 }, /* SVE_Zm_5: SVE vector register, bits [9,5].  */
294    { 16,  5 }, /* SVE_Zm_16: SVE vector register, bits [20,16]. */
295    {  5,  5 }, /* SVE_Zn: SVE vector register, bits [9,5].  */
296    {  0,  5 }, /* SVE_Zt: SVE vector register, bits [4,0].  */
297    {  5,  1 }, /* SVE_i1: single-bit immediate.  */
298    { 22,  1 }, /* SVE_i3h: high bit of 3-bit immediate.  */
299    { 11,  1 }, /* SVE_i3l: low bit of 3-bit immediate.  */
    { 19,  2 }, /* SVE_i3h2: two high bits of 3-bit immediate, bits [20,19].  */
    { 20,  1 }, /* SVE_i2h: high bit of 2-bit immediate, bit 20.  */
302    { 16,  3 }, /* SVE_imm3: 3-bit immediate field.  */
303    { 16,  4 }, /* SVE_imm4: 4-bit immediate field.  */
304    {  5,  5 }, /* SVE_imm5: 5-bit immediate field.  */
305    { 16,  5 }, /* SVE_imm5b: secondary 5-bit immediate field.  */
306    { 16,  6 }, /* SVE_imm6: 6-bit immediate field.  */
307    { 14,  7 }, /* SVE_imm7: 7-bit immediate field.  */
308    {  5,  8 }, /* SVE_imm8: 8-bit immediate field.  */
309    {  5,  9 }, /* SVE_imm9: 9-bit immediate field.  */
310    { 11,  6 }, /* SVE_immr: SVE equivalent of immr.  */
311    {  5,  6 }, /* SVE_imms: SVE equivalent of imms.  */
312    { 10,  2 }, /* SVE_msz: 2-bit shift amount for ADR.  */
313    {  5,  5 }, /* SVE_pattern: vector pattern enumeration.  */
314    {  0,  4 }, /* SVE_prfop: prefetch operation for SVE PRF[BHWD].  */
315    { 16,  1 }, /* SVE_rot1: 1-bit rotation amount.  */
316    { 10,  2 }, /* SVE_rot2: 2-bit rotation amount.  */
317    { 10,  1 }, /* SVE_rot3: 1-bit rotation amount at bit 10.  */
318    { 22,  1 }, /* SVE_sz: 1-bit element size select.  */
319    { 17,  2 }, /* SVE_size: 2-bit element size, bits [18,17].  */
320    { 30,  1 }, /* SVE_sz2: 1-bit element size select.  */
321    { 16,  4 }, /* SVE_tsz: triangular size select.  */
322    { 22,  2 }, /* SVE_tszh: triangular size select high, bits [23,22].  */
323    {  8,  2 }, /* SVE_tszl_8: triangular size select low, bits [9,8].  */
324    { 19,  2 }, /* SVE_tszl_19: triangular size select low, bits [20,19].  */
325    { 14,  1 }, /* SVE_xs_14: UXTW/SXTW select (bit 14).  */
326    { 22,  1 }, /* SVE_xs_22: UXTW/SXTW select (bit 22).  */
327    {  0,  2 }, /* SME ZAda tile ZA0-ZA3.  */
328    {  0,  3 }, /* SME ZAda tile ZA0-ZA7.  */
329    { 22,  2 }, /* SME_size_10: size<1>, size<0> class field, [23:22].  */
330    { 16,  1 }, /* SME_Q: Q class bit, bit 16.  */
331    { 15,  1 }, /* SME_V: (horizontal / vertical tiles), bit 15.  */
332    { 13,  2 }, /* SME_Rv: vector select register W12-W15, bits [14:13].  */
333    { 13,  3 }, /* SME Pm second source scalable predicate register P0-P7.  */
334    { 0,   8 }, /* SME_zero_mask: list of up to 8 tile names separated by commas [7:0].  */
335    { 16,  2 }, /* SME_Rm: index base register W12-W15 [17:16].  */
336    { 23,  1 }, /* SME_i1: immediate field, bit 23.  */
337    { 22,  1 }, /* SME_tszh: immediate and qualifier field, bit 22.  */
338    { 18,  3 }, /* SME_tshl: immediate and qualifier field, bits [20:18].  */
339    { 11,  2 }, /* rotate1: FCMLA immediate rotate.  */
340    { 13,  2 }, /* rotate2: Indexed element FCMLA immediate rotate.  */
341    { 12,  1 }, /* rotate3: FCADD immediate rotate.  */
342    { 12,  2 }, /* SM3: Indexed element SM3 2 bits index immediate.  */
343    { 22,  1 }, /* sz: 1-bit element size select.  */
344    { 10,  2 }, /* CRm_dsb_nxs: 2-bit imm. encoded in CRm<3:2>.  */
345    { 10,  8 }, /* CSSC_imm8.  */
346};
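
/* Illustrative sketch (added for this edit, not part of the original
   sources): extracting a raw field value from an instruction word using a
   { lsb, width } pair from the table above.  For instance, the imm19 entry
   { 5, 19 } selects bits [23:5].  The guard macro and helper name are
   hypothetical; the real insertion/extraction helpers live elsewhere in the
   opcodes library.  */
#ifdef AARCH64_OPC_EXAMPLES
static aarch64_insn
example_extract_field_bits (unsigned int lsb, unsigned int width,
                            aarch64_insn code)
{
  aarch64_insn mask = ((aarch64_insn) 1 << width) - 1;
  return (code >> lsb) & mask;
}
#endif /* AARCH64_OPC_EXAMPLES */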
347
348enum aarch64_operand_class
349aarch64_get_operand_class (enum aarch64_opnd type)
350{
351  return aarch64_operands[type].op_class;
352}
353
354const char *
355aarch64_get_operand_name (enum aarch64_opnd type)
356{
357  return aarch64_operands[type].name;
358}
359
/* Get operand description string.
   This is usually used for diagnostic purposes.  */
362const char *
363aarch64_get_operand_desc (enum aarch64_opnd type)
364{
365  return aarch64_operands[type].desc;
366}
367
368/* Table of all conditional affixes.  */
369const aarch64_cond aarch64_conds[16] =
370{
371  {{"eq", "none"}, 0x0},
372  {{"ne", "any"}, 0x1},
373  {{"cs", "hs", "nlast"}, 0x2},
374  {{"cc", "lo", "ul", "last"}, 0x3},
375  {{"mi", "first"}, 0x4},
376  {{"pl", "nfrst"}, 0x5},
377  {{"vs"}, 0x6},
378  {{"vc"}, 0x7},
379  {{"hi", "pmore"}, 0x8},
380  {{"ls", "plast"}, 0x9},
381  {{"ge", "tcont"}, 0xa},
382  {{"lt", "tstop"}, 0xb},
383  {{"gt"}, 0xc},
384  {{"le"}, 0xd},
385  {{"al"}, 0xe},
386  {{"nv"}, 0xf},
387};
388
389const aarch64_cond *
390get_cond_from_value (aarch64_insn value)
391{
392  assert (value < 16);
393  return &aarch64_conds[(unsigned int) value];
394}
395
396const aarch64_cond *
397get_inverted_cond (const aarch64_cond *cond)
398{
399  return &aarch64_conds[cond->value ^ 0x1];
400}
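
/* Worked example (added for clarity): flipping the low bit of the condition
   value swaps a condition with its inverse, e.g. eq (0x0) <-> ne (0x1) and
   ge (0xa) <-> lt (0xb) in the table above.  */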
401
402/* Table describing the operand extension/shifting operators; indexed by
403   enum aarch64_modifier_kind.
404
405   The value column provides the most common values for encoding modifiers,
406   which enables table-driven encoding/decoding for the modifiers.  */
407const struct aarch64_name_value_pair aarch64_operand_modifiers [] =
408{
409    {"none", 0x0},
410    {"msl",  0x0},
411    {"ror",  0x3},
412    {"asr",  0x2},
413    {"lsr",  0x1},
414    {"lsl",  0x0},
415    {"uxtb", 0x0},
416    {"uxth", 0x1},
417    {"uxtw", 0x2},
418    {"uxtx", 0x3},
419    {"sxtb", 0x4},
420    {"sxth", 0x5},
421    {"sxtw", 0x6},
422    {"sxtx", 0x7},
423    {"mul", 0x0},
424    {"mul vl", 0x0},
425    {NULL, 0},
426};
427
428enum aarch64_modifier_kind
429aarch64_get_operand_modifier (const struct aarch64_name_value_pair *desc)
430{
431  return desc - aarch64_operand_modifiers;
432}
433
434aarch64_insn
435aarch64_get_operand_modifier_value (enum aarch64_modifier_kind kind)
436{
437  return aarch64_operand_modifiers[kind].value;
438}
439
440enum aarch64_modifier_kind
441aarch64_get_operand_modifier_from_value (aarch64_insn value,
442					 bool extend_p)
443{
444  if (extend_p)
445    return AARCH64_MOD_UXTB + value;
446  else
447    return AARCH64_MOD_LSL - value;
448}
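
/* Worked example (added for clarity): with EXTEND_P true, a decoded option
   value of 2 yields AARCH64_MOD_UXTB + 2 == AARCH64_MOD_UXTW; with EXTEND_P
   false, a shift value of 0 yields AARCH64_MOD_LSL and 3 yields
   AARCH64_MOD_ROR, matching the value column of the modifier table above.  */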
449
450bool
451aarch64_extend_operator_p (enum aarch64_modifier_kind kind)
452{
453  return kind > AARCH64_MOD_LSL && kind <= AARCH64_MOD_SXTX;
454}
455
456static inline bool
457aarch64_shift_operator_p (enum aarch64_modifier_kind kind)
458{
459  return kind >= AARCH64_MOD_ROR && kind <= AARCH64_MOD_LSL;
460}
461
462const struct aarch64_name_value_pair aarch64_barrier_options[16] =
463{
464    { "#0x00", 0x0 },
465    { "oshld", 0x1 },
466    { "oshst", 0x2 },
467    { "osh",   0x3 },
468    { "#0x04", 0x4 },
469    { "nshld", 0x5 },
470    { "nshst", 0x6 },
471    { "nsh",   0x7 },
472    { "#0x08", 0x8 },
473    { "ishld", 0x9 },
474    { "ishst", 0xa },
475    { "ish",   0xb },
476    { "#0x0c", 0xc },
477    { "ld",    0xd },
478    { "st",    0xe },
479    { "sy",    0xf },
480};
481
482const struct aarch64_name_value_pair aarch64_barrier_dsb_nxs_options[4] =
483{                       /*  CRm<3:2>  #imm  */
484    { "oshnxs", 16 },    /*    00       16   */
485    { "nshnxs", 20 },    /*    01       20   */
486    { "ishnxs", 24 },    /*    10       24   */
487    { "synxs",  28 },    /*    11       28   */
488};
489
490/* Table describing the operands supported by the aliases of the HINT
491   instruction.
492
493   The name column is the operand that is accepted for the alias.  The value
494   column is the hint number of the alias.  The list of operands is terminated
495   by NULL in the name column.  */
496
497const struct aarch64_name_value_pair aarch64_hint_options[] =
498{
499  /* BTI.  This is also the F_DEFAULT entry for AARCH64_OPND_BTI_TARGET.  */
500  { " ",	HINT_ENCODE (HINT_OPD_F_NOPRINT, 0x20) },
501  { "csync",	HINT_OPD_CSYNC },	/* PSB CSYNC.  */
502  { "c",	HINT_OPD_C },		/* BTI C.  */
503  { "j",	HINT_OPD_J },		/* BTI J.  */
504  { "jc",	HINT_OPD_JC },		/* BTI JC.  */
505  { NULL,	HINT_OPD_NULL },
506};
507
/* op -> op:       load = 0, instruction = 1, store = 2
   l  -> level:    1-3
   t  -> temporal: temporal (retained) = 0, non-temporal (streaming) = 1   */
511#define B(op,l,t) (((op) << 3) | (((l) - 1) << 1) | (t))
512const struct aarch64_name_value_pair aarch64_prfops[32] =
513{
514  { "pldl1keep", B(0, 1, 0) },
515  { "pldl1strm", B(0, 1, 1) },
516  { "pldl2keep", B(0, 2, 0) },
517  { "pldl2strm", B(0, 2, 1) },
518  { "pldl3keep", B(0, 3, 0) },
519  { "pldl3strm", B(0, 3, 1) },
520  { NULL, 0x06 },
521  { NULL, 0x07 },
522  { "plil1keep", B(1, 1, 0) },
523  { "plil1strm", B(1, 1, 1) },
524  { "plil2keep", B(1, 2, 0) },
525  { "plil2strm", B(1, 2, 1) },
526  { "plil3keep", B(1, 3, 0) },
527  { "plil3strm", B(1, 3, 1) },
528  { NULL, 0x0e },
529  { NULL, 0x0f },
530  { "pstl1keep", B(2, 1, 0) },
531  { "pstl1strm", B(2, 1, 1) },
532  { "pstl2keep", B(2, 2, 0) },
533  { "pstl2strm", B(2, 2, 1) },
534  { "pstl3keep", B(2, 3, 0) },
535  { "pstl3strm", B(2, 3, 1) },
536  { NULL, 0x16 },
537  { NULL, 0x17 },
538  { NULL, 0x18 },
539  { NULL, 0x19 },
540  { NULL, 0x1a },
541  { NULL, 0x1b },
542  { NULL, 0x1c },
543  { NULL, 0x1d },
544  { NULL, 0x1e },
545  { NULL, 0x1f },
546};
547#undef B
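
/* Worked example (added for clarity): B(0, 1, 0) == 0 is "pldl1keep" and
   B(2, 3, 1) == (2 << 3) | ((3 - 1) << 1) | 1 == 0x15 is "pstl3strm"; each
   entry's value is also its index in the table above.  */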
548
/* Utilities for value constraints.  */
550
551static inline int
552value_in_range_p (int64_t value, int low, int high)
553{
554  return (value >= low && value <= high) ? 1 : 0;
555}
556
557/* Return true if VALUE is a multiple of ALIGN.  */
558static inline int
559value_aligned_p (int64_t value, int align)
560{
561  return (value % align) == 0;
562}
563
/* Return non-zero if VALUE fits in a signed field that is WIDTH bits wide.  */
565static inline int
566value_fit_signed_field_p (int64_t value, unsigned width)
567{
568  assert (width < 32);
569  if (width < sizeof (value) * 8)
570    {
571      int64_t lim = (uint64_t) 1 << (width - 1);
572      if (value >= -lim && value < lim)
573	return 1;
574    }
575  return 0;
576}
577
/* Return non-zero if VALUE fits in an unsigned field that is WIDTH bits wide.  */
579static inline int
580value_fit_unsigned_field_p (int64_t value, unsigned width)
581{
582  assert (width < 32);
583  if (width < sizeof (value) * 8)
584    {
585      int64_t lim = (uint64_t) 1 << width;
586      if (value >= 0 && value < lim)
587	return 1;
588    }
589  return 0;
590}
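
/* Illustrative self-checks (added for this edit, not part of the original
   sources): a signed 9-bit field holds [-256, 255] and an unsigned 9-bit
   field holds [0, 511].  The guard macro and function name are
   hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_field_range_checks (void)
{
  assert (value_fit_signed_field_p (-256, 9));
  assert (!value_fit_signed_field_p (256, 9));
  assert (value_fit_unsigned_field_p (511, 9));
  assert (!value_fit_unsigned_field_p (512, 9));
}
#endif /* AARCH64_OPC_EXAMPLES */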
591
592/* Return 1 if OPERAND is SP or WSP.  */
593int
594aarch64_stack_pointer_p (const aarch64_opnd_info *operand)
595{
596  return ((aarch64_get_operand_class (operand->type)
597	   == AARCH64_OPND_CLASS_INT_REG)
598	  && operand_maybe_stack_pointer (aarch64_operands + operand->type)
599	  && operand->reg.regno == 31);
600}
601
/* Return 1 if OPERAND is XZR or WZR.  */
603int
604aarch64_zero_register_p (const aarch64_opnd_info *operand)
605{
606  return ((aarch64_get_operand_class (operand->type)
607	   == AARCH64_OPND_CLASS_INT_REG)
608	  && !operand_maybe_stack_pointer (aarch64_operands + operand->type)
609	  && operand->reg.regno == 31);
610}
611
/* Return true if the operand *OPERAND, which has the operand code
   OPERAND->TYPE and has been qualified by OPERAND->QUALIFIER, can also be
   qualified by the qualifier TARGET.  */
615
616static inline int
617operand_also_qualified_p (const struct aarch64_opnd_info *operand,
618			  aarch64_opnd_qualifier_t target)
619{
620  switch (operand->qualifier)
621    {
622    case AARCH64_OPND_QLF_W:
623      if (target == AARCH64_OPND_QLF_WSP && aarch64_stack_pointer_p (operand))
624	return 1;
625      break;
626    case AARCH64_OPND_QLF_X:
627      if (target == AARCH64_OPND_QLF_SP && aarch64_stack_pointer_p (operand))
628	return 1;
629      break;
630    case AARCH64_OPND_QLF_WSP:
631      if (target == AARCH64_OPND_QLF_W
632	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
633	return 1;
634      break;
635    case AARCH64_OPND_QLF_SP:
636      if (target == AARCH64_OPND_QLF_X
637	  && operand_maybe_stack_pointer (aarch64_operands + operand->type))
638	return 1;
639      break;
640    default:
641      break;
642    }
643
644  return 0;
645}
646
647/* Given qualifier sequence list QSEQ_LIST and the known qualifier KNOWN_QLF
648   for operand KNOWN_IDX, return the expected qualifier for operand IDX.
649
   Return NIL if more than one expected qualifier is found.  */
651
652aarch64_opnd_qualifier_t
653aarch64_get_expected_qualifier (const aarch64_opnd_qualifier_seq_t *qseq_list,
654				int idx,
655				const aarch64_opnd_qualifier_t known_qlf,
656				int known_idx)
657{
658  int i, saved_i;
659
660  /* Special case.
661
662     When the known qualifier is NIL, we have to assume that there is only
663     one qualifier sequence in the *QSEQ_LIST and return the corresponding
664     qualifier directly.  One scenario is that for instruction
665	PRFM <prfop>, [<Xn|SP>, #:lo12:<symbol>]
666     which has only one possible valid qualifier sequence
667	NIL, S_D
668     the caller may pass NIL in KNOWN_QLF to obtain S_D so that it can
669     determine the correct relocation type (i.e. LDST64_LO12) for PRFM.
670
     The qualifier NIL has dual roles in a qualifier sequence: it can mean
     that the operand has no qualifier, or that the qualifier sequence is
     not in use (when all qualifiers in the sequence are NILs).  Because of
     this, we have to handle the special case here.  */
675  if (known_qlf == AARCH64_OPND_NIL)
676    {
677      assert (qseq_list[0][known_idx] == AARCH64_OPND_NIL);
678      return qseq_list[0][idx];
679    }
680
681  for (i = 0, saved_i = -1; i < AARCH64_MAX_QLF_SEQ_NUM; ++i)
682    {
683      if (qseq_list[i][known_idx] == known_qlf)
684	{
685	  if (saved_i != -1)
            /* More than one sequence has KNOWN_QLF at KNOWN_IDX.  */
688	    return AARCH64_OPND_NIL;
689	  saved_i = i;
690	}
691    }
692
693  return qseq_list[saved_i][idx];
694}
695
696enum operand_qualifier_kind
697{
698  OQK_NIL,
699  OQK_OPD_VARIANT,
700  OQK_VALUE_IN_RANGE,
701  OQK_MISC,
702};
703
704/* Operand qualifier description.  */
705struct operand_qualifier_data
706{
707  /* The usage of the three data fields depends on the qualifier kind.  */
708  int data0;
709  int data1;
710  int data2;
711  /* Description.  */
712  const char *desc;
713  /* Kind.  */
714  enum operand_qualifier_kind kind;
715};
716
717/* Indexed by the operand qualifier enumerators.  */
718struct operand_qualifier_data aarch64_opnd_qualifiers[] =
719{
720  {0, 0, 0, "NIL", OQK_NIL},
721
722  /* Operand variant qualifiers.
723     First 3 fields:
724     element size, number of elements and common value for encoding.  */
725
726  {4, 1, 0x0, "w", OQK_OPD_VARIANT},
727  {8, 1, 0x1, "x", OQK_OPD_VARIANT},
728  {4, 1, 0x0, "wsp", OQK_OPD_VARIANT},
729  {8, 1, 0x1, "sp", OQK_OPD_VARIANT},
730
731  {1, 1, 0x0, "b", OQK_OPD_VARIANT},
732  {2, 1, 0x1, "h", OQK_OPD_VARIANT},
733  {4, 1, 0x2, "s", OQK_OPD_VARIANT},
734  {8, 1, 0x3, "d", OQK_OPD_VARIANT},
735  {16, 1, 0x4, "q", OQK_OPD_VARIANT},
736  {4, 1, 0x0, "4b", OQK_OPD_VARIANT},
737  {4, 1, 0x0, "2h", OQK_OPD_VARIANT},
738
739  {1, 4, 0x0, "4b", OQK_OPD_VARIANT},
740  {1, 8, 0x0, "8b", OQK_OPD_VARIANT},
741  {1, 16, 0x1, "16b", OQK_OPD_VARIANT},
742  {2, 2, 0x0, "2h", OQK_OPD_VARIANT},
743  {2, 4, 0x2, "4h", OQK_OPD_VARIANT},
744  {2, 8, 0x3, "8h", OQK_OPD_VARIANT},
745  {4, 2, 0x4, "2s", OQK_OPD_VARIANT},
746  {4, 4, 0x5, "4s", OQK_OPD_VARIANT},
747  {8, 1, 0x6, "1d", OQK_OPD_VARIANT},
748  {8, 2, 0x7, "2d", OQK_OPD_VARIANT},
749  {16, 1, 0x8, "1q", OQK_OPD_VARIANT},
750
751  {0, 0, 0, "z", OQK_OPD_VARIANT},
752  {0, 0, 0, "m", OQK_OPD_VARIANT},
753
754  /* Qualifier for scaled immediate for Tag granule (stg,st2g,etc).  */
755  {16, 0, 0, "tag", OQK_OPD_VARIANT},
756
  /* Qualifiers constraining the value range.
     First 3 fields:
     Lower bound, upper bound, unused.  */
760
761  {0, 15, 0, "CR",       OQK_VALUE_IN_RANGE},
762  {0,  7, 0, "imm_0_7" , OQK_VALUE_IN_RANGE},
763  {0, 15, 0, "imm_0_15", OQK_VALUE_IN_RANGE},
764  {0, 31, 0, "imm_0_31", OQK_VALUE_IN_RANGE},
765  {0, 63, 0, "imm_0_63", OQK_VALUE_IN_RANGE},
766  {1, 32, 0, "imm_1_32", OQK_VALUE_IN_RANGE},
767  {1, 64, 0, "imm_1_64", OQK_VALUE_IN_RANGE},
768
  /* Qualifiers for miscellaneous purposes.
770     First 3 fields:
771     unused, unused and unused.  */
772
773  {0, 0, 0, "lsl", 0},
774  {0, 0, 0, "msl", 0},
775
776  {0, 0, 0, "retrieving", 0},
777};
778
779static inline bool
780operand_variant_qualifier_p (aarch64_opnd_qualifier_t qualifier)
781{
782  return aarch64_opnd_qualifiers[qualifier].kind == OQK_OPD_VARIANT;
783}
784
785static inline bool
786qualifier_value_in_range_constraint_p (aarch64_opnd_qualifier_t qualifier)
787{
788  return aarch64_opnd_qualifiers[qualifier].kind == OQK_VALUE_IN_RANGE;
789}
790
791const char*
792aarch64_get_qualifier_name (aarch64_opnd_qualifier_t qualifier)
793{
794  return aarch64_opnd_qualifiers[qualifier].desc;
795}
796
797/* Given an operand qualifier, return the expected data element size
798   of a qualified operand.  */
799unsigned char
800aarch64_get_qualifier_esize (aarch64_opnd_qualifier_t qualifier)
801{
802  assert (operand_variant_qualifier_p (qualifier));
803  return aarch64_opnd_qualifiers[qualifier].data0;
804}
805
806unsigned char
807aarch64_get_qualifier_nelem (aarch64_opnd_qualifier_t qualifier)
808{
809  assert (operand_variant_qualifier_p (qualifier));
810  return aarch64_opnd_qualifiers[qualifier].data1;
811}
812
813aarch64_insn
814aarch64_get_qualifier_standard_value (aarch64_opnd_qualifier_t qualifier)
815{
816  assert (operand_variant_qualifier_p (qualifier));
817  return aarch64_opnd_qualifiers[qualifier].data2;
818}
819
820static int
821get_lower_bound (aarch64_opnd_qualifier_t qualifier)
822{
823  assert (qualifier_value_in_range_constraint_p (qualifier));
824  return aarch64_opnd_qualifiers[qualifier].data0;
825}
826
827static int
828get_upper_bound (aarch64_opnd_qualifier_t qualifier)
829{
830  assert (qualifier_value_in_range_constraint_p (qualifier));
831  return aarch64_opnd_qualifiers[qualifier].data1;
832}
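
/* Illustrative self-checks (added for this edit, not part of the original
   sources): the .4S arrangement has 4-byte elements and 4 of them, and the
   imm_0_31 qualifier constrains a value to [0, 31].  The guard macro and
   function name are hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_qualifier_queries (void)
{
  assert (aarch64_get_qualifier_esize (AARCH64_OPND_QLF_V_4S) == 4);
  assert (aarch64_get_qualifier_nelem (AARCH64_OPND_QLF_V_4S) == 4);
  assert (get_lower_bound (AARCH64_OPND_QLF_imm_0_31) == 0);
  assert (get_upper_bound (AARCH64_OPND_QLF_imm_0_31) == 31);
}
#endif /* AARCH64_OPC_EXAMPLES */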
833
834#ifdef DEBUG_AARCH64
835void
836aarch64_verbose (const char *str, ...)
837{
838  va_list ap;
839  va_start (ap, str);
840  printf ("#### ");
841  vprintf (str, ap);
842  printf ("\n");
843  va_end (ap);
844}
845
846static inline void
847dump_qualifier_sequence (const aarch64_opnd_qualifier_t *qualifier)
848{
849  int i;
850  printf ("#### \t");
851  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i, ++qualifier)
852    printf ("%s,", aarch64_get_qualifier_name (*qualifier));
853  printf ("\n");
854}
855
856static void
857dump_match_qualifiers (const struct aarch64_opnd_info *opnd,
858		       const aarch64_opnd_qualifier_t *qualifier)
859{
860  int i;
861  aarch64_opnd_qualifier_t curr[AARCH64_MAX_OPND_NUM];
862
863  aarch64_verbose ("dump_match_qualifiers:");
864  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
865    curr[i] = opnd[i].qualifier;
866  dump_qualifier_sequence (curr);
867  aarch64_verbose ("against");
868  dump_qualifier_sequence (qualifier);
869}
870#endif /* DEBUG_AARCH64 */
871
/* This function checks if the instruction described by OPCODE is a
   destructive instruction based on the usage of the registers.  It does not
   recognize unary destructive instructions.  */
875bool
876aarch64_is_destructive_by_operands (const aarch64_opcode *opcode)
877{
878  int i = 0;
879  const enum aarch64_opnd *opnds = opcode->operands;
880
881  if (opnds[0] == AARCH64_OPND_NIL)
882    return false;
883
884  while (opnds[++i] != AARCH64_OPND_NIL)
885    if (opnds[i] == opnds[0])
886      return true;
887
888  return false;
889}
890
/* TODO: improve this; we could have an extra field at runtime to store
   the number of operands rather than calculating it every time.  */
893
894int
895aarch64_num_of_operands (const aarch64_opcode *opcode)
896{
897  int i = 0;
898  const enum aarch64_opnd *opnds = opcode->operands;
899  while (opnds[i++] != AARCH64_OPND_NIL)
900    ;
901  --i;
902  assert (i >= 0 && i <= AARCH64_MAX_OPND_NUM);
903  return i;
904}
905
906/* Find the best matched qualifier sequence in *QUALIFIERS_LIST for INST.
   If it succeeds, fill the found sequence in *RET and return 1; otherwise
   return 0.
908
   N.B. on entry, it is very likely that only some operands in *INST
   have had their qualifiers established.
911
912   If STOP_AT is not -1, the function will only try to match
913   the qualifier sequence for operands before and including the operand
914   of index STOP_AT; and on success *RET will only be filled with the first
915   (STOP_AT+1) qualifiers.
916
   A couple of examples of the matching algorithm:
918
919   X,W,NIL should match
920   X,W,NIL
921
922   NIL,NIL should match
923   X  ,NIL
924
925   Apart from serving the main encoding routine, this can also be called
926   during or after the operand decoding.  */
927
928int
929aarch64_find_best_match (const aarch64_inst *inst,
930			 const aarch64_opnd_qualifier_seq_t *qualifiers_list,
931			 int stop_at, aarch64_opnd_qualifier_t *ret)
932{
933  int found = 0;
934  int i, num_opnds;
935  const aarch64_opnd_qualifier_t *qualifiers;
936
937  num_opnds = aarch64_num_of_operands (inst->opcode);
938  if (num_opnds == 0)
939    {
940      DEBUG_TRACE ("SUCCEED: no operand");
941      return 1;
942    }
943
944  if (stop_at < 0 || stop_at >= num_opnds)
945    stop_at = num_opnds - 1;
946
947  /* For each pattern.  */
948  for (i = 0; i < AARCH64_MAX_QLF_SEQ_NUM; ++i, ++qualifiers_list)
949    {
950      int j;
951      qualifiers = *qualifiers_list;
952
953      /* Start as positive.  */
954      found = 1;
955
956      DEBUG_TRACE ("%d", i);
957#ifdef DEBUG_AARCH64
958      if (debug_dump)
959	dump_match_qualifiers (inst->operands, qualifiers);
960#endif
961
962      /* The first entry should be taken literally, even if it's an empty
963	 qualifier sequence.  (This matters for strict testing.)  In other
964	 positions an empty sequence acts as a terminator.  */
965      if (i > 0 && empty_qualifier_sequence_p (qualifiers))
966	{
967	  found = 0;
968	  break;
969	}
970
971      for (j = 0; j < num_opnds && j <= stop_at; ++j, ++qualifiers)
972	{
973	  if (inst->operands[j].qualifier == AARCH64_OPND_QLF_NIL
974	      && !(inst->opcode->flags & F_STRICT))
975	    {
              /* Either the operand does not have a qualifier, or the
                 qualifier for the operand needs to be deduced from the
                 qualifier sequence.
                 In the latter case, any constraint checking related to
                 the obtained qualifier should be done later in
                 operand_general_constraint_met_p.  */
982	      continue;
983	    }
984	  else if (*qualifiers != inst->operands[j].qualifier)
985	    {
              /* Unless the target qualifier can also qualify the operand
                 (which already has a non-nil qualifier), non-equal
                 qualifiers are generally treated as not matching.  */
989	      if (operand_also_qualified_p (inst->operands + j, *qualifiers))
990		continue;
991	      else
992		{
993		  found = 0;
994		  break;
995		}
996	    }
997	  else
998	    continue;	/* Equal qualifiers are certainly matched.  */
999	}
1000
1001      /* Qualifiers established.  */
1002      if (found == 1)
1003	break;
1004    }
1005
1006  if (found == 1)
1007    {
1008      /* Fill the result in *RET.  */
1009      int j;
1010      qualifiers = *qualifiers_list;
1011
1012      DEBUG_TRACE ("complete qualifiers using list %d", i);
1013#ifdef DEBUG_AARCH64
1014      if (debug_dump)
1015	dump_qualifier_sequence (qualifiers);
1016#endif
1017
1018      for (j = 0; j <= stop_at; ++j, ++qualifiers)
1019	ret[j] = *qualifiers;
1020      for (; j < AARCH64_MAX_OPND_NUM; ++j)
1021	ret[j] = AARCH64_OPND_QLF_NIL;
1022
1023      DEBUG_TRACE ("SUCCESS");
1024      return 1;
1025    }
1026
1027  DEBUG_TRACE ("FAIL");
1028  return 0;
1029}
1030
1031/* Operand qualifier matching and resolving.
1032
1033   Return 1 if the operand qualifier(s) in *INST match one of the qualifier
1034   sequences in INST->OPCODE->qualifiers_list; otherwise return 0.
1035
   If UPDATE_P is set, update the qualifier(s) in *INST after the matching
   succeeds.  */
1038
1039static int
1040match_operands_qualifier (aarch64_inst *inst, bool update_p)
1041{
1042  int i;
1043  aarch64_opnd_qualifier_seq_t qualifiers;
1044
1045  if (!aarch64_find_best_match (inst, inst->opcode->qualifiers_list, -1,
1046			       qualifiers))
1047    {
1048      DEBUG_TRACE ("matching FAIL");
1049      return 0;
1050    }
1051
1052  /* Update the qualifiers.  */
1053  if (update_p)
1054    for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
1055      {
1056	if (inst->opcode->operands[i] == AARCH64_OPND_NIL)
1057	  break;
1058	DEBUG_TRACE_IF (inst->operands[i].qualifier != qualifiers[i],
1059			"update %s with %s for operand %d",
1060			aarch64_get_qualifier_name (inst->operands[i].qualifier),
1061			aarch64_get_qualifier_name (qualifiers[i]), i);
1062	inst->operands[i].qualifier = qualifiers[i];
1063      }
1064
1065  DEBUG_TRACE ("matching SUCCESS");
1066  return 1;
1067}
1068
1069/* Return TRUE if VALUE is a wide constant that can be moved into a general
1070   register by MOVZ.
1071
   IS32 indicates whether VALUE is a 32-bit immediate or not.
1073   If SHIFT_AMOUNT is not NULL, on the return of TRUE, the logical left shift
1074   amount will be returned in *SHIFT_AMOUNT.  */
1075
1076bool
1077aarch64_wide_constant_p (uint64_t value, int is32, unsigned int *shift_amount)
1078{
1079  int amount;
1080
1081  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1082
1083  if (is32)
1084    {
1085      /* Allow all zeros or all ones in top 32-bits, so that
1086	 32-bit constant expressions like ~0x80000000 are
1087	 permitted.  */
1088      if (value >> 32 != 0 && value >> 32 != 0xffffffff)
1089	/* Immediate out of range.  */
1090	return false;
1091      value &= 0xffffffff;
1092    }
1093
1094  /* first, try movz then movn */
1095  amount = -1;
1096  if ((value & ((uint64_t) 0xffff << 0)) == value)
1097    amount = 0;
1098  else if ((value & ((uint64_t) 0xffff << 16)) == value)
1099    amount = 16;
1100  else if (!is32 && (value & ((uint64_t) 0xffff << 32)) == value)
1101    amount = 32;
1102  else if (!is32 && (value & ((uint64_t) 0xffff << 48)) == value)
1103    amount = 48;
1104
1105  if (amount == -1)
1106    {
1107      DEBUG_TRACE ("exit false with 0x%" PRIx64 "(%" PRIi64 ")", value, value);
1108      return false;
1109    }
1110
1111  if (shift_amount != NULL)
1112    *shift_amount = amount;
1113
1114  DEBUG_TRACE ("exit true with amount %d", amount);
1115
1116  return true;
1117}
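
/* Illustrative self-checks (added for this edit, not part of the original
   sources): 0x12340000 is a single 16-bit chunk shifted left by 16, so it
   can be materialised with one MOVZ, whereas 0x12345678 cannot.  The guard
   macro and function name are hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_wide_constant_checks (void)
{
  unsigned int shift;
  assert (aarch64_wide_constant_p (0x12340000, 1, &shift) && shift == 16);
  assert (!aarch64_wide_constant_p (0x12345678, 1, NULL));
}
#endif /* AARCH64_OPC_EXAMPLES */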
1118
1119/* Build the accepted values for immediate logical SIMD instructions.
1120
1121   The standard encodings of the immediate value are:
1122     N      imms     immr         SIMD size  R             S
1123     1      ssssss   rrrrrr       64      UInt(rrrrrr)  UInt(ssssss)
1124     0      0sssss   0rrrrr       32      UInt(rrrrr)   UInt(sssss)
1125     0      10ssss   00rrrr       16      UInt(rrrr)    UInt(ssss)
1126     0      110sss   000rrr       8       UInt(rrr)     UInt(sss)
1127     0      1110ss   0000rr       4       UInt(rr)      UInt(ss)
1128     0      11110s   00000r       2       UInt(r)       UInt(s)
1129   where all-ones value of S is reserved.
1130
1131   Let's call E the SIMD size.
1132
1133   The immediate value is: S+1 bits '1' rotated to the right by R.
1134
1135   The total of valid encodings is 64*63 + 32*31 + ... + 2*1 = 5334
1136   (remember S != E - 1).  */
1137
1138#define TOTAL_IMM_NB  5334
1139
1140typedef struct
1141{
1142  uint64_t imm;
1143  aarch64_insn encoding;
1144} simd_imm_encoding;
1145
1146static simd_imm_encoding simd_immediates[TOTAL_IMM_NB];
1147
1148static int
1149simd_imm_encoding_cmp(const void *i1, const void *i2)
1150{
1151  const simd_imm_encoding *imm1 = (const simd_imm_encoding *)i1;
1152  const simd_imm_encoding *imm2 = (const simd_imm_encoding *)i2;
1153
1154  if (imm1->imm < imm2->imm)
1155    return -1;
1156  if (imm1->imm > imm2->imm)
1157    return +1;
1158  return 0;
1159}
1160
1161/* immediate bitfield standard encoding
1162   imm13<12> imm13<5:0> imm13<11:6> SIMD size R      S
1163   1         ssssss     rrrrrr      64        rrrrrr ssssss
1164   0         0sssss     0rrrrr      32        rrrrr  sssss
1165   0         10ssss     00rrrr      16        rrrr   ssss
1166   0         110sss     000rrr      8         rrr    sss
1167   0         1110ss     0000rr      4         rr     ss
1168   0         11110s     00000r      2         r      s  */
1169static inline int
1170encode_immediate_bitfield (int is64, uint32_t s, uint32_t r)
1171{
1172  return (is64 << 12) | (r << 6) | s;
1173}
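
/* Worked example (added for clarity): for the 64-bit element size (N == 1),
   a run of 8 ones (S == 7) with no rotation (R == 0) is encoded as
   (1 << 12) | (0 << 6) | 7 == 0x1007, i.e. imm13 == 1 000000 000111.  */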
1174
1175static void
1176build_immediate_table (void)
1177{
1178  uint32_t log_e, e, s, r, s_mask;
1179  uint64_t mask, imm;
1180  int nb_imms;
1181  int is64;
1182
1183  nb_imms = 0;
1184  for (log_e = 1; log_e <= 6; log_e++)
1185    {
1186      /* Get element size.  */
1187      e = 1u << log_e;
1188      if (log_e == 6)
1189	{
1190	  is64 = 1;
1191	  mask = 0xffffffffffffffffull;
1192	  s_mask = 0;
1193	}
1194      else
1195	{
1196	  is64 = 0;
1197	  mask = (1ull << e) - 1;
1198	  /* log_e  s_mask
1199	     1     ((1 << 4) - 1) << 2 = 111100
1200	     2     ((1 << 3) - 1) << 3 = 111000
1201	     3     ((1 << 2) - 1) << 4 = 110000
1202	     4     ((1 << 1) - 1) << 5 = 100000
1203	     5     ((1 << 0) - 1) << 6 = 000000  */
1204	  s_mask = ((1u << (5 - log_e)) - 1) << (log_e + 1);
1205	}
1206      for (s = 0; s < e - 1; s++)
1207	for (r = 0; r < e; r++)
1208	  {
1209	    /* s+1 consecutive bits to 1 (s < 63) */
1210	    imm = (1ull << (s + 1)) - 1;
1211	    /* rotate right by r */
1212	    if (r != 0)
1213	      imm = (imm >> r) | ((imm << (e - r)) & mask);
1214	    /* replicate the constant depending on SIMD size */
1215	    switch (log_e)
1216	      {
1217	      case 1: imm = (imm <<  2) | imm;
1218		/* Fall through.  */
1219	      case 2: imm = (imm <<  4) | imm;
1220		/* Fall through.  */
1221	      case 3: imm = (imm <<  8) | imm;
1222		/* Fall through.  */
1223	      case 4: imm = (imm << 16) | imm;
1224		/* Fall through.  */
1225	      case 5: imm = (imm << 32) | imm;
1226		/* Fall through.  */
1227	      case 6: break;
1228	      default: abort ();
1229	      }
1230	    simd_immediates[nb_imms].imm = imm;
1231	    simd_immediates[nb_imms].encoding =
1232	      encode_immediate_bitfield(is64, s | s_mask, r);
1233	    nb_imms++;
1234	  }
1235    }
1236  assert (nb_imms == TOTAL_IMM_NB);
1237  qsort(simd_immediates, nb_imms,
1238	sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1239}
1240
1241/* Return TRUE if VALUE is a valid logical immediate, i.e. bitmask, that can
1242   be accepted by logical (immediate) instructions
1243   e.g. ORR <Xd|SP>, <Xn>, #<imm>.
1244
1245   ESIZE is the number of bytes in the decoded immediate value.
1246   If ENCODING is not NULL, on the return of TRUE, the standard encoding for
1247   VALUE will be returned in *ENCODING.  */
1248
1249bool
1250aarch64_logical_immediate_p (uint64_t value, int esize, aarch64_insn *encoding)
1251{
1252  simd_imm_encoding imm_enc;
1253  const simd_imm_encoding *imm_encoding;
1254  static bool initialized = false;
1255  uint64_t upper;
1256  int i;
1257
1258  DEBUG_TRACE ("enter with 0x%" PRIx64 "(%" PRIi64 "), esize: %d", value,
1259	       value, esize);
1260
1261  if (!initialized)
1262    {
1263      build_immediate_table ();
1264      initialized = true;
1265    }
1266
1267  /* Allow all zeros or all ones in top bits, so that
1268     constant expressions like ~1 are permitted.  */
1269  upper = (uint64_t) -1 << (esize * 4) << (esize * 4);
1270  if ((value & ~upper) != value && (value | upper) != value)
1271    return false;
1272
1273  /* Replicate to a full 64-bit value.  */
1274  value &= ~upper;
1275  for (i = esize * 8; i < 64; i *= 2)
1276    value |= (value << i);
1277
1278  imm_enc.imm = value;
1279  imm_encoding = (const simd_imm_encoding *)
1280    bsearch(&imm_enc, simd_immediates, TOTAL_IMM_NB,
1281            sizeof(simd_immediates[0]), simd_imm_encoding_cmp);
1282  if (imm_encoding == NULL)
1283    {
1284      DEBUG_TRACE ("exit with false");
1285      return false;
1286    }
1287  if (encoding != NULL)
1288    *encoding = imm_encoding->encoding;
1289  DEBUG_TRACE ("exit with true");
1290  return true;
1291}
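
/* Illustrative self-checks (added for this edit, not part of the original
   sources): 0x00ff00ff00ff00ff is a run of 8 ones replicated every 16 bits
   and so is a valid bitmask immediate, whereas 0x1234 is not expressible as
   a rotated, replicated run of ones.  The guard macro and function name are
   hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_logical_immediate_checks (void)
{
  aarch64_insn enc;
  assert (aarch64_logical_immediate_p (0x00ff00ff00ff00ffULL, 8, &enc));
  assert (!aarch64_logical_immediate_p (0x1234, 8, NULL));
}
#endif /* AARCH64_OPC_EXAMPLES */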
1292
1293/* If 64-bit immediate IMM is in the format of
1294   "aaaaaaaabbbbbbbbccccccccddddddddeeeeeeeeffffffffgggggggghhhhhhhh",
1295   where a, b, c, d, e, f, g and h are independently 0 or 1, return an integer
1296   of value "abcdefgh".  Otherwise return -1.  */
1297int
1298aarch64_shrink_expanded_imm8 (uint64_t imm)
1299{
1300  int i, ret;
1301  uint32_t byte;
1302
1303  ret = 0;
1304  for (i = 0; i < 8; i++)
1305    {
1306      byte = (imm >> (8 * i)) & 0xff;
1307      if (byte == 0xff)
1308	ret |= 1 << i;
1309      else if (byte != 0x00)
1310	return -1;
1311    }
1312  return ret;
1313}
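
/* Illustrative self-checks (added for this edit, not part of the original
   sources): bytes 0, 2, 4 and 6 of 0x00ff00ff00ff00ff are 0xff and the rest
   are 0x00, shrinking to 0b01010101 == 0x55; a byte that is neither 0x00 nor
   0xff makes the value unshrinkable.  The guard macro and function name are
   hypothetical.  */
#ifdef AARCH64_OPC_EXAMPLES
static void
example_shrink_expanded_imm8_checks (void)
{
  assert (aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00ffULL) == 0x55);
  assert (aarch64_shrink_expanded_imm8 (0x00ff00ff00ff00f0ULL) == -1);
}
#endif /* AARCH64_OPC_EXAMPLES */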
1314
1315/* Utility inline functions for operand_general_constraint_met_p.  */
1316
1317static inline void
1318set_error (aarch64_operand_error *mismatch_detail,
1319	   enum aarch64_operand_error_kind kind, int idx,
1320	   const char* error)
1321{
1322  if (mismatch_detail == NULL)
1323    return;
1324  mismatch_detail->kind = kind;
1325  mismatch_detail->index = idx;
1326  mismatch_detail->error = error;
1327}
1328
1329static inline void
1330set_syntax_error (aarch64_operand_error *mismatch_detail, int idx,
1331		  const char* error)
1332{
1333  if (mismatch_detail == NULL)
1334    return;
1335  set_error (mismatch_detail, AARCH64_OPDE_SYNTAX_ERROR, idx, error);
1336}
1337
1338static inline void
1339set_out_of_range_error (aarch64_operand_error *mismatch_detail,
1340			int idx, int lower_bound, int upper_bound,
1341			const char* error)
1342{
1343  if (mismatch_detail == NULL)
1344    return;
1345  set_error (mismatch_detail, AARCH64_OPDE_OUT_OF_RANGE, idx, error);
1346  mismatch_detail->data[0].i = lower_bound;
1347  mismatch_detail->data[1].i = upper_bound;
1348}
1349
1350static inline void
1351set_imm_out_of_range_error (aarch64_operand_error *mismatch_detail,
1352			    int idx, int lower_bound, int upper_bound)
1353{
1354  if (mismatch_detail == NULL)
1355    return;
1356  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1357			  _("immediate value"));
1358}
1359
1360static inline void
1361set_offset_out_of_range_error (aarch64_operand_error *mismatch_detail,
1362			       int idx, int lower_bound, int upper_bound)
1363{
1364  if (mismatch_detail == NULL)
1365    return;
1366  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1367			  _("immediate offset"));
1368}
1369
1370static inline void
1371set_regno_out_of_range_error (aarch64_operand_error *mismatch_detail,
1372			      int idx, int lower_bound, int upper_bound)
1373{
1374  if (mismatch_detail == NULL)
1375    return;
1376  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1377			  _("register number"));
1378}
1379
1380static inline void
1381set_elem_idx_out_of_range_error (aarch64_operand_error *mismatch_detail,
1382				 int idx, int lower_bound, int upper_bound)
1383{
1384  if (mismatch_detail == NULL)
1385    return;
1386  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1387			  _("register element index"));
1388}
1389
1390static inline void
1391set_sft_amount_out_of_range_error (aarch64_operand_error *mismatch_detail,
1392				   int idx, int lower_bound, int upper_bound)
1393{
1394  if (mismatch_detail == NULL)
1395    return;
1396  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1397			  _("shift amount"));
1398}
1399
1400/* Report that the MUL modifier in operand IDX should be in the range
1401   [LOWER_BOUND, UPPER_BOUND].  */
1402static inline void
1403set_multiplier_out_of_range_error (aarch64_operand_error *mismatch_detail,
1404				   int idx, int lower_bound, int upper_bound)
1405{
1406  if (mismatch_detail == NULL)
1407    return;
1408  set_out_of_range_error (mismatch_detail, idx, lower_bound, upper_bound,
1409			  _("multiplier"));
1410}
1411
1412static inline void
1413set_unaligned_error (aarch64_operand_error *mismatch_detail, int idx,
1414		     int alignment)
1415{
1416  if (mismatch_detail == NULL)
1417    return;
1418  set_error (mismatch_detail, AARCH64_OPDE_UNALIGNED, idx, NULL);
1419  mismatch_detail->data[0].i = alignment;
1420}
1421
1422static inline void
1423set_reg_list_error (aarch64_operand_error *mismatch_detail, int idx,
1424		    int expected_num)
1425{
1426  if (mismatch_detail == NULL)
1427    return;
1428  set_error (mismatch_detail, AARCH64_OPDE_REG_LIST, idx, NULL);
1429  mismatch_detail->data[0].i = expected_num;
1430}
1431
1432static inline void
1433set_other_error (aarch64_operand_error *mismatch_detail, int idx,
1434		 const char* error)
1435{
1436  if (mismatch_detail == NULL)
1437    return;
1438  set_error (mismatch_detail, AARCH64_OPDE_OTHER_ERROR, idx, error);
1439}
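
/* Worked example (added for clarity): a failed check such as
   set_imm_out_of_range_error (mismatch_detail, 1, 0, 31) records
   kind == AARCH64_OPDE_OUT_OF_RANGE, index == 1, the static string
   "immediate value" and the bounds 0 and 31 in data[0].i and data[1].i;
   the caller can later format these into a full diagnostic.  */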
1440
1441/* General constraint checking based on operand code.
1442
1443   Return 1 if OPNDS[IDX] meets the general constraint of operand code TYPE
1444   as the IDXth operand of opcode OPCODE.  Otherwise return 0.
1445
1446   This function has to be called after the qualifiers for all operands
1447   have been resolved.
1448
   The mismatch error message is returned in *MISMATCH_DETAIL upon request,
   i.e. when MISMATCH_DETAIL is non-NULL.  This avoids generating error
   messages during disassembly, where they are not wanted.  We avoid the
   dynamic construction of error-message strings here (i.e. in libopcodes),
   as it is costly and complicated; instead, we use a combination of an
   error code, a static string and some integer data to represent an
   error.  */
1456
1457static int
1458operand_general_constraint_met_p (const aarch64_opnd_info *opnds, int idx,
1459				  enum aarch64_opnd type,
1460				  const aarch64_opcode *opcode,
1461				  aarch64_operand_error *mismatch_detail)
1462{
1463  unsigned num, modifiers, shift;
1464  unsigned char size;
1465  int64_t imm, min_value, max_value;
1466  uint64_t uvalue, mask;
1467  const aarch64_opnd_info *opnd = opnds + idx;
1468  aarch64_opnd_qualifier_t qualifier = opnd->qualifier;
1469  int i;
1470
1471  assert (opcode->operands[idx] == opnd->type && opnd->type == type);
1472
1473  switch (aarch64_operands[type].op_class)
1474    {
1475    case AARCH64_OPND_CLASS_INT_REG:
1476      /* Check pair reg constraints for cas* instructions.  */
1477      if (type == AARCH64_OPND_PAIRREG)
1478	{
1479	  assert (idx == 1 || idx == 3);
1480	  if (opnds[idx - 1].reg.regno % 2 != 0)
1481	    {
1482	      set_syntax_error (mismatch_detail, idx - 1,
1483				_("reg pair must start from even reg"));
1484	      return 0;
1485	    }
1486	  if (opnds[idx].reg.regno != opnds[idx - 1].reg.regno + 1)
1487	    {
1488	      set_syntax_error (mismatch_detail, idx,
1489				_("reg pair must be contiguous"));
1490	      return 0;
1491	    }
1492	  break;
1493	}
1494
1495      /* <Xt> may be optional in some IC and TLBI instructions.  */
1496      if (type == AARCH64_OPND_Rt_SYS)
1497	{
1498	  assert (idx == 1 && (aarch64_get_operand_class (opnds[0].type)
1499			       == AARCH64_OPND_CLASS_SYSTEM));
1500	  if (opnds[1].present
1501	      && !aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1502	    {
1503	      set_other_error (mismatch_detail, idx, _("extraneous register"));
1504	      return 0;
1505	    }
1506	  if (!opnds[1].present
1507	      && aarch64_sys_ins_reg_has_xt (opnds[0].sysins_op))
1508	    {
1509	      set_other_error (mismatch_detail, idx, _("missing register"));
1510	      return 0;
1511	    }
1512	}
1513      switch (qualifier)
1514	{
1515	case AARCH64_OPND_QLF_WSP:
1516	case AARCH64_OPND_QLF_SP:
1517	  if (!aarch64_stack_pointer_p (opnd))
1518	    {
1519	      set_other_error (mismatch_detail, idx,
1520		       _("stack pointer register expected"));
1521	      return 0;
1522	    }
1523	  break;
1524	default:
1525	  break;
1526	}
1527      break;
1528
1529    case AARCH64_OPND_CLASS_SVE_REG:
1530      switch (type)
1531	{
1532	case AARCH64_OPND_SVE_Zm3_INDEX:
1533	case AARCH64_OPND_SVE_Zm3_22_INDEX:
1534	case AARCH64_OPND_SVE_Zm3_11_INDEX:
1535	case AARCH64_OPND_SVE_Zm4_11_INDEX:
1536	case AARCH64_OPND_SVE_Zm4_INDEX:
1537	  size = get_operand_fields_width (get_operand_from_code (type));
1538	  shift = get_operand_specific_data (&aarch64_operands[type]);
1539	  mask = (1 << shift) - 1;
1540	  if (opnd->reg.regno > mask)
1541	    {
1542	      assert (mask == 7 || mask == 15);
1543	      set_other_error (mismatch_detail, idx,
1544			       mask == 15
1545			       ? _("z0-z15 expected")
1546			       : _("z0-z7 expected"));
1547	      return 0;
1548	    }
1549	  mask = (1u << (size - shift)) - 1;
1550	  if (!value_in_range_p (opnd->reglane.index, 0, mask))
1551	    {
1552	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, mask);
1553	      return 0;
1554	    }
1555	  break;
1556
1557	case AARCH64_OPND_SVE_Zn_INDEX:
1558	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1559	  if (!value_in_range_p (opnd->reglane.index, 0, 64 / size - 1))
1560	    {
1561	      set_elem_idx_out_of_range_error (mismatch_detail, idx,
1562					       0, 64 / size - 1);
1563	      return 0;
1564	    }
1565	  break;
1566
1567	case AARCH64_OPND_SVE_ZnxN:
1568	case AARCH64_OPND_SVE_ZtxN:
1569	  if (opnd->reglist.num_regs != get_opcode_dependent_value (opcode))
1570	    {
1571	      set_other_error (mismatch_detail, idx,
1572			       _("invalid register list"));
1573	      return 0;
1574	    }
1575	  break;
1576
1577	default:
1578	  break;
1579	}
1580      break;
1581
1582    case AARCH64_OPND_CLASS_PRED_REG:
1583      if (opnd->reg.regno >= 8
1584	  && get_operand_fields_width (get_operand_from_code (type)) == 3)
1585	{
1586	  set_other_error (mismatch_detail, idx, _("p0-p7 expected"));
1587	  return 0;
1588	}
1589      break;
1590
1591    case AARCH64_OPND_CLASS_COND:
1592      if (type == AARCH64_OPND_COND1
1593	  && (opnds[idx].cond->value & 0xe) == 0xe)
1594	{
          /* Do not allow AL or NV.  */
1596	  set_syntax_error (mismatch_detail, idx, NULL);
1597	}
1598      break;
1599
1600    case AARCH64_OPND_CLASS_ADDRESS:
1601      /* Check writeback.  */
1602      switch (opcode->iclass)
1603	{
1604	case ldst_pos:
1605	case ldst_unscaled:
1606	case ldstnapair_offs:
1607	case ldstpair_off:
1608	case ldst_unpriv:
1609	  if (opnd->addr.writeback == 1)
1610	    {
1611	      set_syntax_error (mismatch_detail, idx,
1612				_("unexpected address writeback"));
1613	      return 0;
1614	    }
1615	  break;
1616	case ldst_imm10:
1617	  if (opnd->addr.writeback == 1 && opnd->addr.preind != 1)
1618	    {
1619	      set_syntax_error (mismatch_detail, idx,
1620				_("unexpected address writeback"));
1621	      return 0;
1622	    }
1623	  break;
1624	case ldst_imm9:
1625	case ldstpair_indexed:
1626	case asisdlsep:
1627	case asisdlsop:
1628	  if (opnd->addr.writeback == 0)
1629	    {
1630	      set_syntax_error (mismatch_detail, idx,
1631				_("address writeback expected"));
1632	      return 0;
1633	    }
1634	  break;
1635	default:
1636	  assert (opnd->addr.writeback == 0);
1637	  break;
1638	}
1639      switch (type)
1640	{
1641	case AARCH64_OPND_ADDR_SIMM7:
          /* Scaled signed 7-bit immediate offset.  */
          /* Get the size of the data element that is accessed, which may be
             different from the source register size,
             e.g. in strb/ldrb.  */
1646	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1647	  if (!value_in_range_p (opnd->addr.offset.imm, -64 * size, 63 * size))
1648	    {
1649	      set_offset_out_of_range_error (mismatch_detail, idx,
1650					     -64 * size, 63 * size);
1651	      return 0;
1652	    }
1653	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1654	    {
1655	      set_unaligned_error (mismatch_detail, idx, size);
1656	      return 0;
1657	    }
1658	  break;
1659	case AARCH64_OPND_ADDR_OFFSET:
1660	case AARCH64_OPND_ADDR_SIMM9:
          /* Unscaled signed 9-bit immediate offset.  */
1662	  if (!value_in_range_p (opnd->addr.offset.imm, -256, 255))
1663	    {
1664	      set_offset_out_of_range_error (mismatch_detail, idx, -256, 255);
1665	      return 0;
1666	    }
1667	  break;
1668
1669	case AARCH64_OPND_ADDR_SIMM9_2:
          /* Unscaled signed 9-bit immediate offset, which has to be negative
             or unaligned.  */
1672	  size = aarch64_get_qualifier_esize (qualifier);
1673	  if ((value_in_range_p (opnd->addr.offset.imm, 0, 255)
1674	       && !value_aligned_p (opnd->addr.offset.imm, size))
1675	      || value_in_range_p (opnd->addr.offset.imm, -256, -1))
1676	    return 1;
1677	  set_other_error (mismatch_detail, idx,
1678			   _("negative or unaligned offset expected"));
1679	  return 0;
1680
1681	case AARCH64_OPND_ADDR_SIMM10:
          /* Scaled signed 10-bit immediate offset.  */
1683	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4088))
1684	    {
1685	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4088);
1686	      return 0;
1687	    }
1688	  if (!value_aligned_p (opnd->addr.offset.imm, 8))
1689	    {
1690	      set_unaligned_error (mismatch_detail, idx, 8);
1691	      return 0;
1692	    }
1693	  break;
1694
1695	case AARCH64_OPND_ADDR_SIMM11:
1696	  /* Signed 11-bit immediate offset (multiple of 16).  */
1697	  if (!value_in_range_p (opnd->addr.offset.imm, -1024, 1008))
1698	    {
1699	      set_offset_out_of_range_error (mismatch_detail, idx, -1024, 1008);
1700	      return 0;
1701	    }
1702
1703	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1704	    {
1705	      set_unaligned_error (mismatch_detail, idx, 16);
1706	      return 0;
1707	    }
1708	  break;
1709
1710	case AARCH64_OPND_ADDR_SIMM13:
1711	  /* Signed 13-bit immediate offset (multiple of 16).  */
1712	  if (!value_in_range_p (opnd->addr.offset.imm, -4096, 4080))
1713	    {
1714	      set_offset_out_of_range_error (mismatch_detail, idx, -4096, 4080);
1715	      return 0;
1716	    }
1717
1718	  if (!value_aligned_p (opnd->addr.offset.imm, 16))
1719	    {
1720	      set_unaligned_error (mismatch_detail, idx, 16);
1721	      return 0;
1722	    }
1723	  break;
1724
1725	case AARCH64_OPND_SIMD_ADDR_POST:
1726	  /* AdvSIMD load/store multiple structures, post-index.  */
1727	  assert (idx == 1);
1728	  if (opnd->addr.offset.is_reg)
1729	    {
1730	      if (value_in_range_p (opnd->addr.offset.regno, 0, 30))
1731		return 1;
1732	      else
1733		{
1734		  set_other_error (mismatch_detail, idx,
1735				   _("invalid register offset"));
1736		  return 0;
1737		}
1738	    }
1739	  else
1740	    {
1741	      const aarch64_opnd_info *prev = &opnds[idx-1];
1742	      unsigned num_bytes; /* total number of bytes transferred.  */
1743	      /* The opcode dependent area stores the number of elements in
1744		 each structure to be loaded/stored.  */
1745	      int is_ld1r = get_opcode_dependent_value (opcode) == 1;
1746	      if (opcode->operands[0] == AARCH64_OPND_LVt_AL)
1747		/* Special handling of loading single structure to all lanes.  */
1748		num_bytes = (is_ld1r ? 1 : prev->reglist.num_regs)
1749		  * aarch64_get_qualifier_esize (prev->qualifier);
1750	      else
1751		num_bytes = prev->reglist.num_regs
1752		  * aarch64_get_qualifier_esize (prev->qualifier)
1753		  * aarch64_get_qualifier_nelem (prev->qualifier);
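	      /* E.g. "ld4 {v0.16b-v3.16b}, [x0], #64": 4 registers of 16
		 one-byte elements give 64 bytes, so only #64 is accepted.  */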
1754	      if ((int) num_bytes != opnd->addr.offset.imm)
1755		{
1756		  set_other_error (mismatch_detail, idx,
1757				   _("invalid post-increment amount"));
1758		  return 0;
1759		}
1760	    }
1761	  break;
1762
1763	case AARCH64_OPND_ADDR_REGOFF:
1764	  /* Get the size of the data element that is accessed, which may be
1765	     different from the source register size,
1766	     e.g. in strb/ldrb.  */
1767	  size = aarch64_get_qualifier_esize (opnd->qualifier);
1768	  /* It is either no shift or shift by the binary logarithm of SIZE.  */
1769	  if (opnd->shifter.amount != 0
1770	      && opnd->shifter.amount != (int)get_logsz (size))
1771	    {
1772	      set_other_error (mismatch_detail, idx,
1773			       _("invalid shift amount"));
1774	      return 0;
1775	    }
1776	  /* Only UXTW, LSL, SXTW and SXTX are accepted as extend/shift
1777	     operators.  */
1778	  switch (opnd->shifter.kind)
1779	    {
1780	    case AARCH64_MOD_UXTW:
1781	    case AARCH64_MOD_LSL:
1782	    case AARCH64_MOD_SXTW:
1783	    case AARCH64_MOD_SXTX: break;
1784	    default:
1785	      set_other_error (mismatch_detail, idx,
1786			       _("invalid extend/shift operator"));
1787	      return 0;
1788	    }
1789	  break;
1790
1791	case AARCH64_OPND_ADDR_UIMM12:
1792	  imm = opnd->addr.offset.imm;
1793	  /* Get the size of the data element that is accessed, which may be
1794	     different from the source register size,
1795	     e.g. in strb/ldrb.  */
1796	  size = aarch64_get_qualifier_esize (qualifier);
1797	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 4095 * size))
1798	    {
1799	      set_offset_out_of_range_error (mismatch_detail, idx,
1800					     0, 4095 * size);
1801	      return 0;
1802	    }
1803	  if (!value_aligned_p (opnd->addr.offset.imm, size))
1804	    {
1805	      set_unaligned_error (mismatch_detail, idx, size);
1806	      return 0;
1807	    }
1808	  break;
1809
1810	case AARCH64_OPND_ADDR_PCREL14:
1811	case AARCH64_OPND_ADDR_PCREL19:
1812	case AARCH64_OPND_ADDR_PCREL21:
1813	case AARCH64_OPND_ADDR_PCREL26:
1814	  imm = opnd->imm.value;
1815	  if (operand_need_shift_by_two (get_operand_from_code (type)))
1816	    {
1817	      /* The offset value in a PC-relative branch instruction is always
1818		 4-byte aligned and is encoded without the lowest 2 bits.  */
1819	      if (!value_aligned_p (imm, 4))
1820		{
1821		  set_unaligned_error (mismatch_detail, idx, 4);
1822		  return 0;
1823		}
1824	      /* Right shift by 2 so that we can carry out the following check
1825		 canonically.  */
1826	      imm >>= 2;
1827	    }
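	  /* E.g. AARCH64_OPND_ADDR_PCREL19 (conditional branches and LDR
	     literal) has a 19-bit field, so after the shift above the offset
	     must lie within roughly +/-1 MiB of the PC.  */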
1828	  size = get_operand_fields_width (get_operand_from_code (type));
1829	  if (!value_fit_signed_field_p (imm, size))
1830	    {
1831	      set_other_error (mismatch_detail, idx,
1832			       _("immediate out of range"));
1833	      return 0;
1834	    }
1835	  break;
1836
1837	case AARCH64_OPND_SME_ADDR_RI_U4xVL:
1838	  if (!value_in_range_p (opnd->addr.offset.imm, 0, 15))
1839	    {
1840	      set_offset_out_of_range_error (mismatch_detail, idx, 0, 15);
1841	      return 0;
1842	    }
1843	  break;
1844
1845	case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
1846	case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
1847	case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
1848	case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
1849	  min_value = -8;
1850	  max_value = 7;
1851	sve_imm_offset_vl:
1852	  assert (!opnd->addr.offset.is_reg);
1853	  assert (opnd->addr.preind);
1854	  num = 1 + get_operand_specific_data (&aarch64_operands[type]);
1855	  min_value *= num;
1856	  max_value *= num;
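	  /* E.g. for AARCH64_OPND_SVE_ADDR_RI_S4x3xVL, NUM is 3, so the offset
	     checked below must be a multiple of 3 in [-24, 21] and is written
	     as "[<Xn|SP>, #<imm>, MUL VL]".  */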
1857	  if ((opnd->addr.offset.imm != 0 && !opnd->shifter.operator_present)
1858	      || (opnd->shifter.operator_present
1859		  && opnd->shifter.kind != AARCH64_MOD_MUL_VL))
1860	    {
1861	      set_other_error (mismatch_detail, idx,
1862			       _("invalid addressing mode"));
1863	      return 0;
1864	    }
1865	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1866	    {
1867	      set_offset_out_of_range_error (mismatch_detail, idx,
1868					     min_value, max_value);
1869	      return 0;
1870	    }
1871	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1872	    {
1873	      set_unaligned_error (mismatch_detail, idx, num);
1874	      return 0;
1875	    }
1876	  break;
1877
1878	case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
1879	  min_value = -32;
1880	  max_value = 31;
1881	  goto sve_imm_offset_vl;
1882
1883	case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
1884	  min_value = -256;
1885	  max_value = 255;
1886	  goto sve_imm_offset_vl;
1887
1888	case AARCH64_OPND_SVE_ADDR_RI_U6:
1889	case AARCH64_OPND_SVE_ADDR_RI_U6x2:
1890	case AARCH64_OPND_SVE_ADDR_RI_U6x4:
1891	case AARCH64_OPND_SVE_ADDR_RI_U6x8:
1892	  min_value = 0;
1893	  max_value = 63;
1894	sve_imm_offset:
1895	  assert (!opnd->addr.offset.is_reg);
1896	  assert (opnd->addr.preind);
1897	  num = 1 << get_operand_specific_data (&aarch64_operands[type]);
1898	  min_value *= num;
1899	  max_value *= num;
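	  /* E.g. for an x8-scaled operand such as AARCH64_OPND_SVE_ADDR_RI_U6x8,
	     NUM is 8 and the offset must be a multiple of 8 in [0, 504].  */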
1900	  if (opnd->shifter.operator_present
1901	      || opnd->shifter.amount_present)
1902	    {
1903	      set_other_error (mismatch_detail, idx,
1904			       _("invalid addressing mode"));
1905	      return 0;
1906	    }
1907	  if (!value_in_range_p (opnd->addr.offset.imm, min_value, max_value))
1908	    {
1909	      set_offset_out_of_range_error (mismatch_detail, idx,
1910					     min_value, max_value);
1911	      return 0;
1912	    }
1913	  if (!value_aligned_p (opnd->addr.offset.imm, num))
1914	    {
1915	      set_unaligned_error (mismatch_detail, idx, num);
1916	      return 0;
1917	    }
1918	  break;
1919
1920	case AARCH64_OPND_SVE_ADDR_RI_S4x16:
1921	case AARCH64_OPND_SVE_ADDR_RI_S4x32:
1922	  min_value = -8;
1923	  max_value = 7;
1924	  goto sve_imm_offset;
1925
1926	case AARCH64_OPND_SVE_ADDR_ZX:
1927	  /* Everything is already ensured by parse_operands or
1928	     aarch64_ext_sve_addr_rr_lsl (because this is a very specific
1929	     argument type).  */
1930	  assert (opnd->addr.offset.is_reg);
1931	  assert (opnd->addr.preind);
1932	  assert ((aarch64_operands[type].flags & OPD_F_NO_ZR) == 0);
1933	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
1934	  assert (opnd->shifter.operator_present == 0);
1935	  break;
1936
1937	case AARCH64_OPND_SVE_ADDR_R:
1938	case AARCH64_OPND_SVE_ADDR_RR:
1939	case AARCH64_OPND_SVE_ADDR_RR_LSL1:
1940	case AARCH64_OPND_SVE_ADDR_RR_LSL2:
1941	case AARCH64_OPND_SVE_ADDR_RR_LSL3:
1942	case AARCH64_OPND_SVE_ADDR_RR_LSL4:
1943	case AARCH64_OPND_SVE_ADDR_RX:
1944	case AARCH64_OPND_SVE_ADDR_RX_LSL1:
1945	case AARCH64_OPND_SVE_ADDR_RX_LSL2:
1946	case AARCH64_OPND_SVE_ADDR_RX_LSL3:
1947	case AARCH64_OPND_SVE_ADDR_RZ:
1948	case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
1949	case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
1950	case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
1951	  modifiers = 1 << AARCH64_MOD_LSL;
1952	sve_rr_operand:
1953	  assert (opnd->addr.offset.is_reg);
1954	  assert (opnd->addr.preind);
1955	  if ((aarch64_operands[type].flags & OPD_F_NO_ZR) != 0
1956	      && opnd->addr.offset.regno == 31)
1957	    {
1958	      set_other_error (mismatch_detail, idx,
1959			       _("index register xzr is not allowed"));
1960	      return 0;
1961	    }
1962	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1963	      || (opnd->shifter.amount
1964		  != get_operand_specific_data (&aarch64_operands[type])))
1965	    {
1966	      set_other_error (mismatch_detail, idx,
1967			       _("invalid addressing mode"));
1968	      return 0;
1969	    }
1970	  break;
1971
1972	case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
1973	case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
1974	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
1975	case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
1976	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
1977	case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
1978	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
1979	case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
1980	  modifiers = (1 << AARCH64_MOD_SXTW) | (1 << AARCH64_MOD_UXTW);
1981	  goto sve_rr_operand;
1982
1983	case AARCH64_OPND_SVE_ADDR_ZI_U5:
1984	case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
1985	case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
1986	case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
1987	  min_value = 0;
1988	  max_value = 31;
1989	  goto sve_imm_offset;
1990
1991	case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
1992	  modifiers = 1 << AARCH64_MOD_LSL;
1993	sve_zz_operand:
1994	  assert (opnd->addr.offset.is_reg);
1995	  assert (opnd->addr.preind);
1996	  if (((1 << opnd->shifter.kind) & modifiers) == 0
1997	      || opnd->shifter.amount < 0
1998	      || opnd->shifter.amount > 3)
1999	    {
2000	      set_other_error (mismatch_detail, idx,
2001			       _("invalid addressing mode"));
2002	      return 0;
2003	    }
2004	  break;
2005
2006	case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
2007	  modifiers = (1 << AARCH64_MOD_SXTW);
2008	  goto sve_zz_operand;
2009
2010	case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
2011	  modifiers = 1 << AARCH64_MOD_UXTW;
2012	  goto sve_zz_operand;
2013
2014	default:
2015	  break;
2016	}
2017      break;
2018
2019    case AARCH64_OPND_CLASS_SIMD_REGLIST:
2020      if (type == AARCH64_OPND_LEt)
2021	{
2022	  /* Get the upper bound for the element index.  */
2023	  num = 16 / aarch64_get_qualifier_esize (qualifier) - 1;
2024	  if (!value_in_range_p (opnd->reglist.index, 0, num))
2025	    {
2026	      set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2027	      return 0;
2028	    }
2029	}
2030      /* The opcode dependent area stores the number of elements in
2031	 each structure to be loaded/stored.  */
2032      num = get_opcode_dependent_value (opcode);
2033      switch (type)
2034	{
2035	case AARCH64_OPND_LVt:
2036	  assert (num >= 1 && num <= 4);
2037	  /* Except for LD1/ST1, the number of registers should equal that
2038	     of the structure elements.  */
2039	  if (num != 1 && opnd->reglist.num_regs != num)
2040	    {
2041	      set_reg_list_error (mismatch_detail, idx, num);
2042	      return 0;
2043	    }
2044	  break;
2045	case AARCH64_OPND_LVt_AL:
2046	case AARCH64_OPND_LEt:
2047	  assert (num >= 1 && num <= 4);
2048	  /* The number of registers should be equal to that of the structure
2049	     elements.  */
2050	  if (opnd->reglist.num_regs != num)
2051	    {
2052	      set_reg_list_error (mismatch_detail, idx, num);
2053	      return 0;
2054	    }
2055	  break;
2056	default:
2057	  break;
2058	}
2059      break;
2060
2061    case AARCH64_OPND_CLASS_IMMEDIATE:
2062      /* Constraint check on immediate operand.  */
2063      imm = opnd->imm.value;
2064      /* E.g. imm_0_31 constrains value to be 0..31.  */
2065      if (qualifier_value_in_range_constraint_p (qualifier)
2066	  && !value_in_range_p (imm, get_lower_bound (qualifier),
2067				get_upper_bound (qualifier)))
2068	{
2069	  set_imm_out_of_range_error (mismatch_detail, idx,
2070				      get_lower_bound (qualifier),
2071				      get_upper_bound (qualifier));
2072	  return 0;
2073	}
2074
2075      switch (type)
2076	{
2077	case AARCH64_OPND_AIMM:
2078	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2079	    {
2080	      set_other_error (mismatch_detail, idx,
2081			       _("invalid shift operator"));
2082	      return 0;
2083	    }
2084	  if (opnd->shifter.amount != 0 && opnd->shifter.amount != 12)
2085	    {
2086	      set_other_error (mismatch_detail, idx,
2087			       _("shift amount must be 0 or 12"));
2088	      return 0;
2089	    }
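	  /* E.g. "add x0, x1, #0xfff" and "add x0, x1, #0xfff, lsl #12" are
	     both valid forms of the 12-bit arithmetic immediate.  */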
2090	  if (!value_fit_unsigned_field_p (opnd->imm.value, 12))
2091	    {
2092	      set_other_error (mismatch_detail, idx,
2093			       _("immediate out of range"));
2094	      return 0;
2095	    }
2096	  break;
2097
2098	case AARCH64_OPND_HALF:
2099	  assert (idx == 1 && opnds[0].type == AARCH64_OPND_Rd);
2100	  if (opnd->shifter.kind != AARCH64_MOD_LSL)
2101	    {
2102	      set_other_error (mismatch_detail, idx,
2103			       _("invalid shift operator"));
2104	      return 0;
2105	    }
2106	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
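	  /* E.g. for a 64-bit MOVZ/MOVK (SIZE == 8) the shift checked below
	     must be 0, 16, 32 or 48; for the 32-bit form only 0 or 16.  */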
2107	  if (!value_aligned_p (opnd->shifter.amount, 16))
2108	    {
2109	      set_other_error (mismatch_detail, idx,
2110			       _("shift amount must be a multiple of 16"));
2111	      return 0;
2112	    }
2113	  if (!value_in_range_p (opnd->shifter.amount, 0, size * 8 - 16))
2114	    {
2115	      set_sft_amount_out_of_range_error (mismatch_detail, idx,
2116						 0, size * 8 - 16);
2117	      return 0;
2118	    }
2119	  if (opnd->imm.value < 0)
2120	    {
2121	      set_other_error (mismatch_detail, idx,
2122			       _("negative immediate value not allowed"));
2123	      return 0;
2124	    }
2125	  if (!value_fit_unsigned_field_p (opnd->imm.value, 16))
2126	    {
2127	      set_other_error (mismatch_detail, idx,
2128			       _("immediate out of range"));
2129	      return 0;
2130	    }
2131	  break;
2132
2133	case AARCH64_OPND_IMM_MOV:
2134	    {
2135	      int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2136	      imm = opnd->imm.value;
2137	      assert (idx == 1);
2138	      switch (opcode->op)
2139		{
2140		case OP_MOV_IMM_WIDEN:
2141		  imm = ~imm;
2142		  /* Fall through.  */
2143		case OP_MOV_IMM_WIDE:
2144		  if (!aarch64_wide_constant_p (imm, esize == 4, NULL))
2145		    {
2146		      set_other_error (mismatch_detail, idx,
2147				       _("immediate out of range"));
2148		      return 0;
2149		    }
2150		  break;
2151		case OP_MOV_IMM_LOG:
2152		  if (!aarch64_logical_immediate_p (imm, esize, NULL))
2153		    {
2154		      set_other_error (mismatch_detail, idx,
2155				       _("immediate out of range"));
2156		      return 0;
2157		    }
2158		  break;
2159		default:
2160		  assert (0);
2161		  return 0;
2162		}
2163	    }
2164	  break;
2165
2166	case AARCH64_OPND_NZCV:
2167	case AARCH64_OPND_CCMP_IMM:
2168	case AARCH64_OPND_EXCEPTION:
2169	case AARCH64_OPND_UNDEFINED:
2170	case AARCH64_OPND_TME_UIMM16:
2171	case AARCH64_OPND_UIMM4:
2172	case AARCH64_OPND_UIMM4_ADDG:
2173	case AARCH64_OPND_UIMM7:
2174	case AARCH64_OPND_UIMM3_OP1:
2175	case AARCH64_OPND_UIMM3_OP2:
2176	case AARCH64_OPND_SVE_UIMM3:
2177	case AARCH64_OPND_SVE_UIMM7:
2178	case AARCH64_OPND_SVE_UIMM8:
2179	case AARCH64_OPND_SVE_UIMM8_53:
2180	case AARCH64_OPND_CSSC_UIMM8:
2181	  size = get_operand_fields_width (get_operand_from_code (type));
2182	  assert (size < 32);
2183	  if (!value_fit_unsigned_field_p (opnd->imm.value, size))
2184	    {
2185	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2186					  (1u << size) - 1);
2187	      return 0;
2188	    }
2189	  break;
2190
2191	case AARCH64_OPND_UIMM10:
2192	  /* Scaled unsigned 10-bit immediate offset.  */
2193	  if (!value_in_range_p (opnd->imm.value, 0, 1008))
2194	    {
2195	      set_imm_out_of_range_error (mismatch_detail, idx, 0, 1008);
2196	      return 0;
2197	    }
2198
2199	  if (!value_aligned_p (opnd->imm.value, 16))
2200	    {
2201	      set_unaligned_error (mismatch_detail, idx, 16);
2202	      return 0;
2203	    }
2204	  break;
2205
2206	case AARCH64_OPND_SIMM5:
2207	case AARCH64_OPND_SVE_SIMM5:
2208	case AARCH64_OPND_SVE_SIMM5B:
2209	case AARCH64_OPND_SVE_SIMM6:
2210	case AARCH64_OPND_SVE_SIMM8:
2211	case AARCH64_OPND_CSSC_SIMM8:
2212	  size = get_operand_fields_width (get_operand_from_code (type));
2213	  assert (size < 32);
2214	  if (!value_fit_signed_field_p (opnd->imm.value, size))
2215	    {
2216	      set_imm_out_of_range_error (mismatch_detail, idx,
2217					  -(1 << (size - 1)),
2218					  (1 << (size - 1)) - 1);
2219	      return 0;
2220	    }
2221	  break;
2222
2223	case AARCH64_OPND_WIDTH:
2224	  assert (idx > 1 && opnds[idx-1].type == AARCH64_OPND_IMM
2225		  && opnds[0].type == AARCH64_OPND_Rd);
2226	  size = get_upper_bound (qualifier);
2227	  if (opnd->imm.value + opnds[idx-1].imm.value > size)
2228	    /* lsb+width <= reg.size  */
2229	    {
2230	      set_imm_out_of_range_error (mismatch_detail, idx, 1,
2231					  size - opnds[idx-1].imm.value);
2232	      return 0;
2233	    }
2234	  break;
2235
2236	case AARCH64_OPND_LIMM:
2237	case AARCH64_OPND_SVE_LIMM:
2238	  {
2239	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2240	    uint64_t uimm = opnd->imm.value;
2241	    if (opcode->op == OP_BIC)
2242	      uimm = ~uimm;
2243	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2244	      {
2245		set_other_error (mismatch_detail, idx,
2246				 _("immediate out of range"));
2247		return 0;
2248	      }
2249	  }
2250	  break;
2251
2252	case AARCH64_OPND_IMM0:
2253	case AARCH64_OPND_FPIMM0:
2254	  if (opnd->imm.value != 0)
2255	    {
2256	      set_other_error (mismatch_detail, idx,
2257			       _("immediate zero expected"));
2258	      return 0;
2259	    }
2260	  break;
2261
2262	case AARCH64_OPND_IMM_ROT1:
2263	case AARCH64_OPND_IMM_ROT2:
2264	case AARCH64_OPND_SVE_IMM_ROT2:
2265	  if (opnd->imm.value != 0
2266	      && opnd->imm.value != 90
2267	      && opnd->imm.value != 180
2268	      && opnd->imm.value != 270)
2269	    {
2270	      set_other_error (mismatch_detail, idx,
2271			       _("rotate expected to be 0, 90, 180 or 270"));
2272	      return 0;
2273	    }
2274	  break;
2275
2276	case AARCH64_OPND_IMM_ROT3:
2277	case AARCH64_OPND_SVE_IMM_ROT1:
2278	case AARCH64_OPND_SVE_IMM_ROT3:
2279	  if (opnd->imm.value != 90 && opnd->imm.value != 270)
2280	    {
2281	      set_other_error (mismatch_detail, idx,
2282			       _("rotate expected to be 90 or 270"));
2283	      return 0;
2284	    }
2285	  break;
2286
2287	case AARCH64_OPND_SHLL_IMM:
2288	  assert (idx == 2);
2289	  size = 8 * aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2290	  if (opnd->imm.value != size)
2291	    {
2292	      set_other_error (mismatch_detail, idx,
2293			       _("invalid shift amount"));
2294	      return 0;
2295	    }
2296	  break;
2297
2298	case AARCH64_OPND_IMM_VLSL:
2299	  size = aarch64_get_qualifier_esize (qualifier);
2300	  if (!value_in_range_p (opnd->imm.value, 0, size * 8 - 1))
2301	    {
2302	      set_imm_out_of_range_error (mismatch_detail, idx, 0,
2303					  size * 8 - 1);
2304	      return 0;
2305	    }
2306	  break;
2307
2308	case AARCH64_OPND_IMM_VLSR:
2309	  size = aarch64_get_qualifier_esize (qualifier);
2310	  if (!value_in_range_p (opnd->imm.value, 1, size * 8))
2311	    {
2312	      set_imm_out_of_range_error (mismatch_detail, idx, 1, size * 8);
2313	      return 0;
2314	    }
2315	  break;
2316
2317	case AARCH64_OPND_SIMD_IMM:
2318	case AARCH64_OPND_SIMD_IMM_SFT:
2319	  /* Qualifier check.  */
2320	  switch (qualifier)
2321	    {
2322	    case AARCH64_OPND_QLF_LSL:
2323	      if (opnd->shifter.kind != AARCH64_MOD_LSL)
2324		{
2325		  set_other_error (mismatch_detail, idx,
2326				   _("invalid shift operator"));
2327		  return 0;
2328		}
2329	      break;
2330	    case AARCH64_OPND_QLF_MSL:
2331	      if (opnd->shifter.kind != AARCH64_MOD_MSL)
2332		{
2333		  set_other_error (mismatch_detail, idx,
2334				   _("invalid shift operator"));
2335		  return 0;
2336		}
2337	      break;
2338	    case AARCH64_OPND_QLF_NIL:
2339	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2340		{
2341		  set_other_error (mismatch_detail, idx,
2342				   _("shift is not permitted"));
2343		  return 0;
2344		}
2345	      break;
2346	    default:
2347	      assert (0);
2348	      return 0;
2349	    }
2350	  /* Is the immediate valid?  */
2351	  assert (idx == 1);
2352	  if (aarch64_get_qualifier_esize (opnds[0].qualifier) != 8)
2353	    {
2354	      /* uimm8 or simm8 */
2355	      if (!value_in_range_p (opnd->imm.value, -128, 255))
2356		{
2357		  set_imm_out_of_range_error (mismatch_detail, idx, -128, 255);
2358		  return 0;
2359		}
2360	    }
2361	  else if (aarch64_shrink_expanded_imm8 (opnd->imm.value) < 0)
2362	    {
2363	      /* uimm64 is not
2364		 'aaaaaaaabbbbbbbbccccccccddddddddeeeeeeee
2365		 ffffffffgggggggghhhhhhhh'.  */
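	      /* I.e. each byte of the expanded 64-bit value must be 0x00 or
		 0xff; e.g. 0xff00ff0000ff00ff is representable while
		 0x0123456789abcdef is not.  */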
2366	      set_other_error (mismatch_detail, idx,
2367			       _("invalid value for immediate"));
2368	      return 0;
2369	    }
2370	  /* Is the shift amount valid?  */
2371	  switch (opnd->shifter.kind)
2372	    {
2373	    case AARCH64_MOD_LSL:
2374	      size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2375	      if (!value_in_range_p (opnd->shifter.amount, 0, (size - 1) * 8))
2376		{
2377		  set_sft_amount_out_of_range_error (mismatch_detail, idx, 0,
2378						     (size - 1) * 8);
2379		  return 0;
2380		}
2381	      if (!value_aligned_p (opnd->shifter.amount, 8))
2382		{
2383		  set_unaligned_error (mismatch_detail, idx, 8);
2384		  return 0;
2385		}
2386	      break;
2387	    case AARCH64_MOD_MSL:
2388	      /* Only 8 and 16 are valid shift amounts.  */
2389	      if (opnd->shifter.amount != 8 && opnd->shifter.amount != 16)
2390		{
2391		  set_other_error (mismatch_detail, idx,
2392				   _("shift amount must be 8 or 16"));
2393		  return 0;
2394		}
2395	      break;
2396	    default:
2397	      if (opnd->shifter.kind != AARCH64_MOD_NONE)
2398		{
2399		  set_other_error (mismatch_detail, idx,
2400				   _("invalid shift operator"));
2401		  return 0;
2402		}
2403	      break;
2404	    }
2405	  break;
2406
2407	case AARCH64_OPND_FPIMM:
2408	case AARCH64_OPND_SIMD_FPIMM:
2409	case AARCH64_OPND_SVE_FPIMM8:
2410	  if (opnd->imm.is_fp == 0)
2411	    {
2412	      set_other_error (mismatch_detail, idx,
2413			       _("floating-point immediate expected"));
2414	      return 0;
2415	    }
2416	  /* The value is expected to be an 8-bit floating-point constant with
2417	     sign, 3-bit exponent and normalized 4 bits of precision, encoded
2418	     in "a:b:c:d:e:f:g:h" or FLD_imm8 (depending on the type of the
2419	     instruction).  */
2420	  if (!value_in_range_p (opnd->imm.value, 0, 255))
2421	    {
2422	      set_other_error (mismatch_detail, idx,
2423			       _("immediate out of range"));
2424	      return 0;
2425	    }
2426	  if (opnd->shifter.kind != AARCH64_MOD_NONE)
2427	    {
2428	      set_other_error (mismatch_detail, idx,
2429			       _("invalid shift operator"));
2430	      return 0;
2431	    }
2432	  break;
2433
2434	case AARCH64_OPND_SVE_AIMM:
2435	  min_value = 0;
2436	sve_aimm:
2437	  assert (opnd->shifter.kind == AARCH64_MOD_LSL);
2438	  size = aarch64_get_qualifier_esize (opnds[0].qualifier);
2439	  mask = ~((uint64_t) -1 << (size * 4) << (size * 4));
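	  /* MASK covers the low SIZE * 8 bits; the shift is done in two steps
	     so that an element size of 8 never shifts a 64-bit value by 64
	     bits, which C leaves undefined.  */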
2440	  uvalue = opnd->imm.value;
2441	  shift = opnd->shifter.amount;
2442	  if (size == 1)
2443	    {
2444	      if (shift != 0)
2445		{
2446		  set_other_error (mismatch_detail, idx,
2447				   _("no shift amount allowed for"
2448				     " 8-bit constants"));
2449		  return 0;
2450		}
2451	    }
2452	  else
2453	    {
2454	      if (shift != 0 && shift != 8)
2455		{
2456		  set_other_error (mismatch_detail, idx,
2457				   _("shift amount must be 0 or 8"));
2458		  return 0;
2459		}
2460	      if (shift == 0 && (uvalue & 0xff) == 0)
2461		{
2462		  shift = 8;
2463		  uvalue = (int64_t) uvalue / 256;
2464		}
2465	    }
2466	  mask >>= shift;
2467	  if ((uvalue & mask) != uvalue && (uvalue | ~mask) != uvalue)
2468	    {
2469	      set_other_error (mismatch_detail, idx,
2470			       _("immediate too big for element size"));
2471	      return 0;
2472	    }
2473	  uvalue = (uvalue - min_value) & mask;
2474	  if (uvalue > 0xff)
2475	    {
2476	      set_other_error (mismatch_detail, idx,
2477			       _("invalid arithmetic immediate"));
2478	      return 0;
2479	    }
2480	  break;
2481
2482	case AARCH64_OPND_SVE_ASIMM:
2483	  min_value = -128;
2484	  goto sve_aimm;
2485
2486	case AARCH64_OPND_SVE_I1_HALF_ONE:
2487	  assert (opnd->imm.is_fp);
2488	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x3f800000)
2489	    {
2490	      set_other_error (mismatch_detail, idx,
2491			       _("floating-point value must be 0.5 or 1.0"));
2492	      return 0;
2493	    }
2494	  break;
2495
2496	case AARCH64_OPND_SVE_I1_HALF_TWO:
2497	  assert (opnd->imm.is_fp);
2498	  if (opnd->imm.value != 0x3f000000 && opnd->imm.value != 0x40000000)
2499	    {
2500	      set_other_error (mismatch_detail, idx,
2501			       _("floating-point value must be 0.5 or 2.0"));
2502	      return 0;
2503	    }
2504	  break;
2505
2506	case AARCH64_OPND_SVE_I1_ZERO_ONE:
2507	  assert (opnd->imm.is_fp);
2508	  if (opnd->imm.value != 0 && opnd->imm.value != 0x3f800000)
2509	    {
2510	      set_other_error (mismatch_detail, idx,
2511			       _("floating-point value must be 0.0 or 1.0"));
2512	      return 0;
2513	    }
2514	  break;
2515
2516	case AARCH64_OPND_SVE_INV_LIMM:
2517	  {
2518	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2519	    uint64_t uimm = ~opnd->imm.value;
2520	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2521	      {
2522		set_other_error (mismatch_detail, idx,
2523				 _("immediate out of range"));
2524		return 0;
2525	      }
2526	  }
2527	  break;
2528
2529	case AARCH64_OPND_SVE_LIMM_MOV:
2530	  {
2531	    int esize = aarch64_get_qualifier_esize (opnds[0].qualifier);
2532	    uint64_t uimm = opnd->imm.value;
2533	    if (!aarch64_logical_immediate_p (uimm, esize, NULL))
2534	      {
2535		set_other_error (mismatch_detail, idx,
2536				 _("immediate out of range"));
2537		return 0;
2538	      }
2539	    if (!aarch64_sve_dupm_mov_immediate_p (uimm, esize))
2540	      {
2541		set_other_error (mismatch_detail, idx,
2542				 _("invalid replicated MOV immediate"));
2543		return 0;
2544	      }
2545	  }
2546	  break;
2547
2548	case AARCH64_OPND_SVE_PATTERN_SCALED:
2549	  assert (opnd->shifter.kind == AARCH64_MOD_MUL);
2550	  if (!value_in_range_p (opnd->shifter.amount, 1, 16))
2551	    {
2552	      set_multiplier_out_of_range_error (mismatch_detail, idx, 1, 16);
2553	      return 0;
2554	    }
2555	  break;
2556
2557	case AARCH64_OPND_SVE_SHLIMM_PRED:
2558	case AARCH64_OPND_SVE_SHLIMM_UNPRED:
2559	case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
2560	  size = aarch64_get_qualifier_esize (opnds[idx - 1].qualifier);
2561	  if (!value_in_range_p (opnd->imm.value, 0, 8 * size - 1))
2562	    {
2563	      set_imm_out_of_range_error (mismatch_detail, idx,
2564					  0, 8 * size - 1);
2565	      return 0;
2566	    }
2567	  break;
2568
2569	case AARCH64_OPND_SVE_SHRIMM_PRED:
2570	case AARCH64_OPND_SVE_SHRIMM_UNPRED:
2571	case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
2572	  num = (type == AARCH64_OPND_SVE_SHRIMM_UNPRED_22) ? 2 : 1;
2573	  size = aarch64_get_qualifier_esize (opnds[idx - num].qualifier);
2574	  if (!value_in_range_p (opnd->imm.value, 1, 8 * size))
2575	    {
2576	      set_imm_out_of_range_error (mismatch_detail, idx, 1, 8*size);
2577	      return 0;
2578	    }
2579	  break;
2580
2581	default:
2582	  break;
2583	}
2584      break;
2585
2586    case AARCH64_OPND_CLASS_SYSTEM:
2587      switch (type)
2588	{
2589	case AARCH64_OPND_PSTATEFIELD:
2590	  for (i = 0; aarch64_pstatefields[i].name; ++i)
2591	    if (aarch64_pstatefields[i].value == opnd->pstatefield)
2592	      break;
2593	  assert (aarch64_pstatefields[i].name);
2594	  assert (idx == 0 && opnds[1].type == AARCH64_OPND_UIMM4);
2595	  max_value = F_GET_REG_MAX_VALUE (aarch64_pstatefields[i].flags);
2596	  if (opnds[1].imm.value < 0 || opnds[1].imm.value > max_value)
2597	    {
2598	      set_imm_out_of_range_error (mismatch_detail, 1, 0, max_value);
2599	      return 0;
2600	    }
2601	  break;
2602	default:
2603	  break;
2604	}
2605      break;
2606
2607    case AARCH64_OPND_CLASS_SIMD_ELEMENT:
2608      /* Get the upper bound for the element index.  */
2609      if (opcode->op == OP_FCMLA_ELEM)
2610	/* FCMLA index range depends on the vector size of other operands
2611	   and is halved because complex numbers take two elements.  */
2612	num = aarch64_get_qualifier_nelem (opnds[0].qualifier)
2613	      * aarch64_get_qualifier_esize (opnds[0].qualifier) / 2;
2614      else
2615	num = 16;
2616      num = num / aarch64_get_qualifier_esize (qualifier) - 1;
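      /* E.g. for FCMLA <Vd>.4S, <Vn>.4S, <Vm>.S[<index>] this gives
	 4 * 4 / 2 == 8 above, then 8 / 4 - 1 == 1, so the index may be 0 or 1.  */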
2617      assert (aarch64_get_qualifier_nelem (qualifier) == 1);
2618
2619      /* Index out-of-range.  */
2620      if (!value_in_range_p (opnd->reglane.index, 0, num))
2621	{
2622	  set_elem_idx_out_of_range_error (mismatch_detail, idx, 0, num);
2623	  return 0;
2624	}
2625      /* SMLAL<Q> <Vd>.<Ta>, <Vn>.<Tb>, <Vm>.<Ts>[<index>].
2626	 <Vm>	Is the vector register (V0-V31) or (V0-V15), whose
2627	 number is encoded in "size:M:Rm":
2628	 size	<Vm>
2629	 00		RESERVED
2630	 01		0:Rm
2631	 10		M:Rm
2632	 11		RESERVED  */
2633      if (type == AARCH64_OPND_Em16 && qualifier == AARCH64_OPND_QLF_S_H
2634	  && !value_in_range_p (opnd->reglane.regno, 0, 15))
2635	{
2636	  set_regno_out_of_range_error (mismatch_detail, idx, 0, 15);
2637	  return 0;
2638	}
2639      break;
2640
2641    case AARCH64_OPND_CLASS_MODIFIED_REG:
2642      assert (idx == 1 || idx == 2);
2643      switch (type)
2644	{
2645	case AARCH64_OPND_Rm_EXT:
2646	  if (!aarch64_extend_operator_p (opnd->shifter.kind)
2647	      && opnd->shifter.kind != AARCH64_MOD_LSL)
2648	    {
2649	      set_other_error (mismatch_detail, idx,
2650			       _("extend operator expected"));
2651	      return 0;
2652	    }
2653	  /* It is not optional unless at least one of "Rd" or "Rn" is '11111'
2654	     (i.e. SP), in which case it defaults to LSL. The LSL alias is
2655	     only valid when "Rd" or "Rn" is '11111', and is preferred in that
2656	     case.  */
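	  /* E.g. "add x0, sp, x1" is accepted and means "add x0, sp, x1, lsl #0",
	     whereas when neither Rd nor Rn is SP the extend operator must be
	     spelled out explicitly.  */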
2657	  if (!aarch64_stack_pointer_p (opnds + 0)
2658	      && (idx != 2 || !aarch64_stack_pointer_p (opnds + 1)))
2659	    {
2660	      if (!opnd->shifter.operator_present)
2661		{
2662		  set_other_error (mismatch_detail, idx,
2663				   _("missing extend operator"));
2664		  return 0;
2665		}
2666	      else if (opnd->shifter.kind == AARCH64_MOD_LSL)
2667		{
2668		  set_other_error (mismatch_detail, idx,
2669				   _("'LSL' operator not allowed"));
2670		  return 0;
2671		}
2672	    }
2673	  assert (opnd->shifter.operator_present	/* Default to LSL.  */
2674		  || opnd->shifter.kind == AARCH64_MOD_LSL);
2675	  if (!value_in_range_p (opnd->shifter.amount, 0, 4))
2676	    {
2677	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, 4);
2678	      return 0;
2679	    }
2680	  /* In the 64-bit form, the final register operand is written as Wm
2681	     for all but the (possibly omitted) UXTX/LSL and SXTX
2682	     operators.
2683	     N.B. GAS allows X register to be used with any operator as a
2684	     programming convenience.  */
2685	  if (qualifier == AARCH64_OPND_QLF_X
2686	      && opnd->shifter.kind != AARCH64_MOD_LSL
2687	      && opnd->shifter.kind != AARCH64_MOD_UXTX
2688	      && opnd->shifter.kind != AARCH64_MOD_SXTX)
2689	    {
2690	      set_other_error (mismatch_detail, idx, _("W register expected"));
2691	      return 0;
2692	    }
2693	  break;
2694
2695	case AARCH64_OPND_Rm_SFT:
2696	  /* ROR is not available to the shifted register operand in
2697	     arithmetic instructions.  */
2698	  if (!aarch64_shift_operator_p (opnd->shifter.kind))
2699	    {
2700	      set_other_error (mismatch_detail, idx,
2701			       _("shift operator expected"));
2702	      return 0;
2703	    }
2704	  if (opnd->shifter.kind == AARCH64_MOD_ROR
2705	      && opcode->iclass != log_shift)
2706	    {
2707	      set_other_error (mismatch_detail, idx,
2708			       _("'ROR' operator not allowed"));
2709	      return 0;
2710	    }
2711	  num = qualifier == AARCH64_OPND_QLF_W ? 31 : 63;
2712	  if (!value_in_range_p (opnd->shifter.amount, 0, num))
2713	    {
2714	      set_sft_amount_out_of_range_error (mismatch_detail, idx, 0, num);
2715	      return 0;
2716	    }
2717	  break;
2718
2719	default:
2720	  break;
2721	}
2722      break;
2723
2724    default:
2725      break;
2726    }
2727
2728  return 1;
2729}
2730
2731/* Main entrypoint for the operand constraint checking.
2732
2733   Return 1 if operands of *INST meet the constraint applied by the operand
2734   codes and operand qualifiers; otherwise return 0 and if MISMATCH_DETAIL is
2735   not NULL, return the detail of the error in *MISMATCH_DETAIL.  N.B. when
2736   adding more constraint checking, make sure MISMATCH_DETAIL->KIND is set
2737   with a proper error kind rather than AARCH64_OPDE_NIL (GAS asserts non-NIL
2738   error kind when it is notified that an instruction does not pass the check).
2739
2740   Un-determined operand qualifiers may get established during the process.  */
2741
2742int
2743aarch64_match_operands_constraint (aarch64_inst *inst,
2744				   aarch64_operand_error *mismatch_detail)
2745{
2746  int i;
2747
2748  DEBUG_TRACE ("enter");
2749
2750  i = inst->opcode->tied_operand;
2751
2752  if (i > 0)
2753    {
2754      /* Check for tied_operands with specific opcode iclass.  */
2755      switch (inst->opcode->iclass)
2756        {
2757        /* For SME LDR and STR instructions #imm must have the same numerical
2758           value for both operands.
2759        */
2760        case sme_ldr:
2761        case sme_str:
2762          assert (inst->operands[0].type == AARCH64_OPND_SME_ZA_array);
2763          assert (inst->operands[1].type == AARCH64_OPND_SME_ADDR_RI_U4xVL);
2764          if (inst->operands[0].za_tile_vector.index.imm
2765              != inst->operands[1].addr.offset.imm)
2766            {
2767              if (mismatch_detail)
2768                {
2769                  mismatch_detail->kind = AARCH64_OPDE_UNTIED_IMMS;
2770                  mismatch_detail->index = i;
2771                }
2772              return 0;
2773            }
2774          break;
2775
2776        default:
2777          /* Check for cases where a source register needs to be the same as the
2778             destination register.  Do this before matching qualifiers since if
2779             an instruction has both invalid tying and invalid qualifiers,
2780             the error about qualifiers would suggest several alternative
2781             instructions that also have invalid tying.  */
2782          if (inst->operands[0].reg.regno
2783              != inst->operands[i].reg.regno)
2784            {
2785              if (mismatch_detail)
2786                {
2787                  mismatch_detail->kind = AARCH64_OPDE_UNTIED_OPERAND;
2788                  mismatch_detail->index = i;
2789                  mismatch_detail->error = NULL;
2790                }
2791              return 0;
2792            }
2793          break;
2794        }
2795    }
2796
2797  /* Match operands' qualifier.
2798     *INST has already had qualifiers established for some, if not all, of
2799     its operands; we need to find out whether these established
2800     qualifiers match one of the qualifier sequence in
2801     INST->OPCODE->QUALIFIERS_LIST.  If yes, we will assign each operand
2802     with the corresponding qualifier in such a sequence.
2803     Only basic operand constraint checking is done here; the more thorough
2804     constraint checking will be carried out by operand_general_constraint_met_p,
2805     which has to be called after this in order to get all of the operands'
2806     qualifiers established.  */
2807  if (match_operands_qualifier (inst, true /* update_p */) == 0)
2808    {
2809      DEBUG_TRACE ("FAIL on operand qualifier matching");
2810      if (mismatch_detail)
2811	{
2812	  /* Return an error type to indicate that it is a qualifier
2813	     matching failure; we don't care about which operand as there
2814	     is enough information in the opcode table to reproduce it.  */
2815	  mismatch_detail->kind = AARCH64_OPDE_INVALID_VARIANT;
2816	  mismatch_detail->index = -1;
2817	  mismatch_detail->error = NULL;
2818	}
2819      return 0;
2820    }
2821
2822  /* Match operands' constraint.  */
2823  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2824    {
2825      enum aarch64_opnd type = inst->opcode->operands[i];
2826      if (type == AARCH64_OPND_NIL)
2827	break;
2828      if (inst->operands[i].skip)
2829	{
2830	  DEBUG_TRACE ("skip the incomplete operand %d", i);
2831	  continue;
2832	}
2833      if (operand_general_constraint_met_p (inst->operands, i, type,
2834					    inst->opcode, mismatch_detail) == 0)
2835	{
2836	  DEBUG_TRACE ("FAIL on operand %d", i);
2837	  return 0;
2838	}
2839    }
2840
2841  DEBUG_TRACE ("PASS");
2842
2843  return 1;
2844}
2845
2846/* Replace INST->OPCODE with OPCODE and return the replaced OPCODE.
2847   Also updates the TYPE of each INST->OPERANDS with the corresponding
2848   value of OPCODE->OPERANDS.
2849
2850   Note that some operand qualifiers may need to be manually cleared by
2851   the caller before it goes on to call aarch64_opcode_encode; doing
2852   this helps the qualifier matching facilities work
2853   properly.  */
2854
2855const aarch64_opcode*
2856aarch64_replace_opcode (aarch64_inst *inst, const aarch64_opcode *opcode)
2857{
2858  int i;
2859  const aarch64_opcode *old = inst->opcode;
2860
2861  inst->opcode = opcode;
2862
2863  /* Update the operand types.  */
2864  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2865    {
2866      inst->operands[i].type = opcode->operands[i];
2867      if (opcode->operands[i] == AARCH64_OPND_NIL)
2868	break;
2869    }
2870
2871  DEBUG_TRACE ("replace %s with %s", old->name, opcode->name);
2872
2873  return old;
2874}
2875
2876int
2877aarch64_operand_index (const enum aarch64_opnd *operands, enum aarch64_opnd operand)
2878{
2879  int i;
2880  for (i = 0; i < AARCH64_MAX_OPND_NUM; ++i)
2881    if (operands[i] == operand)
2882      return i;
2883    else if (operands[i] == AARCH64_OPND_NIL)
2884      break;
2885  return -1;
2886}
2887
2888/* R0...R30, followed by FOR31.  */
2889#define BANK(R, FOR31) \
2890  { R  (0), R  (1), R  (2), R  (3), R  (4), R  (5), R  (6), R  (7), \
2891    R  (8), R  (9), R (10), R (11), R (12), R (13), R (14), R (15), \
2892    R (16), R (17), R (18), R (19), R (20), R (21), R (22), R (23), \
2893    R (24), R (25), R (26), R (27), R (28), R (29), R (30),  FOR31 }
2894/* [0][0]  32-bit integer regs with sp   Wn
2895   [0][1]  64-bit integer regs with sp   Xn  sf=1
2896   [1][0]  32-bit integer regs with #0   Wn
2897   [1][1]  64-bit integer regs with #0   Xn  sf=1 */
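/* E.g. int_reg[0][1][31] is "sp", while int_reg[1][1][31] is "xzr".  */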
2898static const char *int_reg[2][2][32] = {
2899#define R32(X) "w" #X
2900#define R64(X) "x" #X
2901  { BANK (R32, "wsp"), BANK (R64, "sp") },
2902  { BANK (R32, "wzr"), BANK (R64, "xzr") }
2903#undef R64
2904#undef R32
2905};
2906
2907/* Names of the SVE vector registers, first with .S suffixes,
2908   then with .D suffixes.  */
2909
2910static const char *sve_reg[2][32] = {
2911#define ZS(X) "z" #X ".s"
2912#define ZD(X) "z" #X ".d"
2913  BANK (ZS, ZS (31)), BANK (ZD, ZD (31))
2914#undef ZD
2915#undef ZS
2916};
2917#undef BANK
2918
2919/* Return the integer register name.
2920   If SP_REG_P is not 0, R31 is an SP reg, otherwise R31 is the zero reg.  */
2921
2922static inline const char *
2923get_int_reg_name (int regno, aarch64_opnd_qualifier_t qualifier, int sp_reg_p)
2924{
2925  const int has_zr = sp_reg_p ? 0 : 1;
2926  const int is_64 = aarch64_get_qualifier_esize (qualifier) == 4 ? 0 : 1;
2927  return int_reg[has_zr][is_64][regno];
2928}
2929
2930/* Like get_int_reg_name, but IS_64 is always 1.  */
2931
2932static inline const char *
2933get_64bit_int_reg_name (int regno, int sp_reg_p)
2934{
2935  const int has_zr = sp_reg_p ? 0 : 1;
2936  return int_reg[has_zr][1][regno];
2937}
2938
2939/* Get the name of the integer offset register in OPND, using the shift type
2940   to decide whether it's a word or doubleword.  */
2941
2942static inline const char *
2943get_offset_int_reg_name (const aarch64_opnd_info *opnd)
2944{
2945  switch (opnd->shifter.kind)
2946    {
2947    case AARCH64_MOD_UXTW:
2948    case AARCH64_MOD_SXTW:
2949      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_W, 0);
2950
2951    case AARCH64_MOD_LSL:
2952    case AARCH64_MOD_SXTX:
2953      return get_int_reg_name (opnd->addr.offset.regno, AARCH64_OPND_QLF_X, 0);
2954
2955    default:
2956      abort ();
2957    }
2958}
2959
2960/* Get the name of the SVE vector offset register in OPND, using the operand
2961   qualifier to decide whether the suffix should be .S or .D.  */
2962
2963static inline const char *
2964get_addr_sve_reg_name (int regno, aarch64_opnd_qualifier_t qualifier)
2965{
2966  assert (qualifier == AARCH64_OPND_QLF_S_S
2967	  || qualifier == AARCH64_OPND_QLF_S_D);
2968  return sve_reg[qualifier == AARCH64_OPND_QLF_S_D][regno];
2969}
2970
2971/* Types for expanding an encoded 8-bit value to a floating-point value.  */
2972
2973typedef union
2974{
2975  uint64_t i;
2976  double   d;
2977} double_conv_t;
2978
2979typedef union
2980{
2981  uint32_t i;
2982  float    f;
2983} single_conv_t;
2984
2985typedef union
2986{
2987  uint32_t i;
2988  float    f;
2989} half_conv_t;
2990
2991/* IMM8 is an 8-bit floating-point constant with sign, 3-bit exponent and
2992   normalized 4 bits of precision, encoded in "a:b:c:d:e:f:g:h" or FLD_imm8
2993   (depending on the type of the instruction).  IMM8 will be expanded to a
2994   single-precision floating-point value (SIZE == 4) or a double-precision
2995   floating-point value (SIZE == 8).  A half-precision floating-point value
2996   (SIZE == 2) is expanded to a single-precision floating-point value.  The
2997   expanded value is returned.  */
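/* For example, with SIZE == 4 an IMM8 of 0x70 (a=0, b=c=d=1, e..h=0) expands
   to 0x3f800000 (1.0f), and an IMM8 of 0x00 expands to 0x40000000 (2.0f).  */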
2998
2999static uint64_t
3000expand_fp_imm (int size, uint32_t imm8)
3001{
3002  uint64_t imm = 0;
3003  uint32_t imm8_7, imm8_6_0, imm8_6, imm8_6_repl4;
3004
3005  imm8_7 = (imm8 >> 7) & 0x01;	/* imm8<7>   */
3006  imm8_6_0 = imm8 & 0x7f;	/* imm8<6:0> */
3007  imm8_6 = imm8_6_0 >> 6;	/* imm8<6>   */
3008  imm8_6_repl4 = (imm8_6 << 3) | (imm8_6 << 2)
3009    | (imm8_6 << 1) | imm8_6;	/* Replicate(imm8<6>,4) */
3010  if (size == 8)
3011    {
3012      imm = (imm8_7 << (63-32))		/* imm8<7>  */
3013	| ((imm8_6 ^ 1) << (62-32))	/* NOT(imm8<6>)	*/
3014	| (imm8_6_repl4 << (58-32)) | (imm8_6 << (57-32))
3015	| (imm8_6 << (56-32)) | (imm8_6 << (55-32)) /* Replicate(imm8<6>,7) */
3016	| (imm8_6_0 << (48-32));	/* imm8<6>:imm8<5:0>    */
3017      imm <<= 32;
3018    }
3019  else if (size == 4 || size == 2)
3020    {
3021      imm = (imm8_7 << 31)	/* imm8<7>              */
3022	| ((imm8_6 ^ 1) << 30)	/* NOT(imm8<6>)         */
3023	| (imm8_6_repl4 << 26)	/* Replicate(imm8<6>,4) */
3024	| (imm8_6_0 << 19);	/* imm8<6>:imm8<5:0>    */
3025    }
3026  else
3027    {
3028      /* An unsupported size.  */
3029      assert (0);
3030    }
3031
3032  return imm;
3033}
3034
3035/* Return a string based on FMT with the register style applied.  */
3036
3037static const char *
3038style_reg (struct aarch64_styler *styler, const char *fmt, ...)
3039{
3040  const char *txt;
3041  va_list ap;
3042
3043  va_start (ap, fmt);
3044  txt = styler->apply_style (styler, dis_style_register, fmt, ap);
3045  va_end (ap);
3046
3047  return txt;
3048}
3049
3050/* Return a string based on FMT with the immediate style applied.  */
3051
3052static const char *
3053style_imm (struct aarch64_styler *styler, const char *fmt, ...)
3054{
3055  const char *txt;
3056  va_list ap;
3057
3058  va_start (ap, fmt);
3059  txt = styler->apply_style (styler, dis_style_immediate, fmt, ap);
3060  va_end (ap);
3061
3062  return txt;
3063}
3064
3065/* Return a string based on FMT with the sub-mnemonic style applied.  */
3066
3067static const char *
3068style_sub_mnem (struct aarch64_styler *styler, const char *fmt, ...)
3069{
3070  const char *txt;
3071  va_list ap;
3072
3073  va_start (ap, fmt);
3074  txt = styler->apply_style (styler, dis_style_sub_mnemonic, fmt, ap);
3075  va_end (ap);
3076
3077  return txt;
3078}
3079
3080/* Return a string based on FMT with the address style applied.  */
3081
3082static const char *
3083style_addr (struct aarch64_styler *styler, const char *fmt, ...)
3084{
3085  const char *txt;
3086  va_list ap;
3087
3088  va_start (ap, fmt);
3089  txt = styler->apply_style (styler, dis_style_address, fmt, ap);
3090  va_end (ap);
3091
3092  return txt;
3093}
3094
3095/* Produce the string representation of the register list operand *OPND
3096   in the buffer pointed to by BUF of size SIZE.  PREFIX is the part of
3097   the register name that comes before the register number, such as "v".  */
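/* For example, with PREFIX "v" and the 4S qualifier, four consecutive
   registers starting at v0 are printed as "{v0.4s-v3.4s}"; a list that wraps
   around the register bank, such as three 16B registers starting at v30, uses
   the comma form "{v30.16b, v31.16b, v0.16b}".  */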
3098static void
3099print_register_list (char *buf, size_t size, const aarch64_opnd_info *opnd,
3100		     const char *prefix, struct aarch64_styler *styler)
3101{
3102  const int num_regs = opnd->reglist.num_regs;
3103  const int first_reg = opnd->reglist.first_regno;
3104  const int last_reg = (first_reg + num_regs - 1) & 0x1f;
3105  const char *qlf_name = aarch64_get_qualifier_name (opnd->qualifier);
3106  char tb[16];	/* Temporary buffer.  */
3107
3108  assert (opnd->type != AARCH64_OPND_LEt || opnd->reglist.has_index);
3109  assert (num_regs >= 1 && num_regs <= 4);
3110
3111  /* Prepare the index if any.  */
3112  if (opnd->reglist.has_index)
3113    /* PR 21096: The %100 is to silence a warning about possible truncation.  */
3114    snprintf (tb, sizeof (tb), "[%s]",
3115	      style_imm (styler, "%" PRIi64, (opnd->reglist.index % 100)));
3116  else
3117    tb[0] = '\0';
3118
3119  /* The hyphenated form is preferred for disassembly if there are
3120     more than two registers in the list, and the register numbers
3121     are monotonically increasing in increments of one.  */
3122  if (num_regs > 2 && last_reg > first_reg)
3123    snprintf (buf, size, "{%s-%s}%s",
3124	      style_reg (styler, "%s%d.%s", prefix, first_reg, qlf_name),
3125	      style_reg (styler, "%s%d.%s", prefix, last_reg, qlf_name), tb);
3126  else
3127    {
3128      const int reg0 = first_reg;
3129      const int reg1 = (first_reg + 1) & 0x1f;
3130      const int reg2 = (first_reg + 2) & 0x1f;
3131      const int reg3 = (first_reg + 3) & 0x1f;
3132
3133      switch (num_regs)
3134	{
3135	case 1:
3136	  snprintf (buf, size, "{%s}%s",
3137		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3138		    tb);
3139	  break;
3140	case 2:
3141	  snprintf (buf, size, "{%s, %s}%s",
3142		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3143		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3144		    tb);
3145	  break;
3146	case 3:
3147	  snprintf (buf, size, "{%s, %s, %s}%s",
3148		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3149		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3150		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3151		    tb);
3152	  break;
3153	case 4:
3154	  snprintf (buf, size, "{%s, %s, %s, %s}%s",
3155		    style_reg (styler, "%s%d.%s", prefix, reg0, qlf_name),
3156		    style_reg (styler, "%s%d.%s", prefix, reg1, qlf_name),
3157		    style_reg (styler, "%s%d.%s", prefix, reg2, qlf_name),
3158		    style_reg (styler, "%s%d.%s", prefix, reg3, qlf_name),
3159		    tb);
3160	  break;
3161	}
3162    }
3163}
3164
3165/* Print the register+immediate address in OPND to BUF, which has SIZE
3166   characters.  BASE is the name of the base register.  */
3167
3168static void
3169print_immediate_offset_address (char *buf, size_t size,
3170				const aarch64_opnd_info *opnd,
3171				const char *base,
3172				struct aarch64_styler *styler)
3173{
3174  if (opnd->addr.writeback)
3175    {
3176      if (opnd->addr.preind)
3177        {
3178	  if (opnd->type == AARCH64_OPND_ADDR_SIMM10 && !opnd->addr.offset.imm)
3179	    snprintf (buf, size, "[%s]!", style_reg (styler, base));
3180          else
3181	    snprintf (buf, size, "[%s, %s]!",
3182		      style_reg (styler, base),
3183		      style_imm (styler, "#%d", opnd->addr.offset.imm));
3184        }
3185      else
3186	snprintf (buf, size, "[%s], %s",
3187		  style_reg (styler, base),
3188		  style_imm (styler, "#%d", opnd->addr.offset.imm));
3189    }
3190  else
3191    {
3192      if (opnd->shifter.operator_present)
3193	{
3194	  assert (opnd->shifter.kind == AARCH64_MOD_MUL_VL);
3195	  snprintf (buf, size, "[%s, %s, %s]",
3196		    style_reg (styler, base),
3197		    style_imm (styler, "#%d", opnd->addr.offset.imm),
3198		    style_sub_mnem (styler, "mul vl"));
3199	}
3200      else if (opnd->addr.offset.imm)
3201	snprintf (buf, size, "[%s, %s]",
3202		  style_reg (styler, base),
3203		  style_imm (styler, "#%d", opnd->addr.offset.imm));
3204      else
3205	snprintf (buf, size, "[%s]", style_reg (styler, base));
3206    }
3207}
3208
3209/* Produce the string representation of the register offset address operand
3210   *OPND in the buffer pointed to by BUF of size SIZE.  BASE and OFFSET are
3211   the names of the base and offset registers.  */
3212static void
3213print_register_offset_address (char *buf, size_t size,
3214			       const aarch64_opnd_info *opnd,
3215			       const char *base, const char *offset,
3216			       struct aarch64_styler *styler)
3217{
3218  char tb[32];			/* Temporary buffer.  */
3219  bool print_extend_p = true;
3220  bool print_amount_p = true;
3221  const char *shift_name = aarch64_operand_modifiers[opnd->shifter.kind].name;
3222
3223  if (!opnd->shifter.amount && (opnd->qualifier != AARCH64_OPND_QLF_S_B
3224				|| !opnd->shifter.amount_present))
3225    {
3226      /* Don't print the shift/extend amount when the amount is zero and
3227         this is not the special case of an 8-bit load/store instruction.  */
3228      print_amount_p = false;
3229      /* Likewise, no need to print the shift operator LSL in such a
3230	 situation.  */
3231      if (opnd->shifter.kind == AARCH64_MOD_LSL)
3232	print_extend_p = false;
3233    }
3234
3235  /* Prepare for the extend/shift.  */
3236  if (print_extend_p)
3237    {
3238      if (print_amount_p)
3239	snprintf (tb, sizeof (tb), ", %s %s",
3240		  style_sub_mnem (styler, shift_name),
3241		  style_imm (styler, "#%" PRIi64,
3242  /* PR 21096: The %100 is to silence a warning about possible truncation.  */
3243			     (opnd->shifter.amount % 100)));
3244      else
3245	snprintf (tb, sizeof (tb), ", %s",
3246		  style_sub_mnem (styler, shift_name));
3247    }
3248  else
3249    tb[0] = '\0';
3250
3251  snprintf (buf, size, "[%s, %s%s]", style_reg (styler, base),
3252	    style_reg (styler, offset), tb);
3253}
3254
3255/* Print ZA tiles from imm8 in ZERO instruction.
3256
3257   The preferred disassembly of this instruction uses the shortest list of tile
3258   names that represent the encoded immediate mask.
3259
3260   For example:
3261    * An all-ones immediate is disassembled as {ZA}.
3262    * An all-zeros immediate is disassembled as an empty list { }.
3263*/
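/* E.g. a mask of 0x55 is printed as "{za0.h}" and a mask of 0x81 as
   "{za0.d, za7.d}".  */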
3264static void
3265print_sme_za_list (char *buf, size_t size, int mask,
3266		   struct aarch64_styler *styler)
3267{
3268  const char* zan[] = { "za",    "za0.h", "za1.h", "za0.s",
3269                        "za1.s", "za2.s", "za3.s", "za0.d",
3270                        "za1.d", "za2.d", "za3.d", "za4.d",
3271                        "za5.d", "za6.d", "za7.d", " " };
3272  const int zan_v[] = { 0xff, 0x55, 0xaa, 0x11,
3273                        0x22, 0x44, 0x88, 0x01,
3274                        0x02, 0x04, 0x08, 0x10,
3275                        0x20, 0x40, 0x80, 0x00 };
3276  int i, k;
3277  const int ZAN_SIZE = sizeof(zan) / sizeof(zan[0]);
3278
3279  k = snprintf (buf, size, "{");
3280  for (i = 0; i < ZAN_SIZE; i++)
3281    {
3282      if ((mask & zan_v[i]) == zan_v[i])
3283        {
3284          mask &= ~zan_v[i];
3285          if (k > 1)
3286	    k += snprintf (buf + k, size - k, ", ");
3287
3288	  k += snprintf (buf + k, size - k, "%s", style_reg (styler, zan[i]));
3289        }
3290      if (mask == 0)
3291        break;
3292    }
3293  snprintf (buf + k, size - k, "}");
3294}
3295
3296/* Generate the string representation of the operand OPNDS[IDX] for OPCODE
3297   in *BUF.  The caller should pass in the maximum size of *BUF in SIZE.
3298   PC, PCREL_P and ADDRESS are used to pass in and return information about
3299   the PC-relative address calculation, where the PC value is passed in
3300   PC.  If the operand is PC-relative, *PCREL_P (if PCREL_P non-NULL)
3301   will return 1 and *ADDRESS (if ADDRESS non-NULL) will return the
3302   calculated address; otherwise, *PCREL_P (if PCREL_P non-NULL) returns 0.
3303
3304   The function serves both the disassembler and the assembler diagnostics
3305   issuer, which is the reason why it lives in this file.  */
3306
3307void
3308aarch64_print_operand (char *buf, size_t size, bfd_vma pc,
3309		       const aarch64_opcode *opcode,
3310		       const aarch64_opnd_info *opnds, int idx, int *pcrel_p,
3311		       bfd_vma *address, char** notes,
3312		       char *comment, size_t comment_size,
3313		       aarch64_feature_set features,
3314		       struct aarch64_styler *styler)
3315{
3316  unsigned int i, num_conds;
3317  const char *name = NULL;
3318  const aarch64_opnd_info *opnd = opnds + idx;
3319  enum aarch64_modifier_kind kind;
3320  uint64_t addr, enum_value;
3321
3322  if (comment != NULL)
3323    {
3324      assert (comment_size > 0);
3325      comment[0] = '\0';
3326    }
3327  else
3328    assert (comment_size == 0);
3329
3330  buf[0] = '\0';
3331  if (pcrel_p)
3332    *pcrel_p = 0;
3333
3334  switch (opnd->type)
3335    {
3336    case AARCH64_OPND_Rd:
3337    case AARCH64_OPND_Rn:
3338    case AARCH64_OPND_Rm:
3339    case AARCH64_OPND_Rt:
3340    case AARCH64_OPND_Rt2:
3341    case AARCH64_OPND_Rs:
3342    case AARCH64_OPND_Ra:
3343    case AARCH64_OPND_Rt_LS64:
3344    case AARCH64_OPND_Rt_SYS:
3345    case AARCH64_OPND_PAIRREG:
3346    case AARCH64_OPND_SVE_Rm:
3347      /* The optional-ness of <Xt> in e.g. IC <ic_op>{, <Xt>} is determined by
3348	 the <ic_op>, therefore we use opnd->present to override the
3349	 generic optional-ness information.  */
3350      if (opnd->type == AARCH64_OPND_Rt_SYS)
3351	{
3352	  if (!opnd->present)
3353	    break;
3354	}
3355      /* Omit the operand, e.g. RET.  */
3356      else if (optional_operand_p (opcode, idx)
3357	       && (opnd->reg.regno
3358		   == get_optional_operand_default_value (opcode)))
3359	break;
3360      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3361	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3362      snprintf (buf, size, "%s",
3363		style_reg (styler, get_int_reg_name (opnd->reg.regno,
3364						     opnd->qualifier, 0)));
3365      break;
3366
3367    case AARCH64_OPND_Rd_SP:
3368    case AARCH64_OPND_Rn_SP:
3369    case AARCH64_OPND_Rt_SP:
3370    case AARCH64_OPND_SVE_Rn_SP:
3371    case AARCH64_OPND_Rm_SP:
3372      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3373	      || opnd->qualifier == AARCH64_OPND_QLF_WSP
3374	      || opnd->qualifier == AARCH64_OPND_QLF_X
3375	      || opnd->qualifier == AARCH64_OPND_QLF_SP);
3376      snprintf (buf, size, "%s",
3377		style_reg (styler, get_int_reg_name (opnd->reg.regno,
3378						     opnd->qualifier, 1)));
3379      break;
3380
3381    case AARCH64_OPND_Rm_EXT:
3382      kind = opnd->shifter.kind;
3383      assert (idx == 1 || idx == 2);
3384      if ((aarch64_stack_pointer_p (opnds)
3385	   || (idx == 2 && aarch64_stack_pointer_p (opnds + 1)))
3386	  && ((opnd->qualifier == AARCH64_OPND_QLF_W
3387	       && opnds[0].qualifier == AARCH64_OPND_QLF_W
3388	       && kind == AARCH64_MOD_UXTW)
3389	      || (opnd->qualifier == AARCH64_OPND_QLF_X
3390		  && kind == AARCH64_MOD_UXTX)))
3391	{
3392	  /* 'LSL' is the preferred form in this case.  */
3393	  kind = AARCH64_MOD_LSL;
3394	  if (opnd->shifter.amount == 0)
3395	    {
3396	      /* Shifter omitted.  */
3397	      snprintf (buf, size, "%s",
3398			style_reg (styler,
3399				   get_int_reg_name (opnd->reg.regno,
3400						     opnd->qualifier, 0)));
3401	      break;
3402	    }
3403	}
3404      if (opnd->shifter.amount)
3405	snprintf (buf, size, "%s, %s %s",
3406		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3407		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name),
3408		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3409      else
3410	snprintf (buf, size, "%s, %s",
3411		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3412		  style_sub_mnem (styler, aarch64_operand_modifiers[kind].name));
3413      break;
3414
3415    case AARCH64_OPND_Rm_SFT:
3416      assert (opnd->qualifier == AARCH64_OPND_QLF_W
3417	      || opnd->qualifier == AARCH64_OPND_QLF_X);
3418      if (opnd->shifter.amount == 0 && opnd->shifter.kind == AARCH64_MOD_LSL)
3419	snprintf (buf, size, "%s",
3420		  style_reg (styler, get_int_reg_name (opnd->reg.regno,
3421						       opnd->qualifier, 0)));
3422      else
3423	snprintf (buf, size, "%s, %s %s",
3424		  style_reg (styler, get_int_reg_name (opnd->reg.regno, opnd->qualifier, 0)),
3425		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3426		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3427      break;
3428
3429    case AARCH64_OPND_Fd:
3430    case AARCH64_OPND_Fn:
3431    case AARCH64_OPND_Fm:
3432    case AARCH64_OPND_Fa:
3433    case AARCH64_OPND_Ft:
3434    case AARCH64_OPND_Ft2:
3435    case AARCH64_OPND_Sd:
3436    case AARCH64_OPND_Sn:
3437    case AARCH64_OPND_Sm:
3438    case AARCH64_OPND_SVE_VZn:
3439    case AARCH64_OPND_SVE_Vd:
3440    case AARCH64_OPND_SVE_Vm:
3441    case AARCH64_OPND_SVE_Vn:
3442      snprintf (buf, size, "%s",
3443		style_reg (styler, "%s%d",
3444			   aarch64_get_qualifier_name (opnd->qualifier),
3445			   opnd->reg.regno));
3446      break;
3447
3448    case AARCH64_OPND_Va:
3449    case AARCH64_OPND_Vd:
3450    case AARCH64_OPND_Vn:
3451    case AARCH64_OPND_Vm:
3452      snprintf (buf, size, "%s",
3453		style_reg (styler, "v%d.%s", opnd->reg.regno,
3454			   aarch64_get_qualifier_name (opnd->qualifier)));
3455      break;
3456
3457    case AARCH64_OPND_Ed:
3458    case AARCH64_OPND_En:
3459    case AARCH64_OPND_Em:
3460    case AARCH64_OPND_Em16:
3461    case AARCH64_OPND_SM3_IMM2:
3462      snprintf (buf, size, "%s[%s]",
3463		style_reg (styler, "v%d.%s", opnd->reglane.regno,
3464			   aarch64_get_qualifier_name (opnd->qualifier)),
3465		style_imm (styler, "%" PRIi64, opnd->reglane.index));
3466      break;
3467
3468    case AARCH64_OPND_VdD1:
3469    case AARCH64_OPND_VnD1:
3470      snprintf (buf, size, "%s[%s]",
3471		style_reg (styler, "v%d.d", opnd->reg.regno),
3472		style_imm (styler, "1"));
3473      break;
3474
3475    case AARCH64_OPND_LVn:
3476    case AARCH64_OPND_LVt:
3477    case AARCH64_OPND_LVt_AL:
3478    case AARCH64_OPND_LEt:
3479      print_register_list (buf, size, opnd, "v", styler);
3480      break;
3481
3482    case AARCH64_OPND_SVE_Pd:
3483    case AARCH64_OPND_SVE_Pg3:
3484    case AARCH64_OPND_SVE_Pg4_5:
3485    case AARCH64_OPND_SVE_Pg4_10:
3486    case AARCH64_OPND_SVE_Pg4_16:
3487    case AARCH64_OPND_SVE_Pm:
3488    case AARCH64_OPND_SVE_Pn:
3489    case AARCH64_OPND_SVE_Pt:
3490    case AARCH64_OPND_SME_Pm:
3491      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3492	snprintf (buf, size, "%s",
3493		  style_reg (styler, "p%d", opnd->reg.regno));
3494      else if (opnd->qualifier == AARCH64_OPND_QLF_P_Z
3495	       || opnd->qualifier == AARCH64_OPND_QLF_P_M)
3496	snprintf (buf, size, "%s",
3497		  style_reg (styler, "p%d/%s", opnd->reg.regno,
3498			     aarch64_get_qualifier_name (opnd->qualifier)));
3499      else
3500	snprintf (buf, size, "%s",
3501		  style_reg (styler, "p%d.%s", opnd->reg.regno,
3502			     aarch64_get_qualifier_name (opnd->qualifier)));
3503      break;
3504
3505    case AARCH64_OPND_SVE_Za_5:
3506    case AARCH64_OPND_SVE_Za_16:
3507    case AARCH64_OPND_SVE_Zd:
3508    case AARCH64_OPND_SVE_Zm_5:
3509    case AARCH64_OPND_SVE_Zm_16:
3510    case AARCH64_OPND_SVE_Zn:
3511    case AARCH64_OPND_SVE_Zt:
3512      if (opnd->qualifier == AARCH64_OPND_QLF_NIL)
3513	snprintf (buf, size, "%s", style_reg (styler, "z%d", opnd->reg.regno));
3514      else
3515	snprintf (buf, size, "%s",
3516		  style_reg (styler, "z%d.%s", opnd->reg.regno,
3517			     aarch64_get_qualifier_name (opnd->qualifier)));
3518      break;
3519
3520    case AARCH64_OPND_SVE_ZnxN:
3521    case AARCH64_OPND_SVE_ZtxN:
3522      print_register_list (buf, size, opnd, "z", styler);
3523      break;
3524
3525    case AARCH64_OPND_SVE_Zm3_INDEX:
3526    case AARCH64_OPND_SVE_Zm3_22_INDEX:
3527    case AARCH64_OPND_SVE_Zm3_11_INDEX:
3528    case AARCH64_OPND_SVE_Zm4_11_INDEX:
3529    case AARCH64_OPND_SVE_Zm4_INDEX:
3530    case AARCH64_OPND_SVE_Zn_INDEX:
3531      snprintf (buf, size, "%s[%s]",
3532		style_reg (styler, "z%d.%s", opnd->reglane.regno,
3533			   aarch64_get_qualifier_name (opnd->qualifier)),
3534		style_imm (styler, "%" PRIi64, opnd->reglane.index));
3535      break;
3536
3537    case AARCH64_OPND_SME_ZAda_2b:
3538    case AARCH64_OPND_SME_ZAda_3b:
3539      snprintf (buf, size, "%s",
3540		style_reg (styler, "za%d.%s", opnd->reg.regno,
3541			   aarch64_get_qualifier_name (opnd->qualifier)));
3542      break;
3543
3544    case AARCH64_OPND_SME_ZA_HV_idx_src:
3545    case AARCH64_OPND_SME_ZA_HV_idx_dest:
3546    case AARCH64_OPND_SME_ZA_HV_idx_ldstr:
3547      snprintf (buf, size, "%s%s[%s, %s]%s",
3548		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "{" : "",
3549		style_reg (styler, "za%d%c.%s",
3550			   opnd->za_tile_vector.regno,
3551			   opnd->za_tile_vector.v == 1 ? 'v' : 'h',
3552			   aarch64_get_qualifier_name (opnd->qualifier)),
3553		style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3554		style_imm (styler, "%d", opnd->za_tile_vector.index.imm),
3555		opnd->type == AARCH64_OPND_SME_ZA_HV_idx_ldstr ? "}" : "");
3556      break;
3557
3558    case AARCH64_OPND_SME_list_of_64bit_tiles:
3559      print_sme_za_list (buf, size, opnd->reg.regno, styler);
3560      break;
3561
3562    case AARCH64_OPND_SME_ZA_array:
3563      snprintf (buf, size, "%s[%s, %s]",
3564		style_reg (styler, "za"),
3565		style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3566		style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3567      break;
3568
3569    case AARCH64_OPND_SME_SM_ZA:
3570      snprintf (buf, size, "%s",
3571		style_reg (styler, opnd->reg.regno == 's' ? "sm" : "za"));
3572      break;
3573
3574    case AARCH64_OPND_SME_PnT_Wm_imm:
3575      snprintf (buf, size, "%s[%s, %s]",
3576		style_reg (styler, "p%d.%s", opnd->za_tile_vector.regno,
3577			   aarch64_get_qualifier_name (opnd->qualifier)),
3578                style_reg (styler, "w%d", opnd->za_tile_vector.index.regno),
3579                style_imm (styler, "%d", opnd->za_tile_vector.index.imm));
3580      break;
3581
3582    case AARCH64_OPND_CRn:
3583    case AARCH64_OPND_CRm:
3584      snprintf (buf, size, "%s",
3585		style_reg (styler, "C%" PRIi64, opnd->imm.value));
3586      break;
3587
3588    case AARCH64_OPND_IDX:
3589    case AARCH64_OPND_MASK:
3590    case AARCH64_OPND_IMM:
3591    case AARCH64_OPND_IMM_2:
3592    case AARCH64_OPND_WIDTH:
3593    case AARCH64_OPND_UIMM3_OP1:
3594    case AARCH64_OPND_UIMM3_OP2:
3595    case AARCH64_OPND_BIT_NUM:
3596    case AARCH64_OPND_IMM_VLSL:
3597    case AARCH64_OPND_IMM_VLSR:
3598    case AARCH64_OPND_SHLL_IMM:
3599    case AARCH64_OPND_IMM0:
3600    case AARCH64_OPND_IMMR:
3601    case AARCH64_OPND_IMMS:
3602    case AARCH64_OPND_UNDEFINED:
3603    case AARCH64_OPND_FBITS:
3604    case AARCH64_OPND_TME_UIMM16:
3605    case AARCH64_OPND_SIMM5:
3606    case AARCH64_OPND_SVE_SHLIMM_PRED:
3607    case AARCH64_OPND_SVE_SHLIMM_UNPRED:
3608    case AARCH64_OPND_SVE_SHLIMM_UNPRED_22:
3609    case AARCH64_OPND_SVE_SHRIMM_PRED:
3610    case AARCH64_OPND_SVE_SHRIMM_UNPRED:
3611    case AARCH64_OPND_SVE_SHRIMM_UNPRED_22:
3612    case AARCH64_OPND_SVE_SIMM5:
3613    case AARCH64_OPND_SVE_SIMM5B:
3614    case AARCH64_OPND_SVE_SIMM6:
3615    case AARCH64_OPND_SVE_SIMM8:
3616    case AARCH64_OPND_SVE_UIMM3:
3617    case AARCH64_OPND_SVE_UIMM7:
3618    case AARCH64_OPND_SVE_UIMM8:
3619    case AARCH64_OPND_SVE_UIMM8_53:
3620    case AARCH64_OPND_IMM_ROT1:
3621    case AARCH64_OPND_IMM_ROT2:
3622    case AARCH64_OPND_IMM_ROT3:
3623    case AARCH64_OPND_SVE_IMM_ROT1:
3624    case AARCH64_OPND_SVE_IMM_ROT2:
3625    case AARCH64_OPND_SVE_IMM_ROT3:
3626    case AARCH64_OPND_CSSC_SIMM8:
3627    case AARCH64_OPND_CSSC_UIMM8:
3628      snprintf (buf, size, "%s",
3629		style_imm (styler, "#%" PRIi64, opnd->imm.value));
3630      break;
3631
3632    case AARCH64_OPND_SVE_I1_HALF_ONE:
3633    case AARCH64_OPND_SVE_I1_HALF_TWO:
3634    case AARCH64_OPND_SVE_I1_ZERO_ONE:
3635      {
3636	single_conv_t c;
3637	c.i = opnd->imm.value;
3638	snprintf (buf, size, "%s", style_imm (styler, "#%.1f", c.f));
3639	break;
3640      }
3641
3642    case AARCH64_OPND_SVE_PATTERN:
3643      if (optional_operand_p (opcode, idx)
3644	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3645	break;
3646      enum_value = opnd->imm.value;
3647      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3648      if (aarch64_sve_pattern_array[enum_value])
3649	snprintf (buf, size, "%s",
3650		  style_reg (styler, aarch64_sve_pattern_array[enum_value]));
3651      else
3652	snprintf (buf, size, "%s",
3653		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
3654      break;
3655
3656    case AARCH64_OPND_SVE_PATTERN_SCALED:
3657      if (optional_operand_p (opcode, idx)
3658	  && !opnd->shifter.operator_present
3659	  && opnd->imm.value == get_optional_operand_default_value (opcode))
3660	break;
3661      enum_value = opnd->imm.value;
3662      assert (enum_value < ARRAY_SIZE (aarch64_sve_pattern_array));
3663      if (aarch64_sve_pattern_array[opnd->imm.value])
3664	snprintf (buf, size, "%s",
3665		  style_reg (styler,
3666			     aarch64_sve_pattern_array[opnd->imm.value]));
3667      else
3668	snprintf (buf, size, "%s",
3669		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
3670      if (opnd->shifter.operator_present)
3671	{
3672	  size_t len = strlen (buf);
3673	  const char *shift_name
3674	    = aarch64_operand_modifiers[opnd->shifter.kind].name;
3675	  snprintf (buf + len, size - len, ", %s %s",
3676		    style_sub_mnem (styler, shift_name),
3677		    style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3678	}
3679      break;
3680
3681    case AARCH64_OPND_SVE_PRFOP:
3682      enum_value = opnd->imm.value;
3683      assert (enum_value < ARRAY_SIZE (aarch64_sve_prfop_array));
3684      if (aarch64_sve_prfop_array[enum_value])
3685	snprintf (buf, size, "%s",
3686		  style_reg (styler, aarch64_sve_prfop_array[enum_value]));
3687      else
3688	snprintf (buf, size, "%s",
3689		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
3690      break;
3691
3692    case AARCH64_OPND_IMM_MOV:
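      /* The raw hex value goes in BUF and the signed decimal value in
	 COMMENT; a disassembler caller would typically show the latter as
	 a trailing annotation, e.g. "mov w0, #0xfffffffb  // #-5".  */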
3693      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3694	{
3695	case 4:	/* e.g. MOV Wd, #<imm32>.  */
3696	    {
3697	      int imm32 = opnd->imm.value;
3698	      snprintf (buf, size, "%s",
3699			style_imm (styler, "#0x%-20x", imm32));
3700	      snprintf (comment, comment_size, "#%d", imm32);
3701	    }
3702	  break;
3703	case 8:	/* e.g. MOV Xd, #<imm64>.  */
3704	  snprintf (buf, size, "%s", style_imm (styler, "#0x%-20" PRIx64,
3705						opnd->imm.value));
3706	  snprintf (comment, comment_size, "#%" PRIi64, opnd->imm.value);
3707	  break;
3708	default:
3709	  snprintf (buf, size, "<invalid>");
3710	  break;
3711	}
3712      break;
3713
3714    case AARCH64_OPND_FPIMM0:
3715      snprintf (buf, size, "%s", style_imm (styler, "#0.0"));
3716      break;
3717
3718    case AARCH64_OPND_LIMM:
3719    case AARCH64_OPND_AIMM:
3720    case AARCH64_OPND_HALF:
3721    case AARCH64_OPND_SVE_INV_LIMM:
3722    case AARCH64_OPND_SVE_LIMM:
3723    case AARCH64_OPND_SVE_LIMM_MOV:
3724      if (opnd->shifter.amount)
3725	snprintf (buf, size, "%s, %s %s",
3726		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3727		  style_sub_mnem (styler, "lsl"),
3728		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3729      else
3730	snprintf (buf, size, "%s",
3731		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3732      break;
3733
3734    case AARCH64_OPND_SIMD_IMM:
3735    case AARCH64_OPND_SIMD_IMM_SFT:
3736      if ((! opnd->shifter.amount && opnd->shifter.kind == AARCH64_MOD_LSL)
3737	  || opnd->shifter.kind == AARCH64_MOD_NONE)
3738	snprintf (buf, size, "%s",
3739		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value));
3740      else
3741	snprintf (buf, size, "%s, %s %s",
3742		  style_imm (styler, "#0x%" PRIx64, opnd->imm.value),
3743		  style_sub_mnem (styler, aarch64_operand_modifiers[opnd->shifter.kind].name),
3744		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3745      break;
3746
3747    case AARCH64_OPND_SVE_AIMM:
3748    case AARCH64_OPND_SVE_ASIMM:
3749      if (opnd->shifter.amount)
3750	snprintf (buf, size, "%s, %s %s",
3751		  style_imm (styler, "#%" PRIi64, opnd->imm.value),
3752		  style_sub_mnem (styler, "lsl"),
3753		  style_imm (styler, "#%" PRIi64, opnd->shifter.amount));
3754      else
3755	snprintf (buf, size, "%s",
3756		  style_imm (styler, "#%" PRIi64, opnd->imm.value));
3757      break;
3758
3759    case AARCH64_OPND_FPIMM:
3760    case AARCH64_OPND_SIMD_FPIMM:
3761    case AARCH64_OPND_SVE_FPIMM8:
3762      switch (aarch64_get_qualifier_esize (opnds[0].qualifier))
3763	{
3764	case 2:	/* e.g. FMOV <Hd>, #<imm>.  */
3765	    {
3766	      half_conv_t c;
3767	      c.i = expand_fp_imm (2, opnd->imm.value);
3768	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3769	    }
3770	  break;
3771	case 4:	/* e.g. FMOV <Vd>.4S, #<imm>.  */
3772	    {
3773	      single_conv_t c;
3774	      c.i = expand_fp_imm (4, opnd->imm.value);
3775	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.f));
3776	    }
3777	  break;
	case 8:	/* e.g. FMOV <Dd>, #<imm>.  */
3779	    {
3780	      double_conv_t c;
3781	      c.i = expand_fp_imm (8, opnd->imm.value);
3782	      snprintf (buf, size, "%s", style_imm (styler, "#%.18e", c.d));
3783	    }
3784	  break;
3785	default:
3786	  snprintf (buf, size, "<invalid>");
3787	  break;
3788	}
3789      break;
3790
3791    case AARCH64_OPND_CCMP_IMM:
3792    case AARCH64_OPND_NZCV:
3793    case AARCH64_OPND_EXCEPTION:
3794    case AARCH64_OPND_UIMM4:
3795    case AARCH64_OPND_UIMM4_ADDG:
3796    case AARCH64_OPND_UIMM7:
3797    case AARCH64_OPND_UIMM10:
3798      if (optional_operand_p (opcode, idx)
3799	  && (opnd->imm.value ==
3800	      (int64_t) get_optional_operand_default_value (opcode)))
3801	/* Omit the operand, e.g. DCPS1.  */
3802	break;
3803      snprintf (buf, size, "%s",
3804		style_imm (styler, "#0x%x", (unsigned int) opnd->imm.value));
3805      break;
3806
3807    case AARCH64_OPND_COND:
3808    case AARCH64_OPND_COND1:
3809      snprintf (buf, size, "%s",
3810		style_sub_mnem (styler, opnd->cond->names[0]));
3811      num_conds = ARRAY_SIZE (opnd->cond->names);
3812      for (i = 1; i < num_conds && opnd->cond->names[i]; ++i)
3813	{
3814	  size_t len = comment != NULL ? strlen (comment) : 0;
3815	  if (i == 1)
3816	    snprintf (comment + len, comment_size - len, "%s = %s",
3817		      opnd->cond->names[0], opnd->cond->names[i]);
3818	  else
3819	    snprintf (comment + len, comment_size - len, ", %s",
3820		      opnd->cond->names[i]);
3821	}
3822      break;
3823
3824    case AARCH64_OPND_ADDR_ADRP:
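      /* ADRP addresses are 4KiB-page based: mask off the low 12 bits of
	 the PC and add the page offset carried by the immediate.  */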
3825      addr = ((pc + AARCH64_PCREL_OFFSET) & ~(uint64_t)0xfff)
3826	+ opnd->imm.value;
3827      if (pcrel_p)
3828	*pcrel_p = 1;
3829      if (address)
3830	*address = addr;
      /* This is not necessary during disassembly, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in BUF,
	 so here we do snprintf regardless.  */
3835      snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64 , addr));
3836      break;
3837
3838    case AARCH64_OPND_ADDR_PCREL14:
3839    case AARCH64_OPND_ADDR_PCREL19:
3840    case AARCH64_OPND_ADDR_PCREL21:
3841    case AARCH64_OPND_ADDR_PCREL26:
3842      addr = pc + AARCH64_PCREL_OFFSET + opnd->imm.value;
3843      if (pcrel_p)
3844	*pcrel_p = 1;
3845      if (address)
3846	*address = addr;
      /* This is not necessary during disassembly, as print_address_func
	 in the disassemble_info will take care of the printing.  But some
	 other callers may still be interested in getting the string in BUF,
	 so here we do snprintf regardless.  */
3851      snprintf (buf, size, "%s", style_addr (styler, "#0x%" PRIx64, addr));
3852      break;
3853
3854    case AARCH64_OPND_ADDR_SIMPLE:
3855    case AARCH64_OPND_SIMD_ADDR_SIMPLE:
3856    case AARCH64_OPND_SIMD_ADDR_POST:
3857      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3858      if (opnd->type == AARCH64_OPND_SIMD_ADDR_POST)
3859	{
3860	  if (opnd->addr.offset.is_reg)
3861	    snprintf (buf, size, "[%s], %s",
3862		      style_reg (styler, name),
3863		      style_reg (styler, "x%d", opnd->addr.offset.regno));
3864	  else
3865	    snprintf (buf, size, "[%s], %s",
3866		      style_reg (styler, name),
3867		      style_imm (styler, "#%d", opnd->addr.offset.imm));
3868	}
3869      else
3870	snprintf (buf, size, "[%s]", style_reg (styler, name));
3871      break;
3872
3873    case AARCH64_OPND_ADDR_REGOFF:
3874    case AARCH64_OPND_SVE_ADDR_R:
3875    case AARCH64_OPND_SVE_ADDR_RR:
3876    case AARCH64_OPND_SVE_ADDR_RR_LSL1:
3877    case AARCH64_OPND_SVE_ADDR_RR_LSL2:
3878    case AARCH64_OPND_SVE_ADDR_RR_LSL3:
3879    case AARCH64_OPND_SVE_ADDR_RR_LSL4:
3880    case AARCH64_OPND_SVE_ADDR_RX:
3881    case AARCH64_OPND_SVE_ADDR_RX_LSL1:
3882    case AARCH64_OPND_SVE_ADDR_RX_LSL2:
3883    case AARCH64_OPND_SVE_ADDR_RX_LSL3:
3884      print_register_offset_address
3885	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3886	 get_offset_int_reg_name (opnd), styler);
3887      break;
3888
3889    case AARCH64_OPND_SVE_ADDR_ZX:
3890      print_register_offset_address
3891	(buf, size, opnd,
3892	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3893	 get_64bit_int_reg_name (opnd->addr.offset.regno, 0), styler);
3894      break;
3895
3896    case AARCH64_OPND_SVE_ADDR_RZ:
3897    case AARCH64_OPND_SVE_ADDR_RZ_LSL1:
3898    case AARCH64_OPND_SVE_ADDR_RZ_LSL2:
3899    case AARCH64_OPND_SVE_ADDR_RZ_LSL3:
3900    case AARCH64_OPND_SVE_ADDR_RZ_XTW_14:
3901    case AARCH64_OPND_SVE_ADDR_RZ_XTW_22:
3902    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_14:
3903    case AARCH64_OPND_SVE_ADDR_RZ_XTW1_22:
3904    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_14:
3905    case AARCH64_OPND_SVE_ADDR_RZ_XTW2_22:
3906    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_14:
3907    case AARCH64_OPND_SVE_ADDR_RZ_XTW3_22:
3908      print_register_offset_address
3909	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3910	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3911	 styler);
3912      break;
3913
3914    case AARCH64_OPND_ADDR_SIMM7:
3915    case AARCH64_OPND_ADDR_SIMM9:
3916    case AARCH64_OPND_ADDR_SIMM9_2:
3917    case AARCH64_OPND_ADDR_SIMM10:
3918    case AARCH64_OPND_ADDR_SIMM11:
3919    case AARCH64_OPND_ADDR_SIMM13:
3920    case AARCH64_OPND_ADDR_OFFSET:
3921    case AARCH64_OPND_SME_ADDR_RI_U4xVL:
3922    case AARCH64_OPND_SVE_ADDR_RI_S4x16:
3923    case AARCH64_OPND_SVE_ADDR_RI_S4x32:
3924    case AARCH64_OPND_SVE_ADDR_RI_S4xVL:
3925    case AARCH64_OPND_SVE_ADDR_RI_S4x2xVL:
3926    case AARCH64_OPND_SVE_ADDR_RI_S4x3xVL:
3927    case AARCH64_OPND_SVE_ADDR_RI_S4x4xVL:
3928    case AARCH64_OPND_SVE_ADDR_RI_S6xVL:
3929    case AARCH64_OPND_SVE_ADDR_RI_S9xVL:
3930    case AARCH64_OPND_SVE_ADDR_RI_U6:
3931    case AARCH64_OPND_SVE_ADDR_RI_U6x2:
3932    case AARCH64_OPND_SVE_ADDR_RI_U6x4:
3933    case AARCH64_OPND_SVE_ADDR_RI_U6x8:
3934      print_immediate_offset_address
3935	(buf, size, opnd, get_64bit_int_reg_name (opnd->addr.base_regno, 1),
3936	 styler);
3937      break;
3938
3939    case AARCH64_OPND_SVE_ADDR_ZI_U5:
3940    case AARCH64_OPND_SVE_ADDR_ZI_U5x2:
3941    case AARCH64_OPND_SVE_ADDR_ZI_U5x4:
3942    case AARCH64_OPND_SVE_ADDR_ZI_U5x8:
3943      print_immediate_offset_address
3944	(buf, size, opnd,
3945	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3946	 styler);
3947      break;
3948
3949    case AARCH64_OPND_SVE_ADDR_ZZ_LSL:
3950    case AARCH64_OPND_SVE_ADDR_ZZ_SXTW:
3951    case AARCH64_OPND_SVE_ADDR_ZZ_UXTW:
3952      print_register_offset_address
3953	(buf, size, opnd,
3954	 get_addr_sve_reg_name (opnd->addr.base_regno, opnd->qualifier),
3955	 get_addr_sve_reg_name (opnd->addr.offset.regno, opnd->qualifier),
3956	 styler);
3957      break;
3958
3959    case AARCH64_OPND_ADDR_UIMM12:
3960      name = get_64bit_int_reg_name (opnd->addr.base_regno, 1);
3961      if (opnd->addr.offset.imm)
3962	snprintf (buf, size, "[%s, %s]",
3963		  style_reg (styler, name),
3964		  style_imm (styler, "#%d", opnd->addr.offset.imm));
3965      else
3966	snprintf (buf, size, "[%s]", style_reg (styler, name));
3967      break;
3968
3969    case AARCH64_OPND_SYSREG:
3970      for (i = 0; aarch64_sys_regs[i].name; ++i)
3971	{
3972	  const aarch64_sys_reg *sr = aarch64_sys_regs + i;
3973
3974	  bool exact_match
3975	    = (!(sr->flags & (F_REG_READ | F_REG_WRITE))
3976	    || (sr->flags & opnd->sysreg.flags) == opnd->sysreg.flags)
3977	    && AARCH64_CPU_HAS_FEATURE (features, sr->features);
3978
	  /* Try to find an exact match, but if that fails, return the first
	     partial match that was found.  */
3981	  if (aarch64_sys_regs[i].value == opnd->sysreg.value
3982	      && ! aarch64_sys_reg_deprecated_p (aarch64_sys_regs[i].flags)
3983	      && (name == NULL || exact_match))
3984	    {
3985	      name = aarch64_sys_regs[i].name;
3986	      if (exact_match)
3987		{
3988		  if (notes)
3989		    *notes = NULL;
3990		  break;
3991		}
3992
	      /* If we didn't match exactly, the presence of a flag
		 indicates what we didn't want for this instruction.  e.g. If
		 F_REG_READ is there, that means we were looking for a write
		 register.  See aarch64_ext_sysreg.  */
	      if (notes)
		{
		  if (aarch64_sys_regs[i].flags & F_REG_WRITE)
		    *notes = _("reading from a write-only register");
		  else if (aarch64_sys_regs[i].flags & F_REG_READ)
		    *notes = _("writing to a read-only register");
		}
4001	    }
4002	}
4003
4004      if (name)
4005	snprintf (buf, size, "%s", style_reg (styler, name));
4006      else
4007	{
4008	  /* Implementation defined system register.  */
4009	  unsigned int value = opnd->sysreg.value;
4010	  snprintf (buf, size, "%s",
4011		    style_reg (styler, "s%u_%u_c%u_c%u_%u",
4012			       (value >> 14) & 0x3, (value >> 11) & 0x7,
4013			       (value >> 7) & 0xf, (value >> 3) & 0xf,
4014			       value & 0x7));
4015	}
4016      break;
4017
4018    case AARCH64_OPND_PSTATEFIELD:
4019      for (i = 0; aarch64_pstatefields[i].name; ++i)
4020        if (aarch64_pstatefields[i].value == opnd->pstatefield)
4021          {
4022            /* PSTATEFIELD name is encoded partially in CRm[3:1] for SVCRSM,
4023               SVCRZA and SVCRSMZA.  */
4024            uint32_t flags = aarch64_pstatefields[i].flags;
4025            if (flags & F_REG_IN_CRM
4026                && (PSTATE_DECODE_CRM (opnd->sysreg.flags)
4027                    != PSTATE_DECODE_CRM (flags)))
4028              continue;
4029            break;
4030          }
4031      assert (aarch64_pstatefields[i].name);
4032      snprintf (buf, size, "%s",
4033		style_reg (styler, aarch64_pstatefields[i].name));
4034      break;
4035
4036    case AARCH64_OPND_SYSREG_AT:
4037    case AARCH64_OPND_SYSREG_DC:
4038    case AARCH64_OPND_SYSREG_IC:
4039    case AARCH64_OPND_SYSREG_TLBI:
4040    case AARCH64_OPND_SYSREG_SR:
4041      snprintf (buf, size, "%s", style_reg (styler, opnd->sysins_op->name));
4042      break;
4043
4044    case AARCH64_OPND_BARRIER:
4045    case AARCH64_OPND_BARRIER_DSB_NXS:
4046      {
4047	if (opnd->barrier->name[0] == '#')
4048	  snprintf (buf, size, "%s", style_imm (styler, opnd->barrier->name));
4049	else
4050	  snprintf (buf, size, "%s",
4051		    style_sub_mnem (styler, opnd->barrier->name));
4052      }
4053      break;
4054
4055    case AARCH64_OPND_BARRIER_ISB:
      /* The operand can be omitted; e.g. plain ISB defaults to the SY
	 option.  */
4057      if (! optional_operand_p (opcode, idx)
4058	  || (opnd->barrier->value
4059	      != get_optional_operand_default_value (opcode)))
4060	snprintf (buf, size, "%s",
4061		  style_imm (styler, "#0x%x", opnd->barrier->value));
4062      break;
4063
4064    case AARCH64_OPND_PRFOP:
4065      if (opnd->prfop->name != NULL)
4066	snprintf (buf, size, "%s", style_sub_mnem (styler, opnd->prfop->name));
4067      else
4068	snprintf (buf, size, "%s", style_imm (styler, "#0x%02x",
4069					      opnd->prfop->value));
4070      break;
4071
4072    case AARCH64_OPND_BARRIER_PSB:
4073      snprintf (buf, size, "%s", style_sub_mnem (styler, "csync"));
4074      break;
4075
4076    case AARCH64_OPND_BTI_TARGET:
4077      if ((HINT_FLAG (opnd->hint_option->value) & HINT_OPD_F_NOPRINT) == 0)
4078	snprintf (buf, size, "%s",
4079		  style_sub_mnem (styler, opnd->hint_option->name));
4080      break;
4081
4082    case AARCH64_OPND_MOPS_ADDR_Rd:
4083    case AARCH64_OPND_MOPS_ADDR_Rs:
4084      snprintf (buf, size, "[%s]!",
4085		style_reg (styler,
4086			   get_int_reg_name (opnd->reg.regno,
4087					     AARCH64_OPND_QLF_X, 0)));
4088      break;
4089
4090    case AARCH64_OPND_MOPS_WB_Rn:
4091      snprintf (buf, size, "%s!",
4092		style_reg (styler, get_int_reg_name (opnd->reg.regno,
4093						     AARCH64_OPND_QLF_X, 0)));
4094      break;
4095
4096    default:
4097      snprintf (buf, size, "<invalid>");
4098      break;
4099    }
4100}
4101
4102#define CPENC(op0,op1,crn,crm,op2) \
4103  ((((op0) << 19) | ((op1) << 16) | ((crn) << 12) | ((crm) << 8) | ((op2) << 5)) >> 5)
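/* The resulting value places op0 in bits [15:14], op1 in [13:11], CRn in
   [10:7], CRm in [6:3] and op2 in [2:0]; this is the layout that
   aarch64_print_operand above unpacks when it prints an implementation
   defined register as s<op0>_<op1>_c<CRn>_c<CRm>_<op2>.  */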
4104  /* for 3.9.3 Instructions for Accessing Special Purpose Registers */
4105#define CPEN_(op1,crm,op2) CPENC(3,(op1),4,(crm),(op2))
4106  /* for 3.9.10 System Instructions */
4107#define CPENS(op1,crn,crm,op2) CPENC(1,(op1),(crn),(crm),(op2))
4108
4109#define C0  0
4110#define C1  1
4111#define C2  2
4112#define C3  3
4113#define C4  4
4114#define C5  5
4115#define C6  6
4116#define C7  7
4117#define C8  8
4118#define C9  9
4119#define C10 10
4120#define C11 11
4121#define C12 12
4122#define C13 13
4123#define C14 14
4124#define C15 15
4125
4126#define SYSREG(name, encoding, flags, features) \
4127  { name, encoding, flags, features }
4128
4129#define SR_CORE(n,e,f) SYSREG (n,e,f,0)
4130
4131#define SR_FEAT(n,e,f,feat) \
4132  SYSREG ((n), (e), (f) | F_ARCHEXT, AARCH64_FEATURE_##feat)
4133
4134#define SR_FEAT2(n,e,f,fe1,fe2) \
4135  SYSREG ((n), (e), (f) | F_ARCHEXT, \
4136	  AARCH64_FEATURE_##fe1 | AARCH64_FEATURE_##fe2)
4137
4138#define SR_V8_1_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_1)
4139#define SR_V8_4_A(n,e,f) SR_FEAT2(n,e,f,V8_A,V8_4)
4140
4141#define SR_V8_A(n,e,f)	  SR_FEAT (n,e,f,V8_A)
4142#define SR_V8_R(n,e,f)	  SR_FEAT (n,e,f,V8_R)
4143#define SR_V8_1(n,e,f)	  SR_FEAT (n,e,f,V8_1)
4144#define SR_V8_2(n,e,f)	  SR_FEAT (n,e,f,V8_2)
4145#define SR_V8_3(n,e,f)	  SR_FEAT (n,e,f,V8_3)
4146#define SR_V8_4(n,e,f)	  SR_FEAT (n,e,f,V8_4)
4147#define SR_V8_6(n,e,f)	  SR_FEAT (n,e,f,V8_6)
4148#define SR_V8_7(n,e,f)	  SR_FEAT (n,e,f,V8_7)
4149#define SR_V8_8(n,e,f)	  SR_FEAT (n,e,f,V8_8)
4150/* Has no separate libopcodes feature flag, but separated out for clarity.  */
4151#define SR_GIC(n,e,f)	  SR_CORE (n,e,f)
4152/* Has no separate libopcodes feature flag, but separated out for clarity.  */
4153#define SR_AMU(n,e,f)	  SR_FEAT (n,e,f,V8_4)
4154#define SR_LOR(n,e,f)	  SR_FEAT (n,e,f,LOR)
4155#define SR_PAN(n,e,f)	  SR_FEAT (n,e,f,PAN)
4156#define SR_RAS(n,e,f)	  SR_FEAT (n,e,f,RAS)
4157#define SR_RNG(n,e,f)	  SR_FEAT (n,e,f,RNG)
4158#define SR_SME(n,e,f)	  SR_FEAT (n,e,f,SME)
4159#define SR_SSBS(n,e,f)	  SR_FEAT (n,e,f,SSBS)
4160#define SR_SVE(n,e,f)	  SR_FEAT (n,e,f,SVE)
4161#define SR_ID_PFR2(n,e,f) SR_FEAT (n,e,f,ID_PFR2)
4162#define SR_PROFILE(n,e,f) SR_FEAT (n,e,f,PROFILE)
4163#define SR_MEMTAG(n,e,f)  SR_FEAT (n,e,f,MEMTAG)
4164#define SR_SCXTNUM(n,e,f) SR_FEAT (n,e,f,SCXTNUM)
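
/* As an illustration, SR_SVE ("zcr_el1", CPENC (3,0,C1,C2,0), 0) below
   expands to an entry with F_ARCHEXT set and AARCH64_FEATURE_SVE as its
   feature set, so the register name is only matched when SVE is enabled.  */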
4165
4166#define SR_EXPAND_ELx(f,x) \
4167  f (x, 1),  \
4168  f (x, 2),  \
4169  f (x, 3),  \
4170  f (x, 4),  \
4171  f (x, 5),  \
4172  f (x, 6),  \
4173  f (x, 7),  \
4174  f (x, 8),  \
4175  f (x, 9),  \
4176  f (x, 10), \
4177  f (x, 11), \
4178  f (x, 12), \
4179  f (x, 13), \
4180  f (x, 14), \
4181  f (x, 15),
4182
4183#define SR_EXPAND_EL12(f) \
4184  SR_EXPAND_ELx (f,1) \
4185  SR_EXPAND_ELx (f,2)
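
/* SR_EXPAND_ELx instantiates F for register numbers 1 to 15 at exception
   level X, and SR_EXPAND_EL12 does so for both EL1 and EL2; they are used
   with PRBARn_ELx and PRLARn_ELx below to generate the numbered MPU
   region registers.  */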
4186
/* TODO: there is one more issue that needs to be resolved:
   1. handle cpu-implementation-defined system registers.

   Note that the F_REG_{READ,WRITE} flags mean read-only and write-only
   respectively.  If neither of these is set then the register is read-write.  */
4192const aarch64_sys_reg aarch64_sys_regs [] =
4193{
4194  SR_CORE ("spsr_el1",		CPEN_ (0,C0,0),		0), /* = spsr_svc.  */
4195  SR_V8_1 ("spsr_el12",		CPEN_ (5,C0,0),		0),
4196  SR_CORE ("elr_el1",		CPEN_ (0,C0,1),		0),
4197  SR_V8_1 ("elr_el12",		CPEN_ (5,C0,1),		0),
4198  SR_CORE ("sp_el0",		CPEN_ (0,C1,0),		0),
4199  SR_CORE ("spsel",		CPEN_ (0,C2,0),		0),
4200  SR_CORE ("daif",		CPEN_ (3,C2,1),		0),
4201  SR_CORE ("currentel",		CPEN_ (0,C2,2),		F_REG_READ),
4202  SR_PAN  ("pan",		CPEN_ (0,C2,3),		0),
4203  SR_V8_2 ("uao",		CPEN_ (0,C2,4),		0),
4204  SR_CORE ("nzcv",		CPEN_ (3,C2,0),		0),
4205  SR_SSBS ("ssbs",		CPEN_ (3,C2,6),		0),
4206  SR_CORE ("fpcr",		CPEN_ (3,C4,0),		0),
4207  SR_CORE ("fpsr",		CPEN_ (3,C4,1),		0),
4208  SR_CORE ("dspsr_el0",		CPEN_ (3,C5,0),		0),
4209  SR_CORE ("dlr_el0",		CPEN_ (3,C5,1),		0),
4210  SR_CORE ("spsr_el2",		CPEN_ (4,C0,0),		0), /* = spsr_hyp.  */
4211  SR_CORE ("elr_el2",		CPEN_ (4,C0,1),		0),
4212  SR_CORE ("sp_el1",		CPEN_ (4,C1,0),		0),
4213  SR_CORE ("spsr_irq",		CPEN_ (4,C3,0),		0),
4214  SR_CORE ("spsr_abt",		CPEN_ (4,C3,1),		0),
4215  SR_CORE ("spsr_und",		CPEN_ (4,C3,2),		0),
4216  SR_CORE ("spsr_fiq",		CPEN_ (4,C3,3),		0),
4217  SR_CORE ("spsr_el3",		CPEN_ (6,C0,0),		0),
4218  SR_CORE ("elr_el3",		CPEN_ (6,C0,1),		0),
4219  SR_CORE ("sp_el2",		CPEN_ (6,C1,0),		0),
4220  SR_CORE ("spsr_svc",		CPEN_ (0,C0,0),		F_DEPRECATED), /* = spsr_el1.  */
4221  SR_CORE ("spsr_hyp",		CPEN_ (4,C0,0),		F_DEPRECATED), /* = spsr_el2.  */
4222  SR_CORE ("midr_el1",		CPENC (3,0,C0,C0,0),	F_REG_READ),
4223  SR_CORE ("ctr_el0",		CPENC (3,3,C0,C0,1),	F_REG_READ),
4224  SR_CORE ("mpidr_el1",		CPENC (3,0,C0,C0,5),	F_REG_READ),
4225  SR_CORE ("revidr_el1",	CPENC (3,0,C0,C0,6),	F_REG_READ),
4226  SR_CORE ("aidr_el1",		CPENC (3,1,C0,C0,7),	F_REG_READ),
4227  SR_CORE ("dczid_el0",		CPENC (3,3,C0,C0,7),	F_REG_READ),
4228  SR_CORE ("id_dfr0_el1",	CPENC (3,0,C0,C1,2),	F_REG_READ),
4229  SR_CORE ("id_dfr1_el1",	CPENC (3,0,C0,C3,5),	F_REG_READ),
4230  SR_CORE ("id_pfr0_el1",	CPENC (3,0,C0,C1,0),	F_REG_READ),
4231  SR_CORE ("id_pfr1_el1",	CPENC (3,0,C0,C1,1),	F_REG_READ),
4232  SR_ID_PFR2 ("id_pfr2_el1",	CPENC (3,0,C0,C3,4),	F_REG_READ),
4233  SR_CORE ("id_afr0_el1",	CPENC (3,0,C0,C1,3),	F_REG_READ),
4234  SR_CORE ("id_mmfr0_el1",	CPENC (3,0,C0,C1,4),	F_REG_READ),
4235  SR_CORE ("id_mmfr1_el1",	CPENC (3,0,C0,C1,5),	F_REG_READ),
4236  SR_CORE ("id_mmfr2_el1",	CPENC (3,0,C0,C1,6),	F_REG_READ),
4237  SR_CORE ("id_mmfr3_el1",	CPENC (3,0,C0,C1,7),	F_REG_READ),
4238  SR_CORE ("id_mmfr4_el1",	CPENC (3,0,C0,C2,6),	F_REG_READ),
4239  SR_CORE ("id_mmfr5_el1",	CPENC (3,0,C0,C3,6),	F_REG_READ),
4240  SR_CORE ("id_isar0_el1",	CPENC (3,0,C0,C2,0),	F_REG_READ),
4241  SR_CORE ("id_isar1_el1",	CPENC (3,0,C0,C2,1),	F_REG_READ),
4242  SR_CORE ("id_isar2_el1",	CPENC (3,0,C0,C2,2),	F_REG_READ),
4243  SR_CORE ("id_isar3_el1",	CPENC (3,0,C0,C2,3),	F_REG_READ),
4244  SR_CORE ("id_isar4_el1",	CPENC (3,0,C0,C2,4),	F_REG_READ),
4245  SR_CORE ("id_isar5_el1",	CPENC (3,0,C0,C2,5),	F_REG_READ),
4246  SR_CORE ("id_isar6_el1",	CPENC (3,0,C0,C2,7),	F_REG_READ),
4247  SR_CORE ("mvfr0_el1",		CPENC (3,0,C0,C3,0),	F_REG_READ),
4248  SR_CORE ("mvfr1_el1",		CPENC (3,0,C0,C3,1),	F_REG_READ),
4249  SR_CORE ("mvfr2_el1",		CPENC (3,0,C0,C3,2),	F_REG_READ),
4250  SR_CORE ("ccsidr_el1",	CPENC (3,1,C0,C0,0),	F_REG_READ),
4251  SR_V8_3 ("ccsidr2_el1",       CPENC (3,1,C0,C0,2),    F_REG_READ),
4252  SR_CORE ("id_aa64pfr0_el1",	CPENC (3,0,C0,C4,0),	F_REG_READ),
4253  SR_CORE ("id_aa64pfr1_el1",	CPENC (3,0,C0,C4,1),	F_REG_READ),
4254  SR_CORE ("id_aa64dfr0_el1",	CPENC (3,0,C0,C5,0),	F_REG_READ),
4255  SR_CORE ("id_aa64dfr1_el1",	CPENC (3,0,C0,C5,1),	F_REG_READ),
4256  SR_CORE ("id_aa64isar0_el1",	CPENC (3,0,C0,C6,0),	F_REG_READ),
4257  SR_CORE ("id_aa64isar1_el1",	CPENC (3,0,C0,C6,1),	F_REG_READ),
4258  SR_CORE ("id_aa64isar2_el1",	CPENC (3,0,C0,C6,2),	F_REG_READ),
4259  SR_CORE ("id_aa64mmfr0_el1",	CPENC (3,0,C0,C7,0),	F_REG_READ),
4260  SR_CORE ("id_aa64mmfr1_el1",	CPENC (3,0,C0,C7,1),	F_REG_READ),
4261  SR_CORE ("id_aa64mmfr2_el1",	CPENC (3,0,C0,C7,2),	F_REG_READ),
4262  SR_CORE ("id_aa64afr0_el1",	CPENC (3,0,C0,C5,4),	F_REG_READ),
4263  SR_CORE ("id_aa64afr1_el1",	CPENC (3,0,C0,C5,5),	F_REG_READ),
4264  SR_SVE  ("id_aa64zfr0_el1",	CPENC (3,0,C0,C4,4),	F_REG_READ),
4265  SR_CORE ("clidr_el1",		CPENC (3,1,C0,C0,1),	F_REG_READ),
4266  SR_CORE ("csselr_el1",	CPENC (3,2,C0,C0,0),	0),
4267  SR_CORE ("vpidr_el2",		CPENC (3,4,C0,C0,0),	0),
4268  SR_CORE ("vmpidr_el2",	CPENC (3,4,C0,C0,5),	0),
4269  SR_CORE ("sctlr_el1",		CPENC (3,0,C1,C0,0),	0),
4270  SR_CORE ("sctlr_el2",		CPENC (3,4,C1,C0,0),	0),
4271  SR_CORE ("sctlr_el3",		CPENC (3,6,C1,C0,0),	0),
4272  SR_V8_1 ("sctlr_el12",	CPENC (3,5,C1,C0,0),	0),
4273  SR_CORE ("actlr_el1",		CPENC (3,0,C1,C0,1),	0),
4274  SR_CORE ("actlr_el2",		CPENC (3,4,C1,C0,1),	0),
4275  SR_CORE ("actlr_el3",		CPENC (3,6,C1,C0,1),	0),
4276  SR_CORE ("cpacr_el1",		CPENC (3,0,C1,C0,2),	0),
4277  SR_V8_1 ("cpacr_el12",	CPENC (3,5,C1,C0,2),	0),
4278  SR_CORE ("cptr_el2",		CPENC (3,4,C1,C1,2),	0),
4279  SR_CORE ("cptr_el3",		CPENC (3,6,C1,C1,2),	0),
4280  SR_CORE ("scr_el3",		CPENC (3,6,C1,C1,0),	0),
4281  SR_CORE ("hcr_el2",		CPENC (3,4,C1,C1,0),	0),
4282  SR_CORE ("mdcr_el2",		CPENC (3,4,C1,C1,1),	0),
4283  SR_CORE ("mdcr_el3",		CPENC (3,6,C1,C3,1),	0),
4284  SR_CORE ("hstr_el2",		CPENC (3,4,C1,C1,3),	0),
4285  SR_CORE ("hacr_el2",		CPENC (3,4,C1,C1,7),	0),
4286  SR_SVE  ("zcr_el1",		CPENC (3,0,C1,C2,0),	0),
4287  SR_SVE  ("zcr_el12",		CPENC (3,5,C1,C2,0),	0),
4288  SR_SVE  ("zcr_el2",		CPENC (3,4,C1,C2,0),	0),
4289  SR_SVE  ("zcr_el3",		CPENC (3,6,C1,C2,0),	0),
4290  SR_CORE ("ttbr0_el1",		CPENC (3,0,C2,C0,0),	0),
4291  SR_CORE ("ttbr1_el1",		CPENC (3,0,C2,C0,1),	0),
4292  SR_V8_A ("ttbr0_el2",		CPENC (3,4,C2,C0,0),	0),
4293  SR_V8_1_A ("ttbr1_el2",	CPENC (3,4,C2,C0,1),	0),
4294  SR_CORE ("ttbr0_el3",		CPENC (3,6,C2,C0,0),	0),
4295  SR_V8_1 ("ttbr0_el12",	CPENC (3,5,C2,C0,0),	0),
4296  SR_V8_1 ("ttbr1_el12",	CPENC (3,5,C2,C0,1),	0),
4297  SR_V8_A ("vttbr_el2",		CPENC (3,4,C2,C1,0),	0),
4298  SR_CORE ("tcr_el1",		CPENC (3,0,C2,C0,2),	0),
4299  SR_CORE ("tcr_el2",		CPENC (3,4,C2,C0,2),	0),
4300  SR_CORE ("tcr_el3",		CPENC (3,6,C2,C0,2),	0),
4301  SR_V8_1 ("tcr_el12",		CPENC (3,5,C2,C0,2),	0),
4302  SR_CORE ("vtcr_el2",		CPENC (3,4,C2,C1,2),	0),
4303  SR_V8_3 ("apiakeylo_el1",	CPENC (3,0,C2,C1,0),	0),
4304  SR_V8_3 ("apiakeyhi_el1",	CPENC (3,0,C2,C1,1),	0),
4305  SR_V8_3 ("apibkeylo_el1",	CPENC (3,0,C2,C1,2),	0),
4306  SR_V8_3 ("apibkeyhi_el1",	CPENC (3,0,C2,C1,3),	0),
4307  SR_V8_3 ("apdakeylo_el1",	CPENC (3,0,C2,C2,0),	0),
4308  SR_V8_3 ("apdakeyhi_el1",	CPENC (3,0,C2,C2,1),	0),
4309  SR_V8_3 ("apdbkeylo_el1",	CPENC (3,0,C2,C2,2),	0),
4310  SR_V8_3 ("apdbkeyhi_el1",	CPENC (3,0,C2,C2,3),	0),
4311  SR_V8_3 ("apgakeylo_el1",	CPENC (3,0,C2,C3,0),	0),
4312  SR_V8_3 ("apgakeyhi_el1",	CPENC (3,0,C2,C3,1),	0),
4313  SR_CORE ("afsr0_el1",		CPENC (3,0,C5,C1,0),	0),
4314  SR_CORE ("afsr1_el1",		CPENC (3,0,C5,C1,1),	0),
4315  SR_CORE ("afsr0_el2",		CPENC (3,4,C5,C1,0),	0),
4316  SR_CORE ("afsr1_el2",		CPENC (3,4,C5,C1,1),	0),
4317  SR_CORE ("afsr0_el3",		CPENC (3,6,C5,C1,0),	0),
4318  SR_V8_1 ("afsr0_el12",	CPENC (3,5,C5,C1,0),	0),
4319  SR_CORE ("afsr1_el3",		CPENC (3,6,C5,C1,1),	0),
4320  SR_V8_1 ("afsr1_el12",	CPENC (3,5,C5,C1,1),	0),
4321  SR_CORE ("esr_el1",		CPENC (3,0,C5,C2,0),	0),
4322  SR_CORE ("esr_el2",		CPENC (3,4,C5,C2,0),	0),
4323  SR_CORE ("esr_el3",		CPENC (3,6,C5,C2,0),	0),
4324  SR_V8_1 ("esr_el12",		CPENC (3,5,C5,C2,0),	0),
4325  SR_RAS  ("vsesr_el2",		CPENC (3,4,C5,C2,3),	0),
4326  SR_CORE ("fpexc32_el2",	CPENC (3,4,C5,C3,0),	0),
4327  SR_RAS  ("erridr_el1",	CPENC (3,0,C5,C3,0),	F_REG_READ),
4328  SR_RAS  ("errselr_el1",	CPENC (3,0,C5,C3,1),	0),
4329  SR_RAS  ("erxfr_el1",		CPENC (3,0,C5,C4,0),	F_REG_READ),
4330  SR_RAS  ("erxctlr_el1",	CPENC (3,0,C5,C4,1),	0),
4331  SR_RAS  ("erxstatus_el1",	CPENC (3,0,C5,C4,2),	0),
4332  SR_RAS  ("erxaddr_el1",	CPENC (3,0,C5,C4,3),	0),
4333  SR_RAS  ("erxmisc0_el1",	CPENC (3,0,C5,C5,0),	0),
4334  SR_RAS  ("erxmisc1_el1",	CPENC (3,0,C5,C5,1),	0),
4335  SR_RAS  ("erxmisc2_el1",	CPENC (3,0,C5,C5,2),	0),
4336  SR_RAS  ("erxmisc3_el1",	CPENC (3,0,C5,C5,3),	0),
4337  SR_RAS  ("erxpfgcdn_el1",	CPENC (3,0,C5,C4,6),	0),
4338  SR_RAS  ("erxpfgctl_el1",	CPENC (3,0,C5,C4,5),	0),
4339  SR_RAS  ("erxpfgf_el1",	CPENC (3,0,C5,C4,4),	F_REG_READ),
4340  SR_CORE ("far_el1",		CPENC (3,0,C6,C0,0),	0),
4341  SR_CORE ("far_el2",		CPENC (3,4,C6,C0,0),	0),
4342  SR_CORE ("far_el3",		CPENC (3,6,C6,C0,0),	0),
4343  SR_V8_1 ("far_el12",		CPENC (3,5,C6,C0,0),	0),
4344  SR_CORE ("hpfar_el2",		CPENC (3,4,C6,C0,4),	0),
4345  SR_CORE ("par_el1",		CPENC (3,0,C7,C4,0),	0),
4346  SR_CORE ("mair_el1",		CPENC (3,0,C10,C2,0),	0),
4347  SR_CORE ("mair_el2",		CPENC (3,4,C10,C2,0),	0),
4348  SR_CORE ("mair_el3",		CPENC (3,6,C10,C2,0),	0),
4349  SR_V8_1 ("mair_el12",		CPENC (3,5,C10,C2,0),	0),
4350  SR_CORE ("amair_el1",		CPENC (3,0,C10,C3,0),	0),
4351  SR_CORE ("amair_el2",		CPENC (3,4,C10,C3,0),	0),
4352  SR_CORE ("amair_el3",		CPENC (3,6,C10,C3,0),	0),
4353  SR_V8_1 ("amair_el12",	CPENC (3,5,C10,C3,0),	0),
4354  SR_CORE ("vbar_el1",		CPENC (3,0,C12,C0,0),	0),
4355  SR_CORE ("vbar_el2",		CPENC (3,4,C12,C0,0),	0),
4356  SR_CORE ("vbar_el3",		CPENC (3,6,C12,C0,0),	0),
4357  SR_V8_1 ("vbar_el12",		CPENC (3,5,C12,C0,0),	0),
4358  SR_CORE ("rvbar_el1",		CPENC (3,0,C12,C0,1),	F_REG_READ),
4359  SR_CORE ("rvbar_el2",		CPENC (3,4,C12,C0,1),	F_REG_READ),
4360  SR_CORE ("rvbar_el3",		CPENC (3,6,C12,C0,1),	F_REG_READ),
4361  SR_CORE ("rmr_el1",		CPENC (3,0,C12,C0,2),	0),
4362  SR_CORE ("rmr_el2",		CPENC (3,4,C12,C0,2),	0),
4363  SR_CORE ("rmr_el3",		CPENC (3,6,C12,C0,2),	0),
4364  SR_CORE ("isr_el1",		CPENC (3,0,C12,C1,0),	F_REG_READ),
4365  SR_RAS  ("disr_el1",		CPENC (3,0,C12,C1,1),	0),
4366  SR_RAS  ("vdisr_el2",		CPENC (3,4,C12,C1,1),	0),
4367  SR_CORE ("contextidr_el1",	CPENC (3,0,C13,C0,1),	0),
4368  SR_V8_1 ("contextidr_el2",	CPENC (3,4,C13,C0,1),	0),
4369  SR_V8_1 ("contextidr_el12",	CPENC (3,5,C13,C0,1),	0),
4370  SR_RNG  ("rndr",		CPENC (3,3,C2,C4,0),	F_REG_READ),
4371  SR_RNG  ("rndrrs",		CPENC (3,3,C2,C4,1),	F_REG_READ),
4372  SR_MEMTAG ("tco",		CPENC (3,3,C4,C2,7),	0),
4373  SR_MEMTAG ("tfsre0_el1",	CPENC (3,0,C5,C6,1),	0),
4374  SR_MEMTAG ("tfsr_el1",	CPENC (3,0,C5,C6,0),	0),
4375  SR_MEMTAG ("tfsr_el2",	CPENC (3,4,C5,C6,0),	0),
4376  SR_MEMTAG ("tfsr_el3",	CPENC (3,6,C5,C6,0),	0),
4377  SR_MEMTAG ("tfsr_el12",	CPENC (3,5,C5,C6,0),	0),
4378  SR_MEMTAG ("rgsr_el1",	CPENC (3,0,C1,C0,5),	0),
4379  SR_MEMTAG ("gcr_el1",		CPENC (3,0,C1,C0,6),	0),
4380  SR_MEMTAG ("gmid_el1",	CPENC (3,1,C0,C0,4),	F_REG_READ),
4381  SR_CORE ("tpidr_el0",		CPENC (3,3,C13,C0,2),	0),
4382  SR_CORE ("tpidrro_el0",       CPENC (3,3,C13,C0,3),	0),
4383  SR_CORE ("tpidr_el1",		CPENC (3,0,C13,C0,4),	0),
4384  SR_CORE ("tpidr_el2",		CPENC (3,4,C13,C0,2),	0),
4385  SR_CORE ("tpidr_el3",		CPENC (3,6,C13,C0,2),	0),
4386  SR_SCXTNUM ("scxtnum_el0",	CPENC (3,3,C13,C0,7),	0),
4387  SR_SCXTNUM ("scxtnum_el1",	CPENC (3,0,C13,C0,7),	0),
4388  SR_SCXTNUM ("scxtnum_el2",	CPENC (3,4,C13,C0,7),	0),
4389  SR_SCXTNUM ("scxtnum_el12",   CPENC (3,5,C13,C0,7),	0),
4390  SR_SCXTNUM ("scxtnum_el3",    CPENC (3,6,C13,C0,7),	0),
4391  SR_CORE ("teecr32_el1",       CPENC (2,2,C0, C0,0),	0), /* See section 3.9.7.1.  */
4392  SR_CORE ("cntfrq_el0",	CPENC (3,3,C14,C0,0),	0),
4393  SR_CORE ("cntpct_el0",	CPENC (3,3,C14,C0,1),	F_REG_READ),
4394  SR_CORE ("cntvct_el0",	CPENC (3,3,C14,C0,2),	F_REG_READ),
4395  SR_CORE ("cntvoff_el2",       CPENC (3,4,C14,C0,3),	0),
4396  SR_CORE ("cntkctl_el1",       CPENC (3,0,C14,C1,0),	0),
4397  SR_V8_1 ("cntkctl_el12",	CPENC (3,5,C14,C1,0),	0),
4398  SR_CORE ("cnthctl_el2",	CPENC (3,4,C14,C1,0),	0),
4399  SR_CORE ("cntp_tval_el0",	CPENC (3,3,C14,C2,0),	0),
4400  SR_V8_1 ("cntp_tval_el02",	CPENC (3,5,C14,C2,0),	0),
4401  SR_CORE ("cntp_ctl_el0",      CPENC (3,3,C14,C2,1),	0),
4402  SR_V8_1 ("cntp_ctl_el02",	CPENC (3,5,C14,C2,1),	0),
4403  SR_CORE ("cntp_cval_el0",     CPENC (3,3,C14,C2,2),	0),
4404  SR_V8_1 ("cntp_cval_el02",	CPENC (3,5,C14,C2,2),	0),
4405  SR_CORE ("cntv_tval_el0",     CPENC (3,3,C14,C3,0),	0),
4406  SR_V8_1 ("cntv_tval_el02",	CPENC (3,5,C14,C3,0),	0),
4407  SR_CORE ("cntv_ctl_el0",      CPENC (3,3,C14,C3,1),	0),
4408  SR_V8_1 ("cntv_ctl_el02",	CPENC (3,5,C14,C3,1),	0),
4409  SR_CORE ("cntv_cval_el0",     CPENC (3,3,C14,C3,2),	0),
4410  SR_V8_1 ("cntv_cval_el02",	CPENC (3,5,C14,C3,2),	0),
4411  SR_CORE ("cnthp_tval_el2",	CPENC (3,4,C14,C2,0),	0),
4412  SR_CORE ("cnthp_ctl_el2",	CPENC (3,4,C14,C2,1),	0),
4413  SR_CORE ("cnthp_cval_el2",	CPENC (3,4,C14,C2,2),	0),
4414  SR_CORE ("cntps_tval_el1",	CPENC (3,7,C14,C2,0),	0),
4415  SR_CORE ("cntps_ctl_el1",	CPENC (3,7,C14,C2,1),	0),
4416  SR_CORE ("cntps_cval_el1",	CPENC (3,7,C14,C2,2),	0),
4417  SR_V8_1 ("cnthv_tval_el2",	CPENC (3,4,C14,C3,0),	0),
4418  SR_V8_1 ("cnthv_ctl_el2",	CPENC (3,4,C14,C3,1),	0),
4419  SR_V8_1 ("cnthv_cval_el2",	CPENC (3,4,C14,C3,2),	0),
4420  SR_CORE ("dacr32_el2",	CPENC (3,4,C3,C0,0),	0),
4421  SR_CORE ("ifsr32_el2",	CPENC (3,4,C5,C0,1),	0),
4422  SR_CORE ("teehbr32_el1",	CPENC (2,2,C1,C0,0),	0),
4423  SR_CORE ("sder32_el3",	CPENC (3,6,C1,C1,1),	0),
4424  SR_CORE ("mdscr_el1",		CPENC (2,0,C0,C2,2),	0),
4425  SR_CORE ("mdccsr_el0",	CPENC (2,3,C0,C1,0),	F_REG_READ),
4426  SR_CORE ("mdccint_el1",       CPENC (2,0,C0,C2,0),	0),
4427  SR_CORE ("dbgdtr_el0",	CPENC (2,3,C0,C4,0),	0),
4428  SR_CORE ("dbgdtrrx_el0",	CPENC (2,3,C0,C5,0),	F_REG_READ),
4429  SR_CORE ("dbgdtrtx_el0",	CPENC (2,3,C0,C5,0),	F_REG_WRITE),
4430  SR_CORE ("osdtrrx_el1",	CPENC (2,0,C0,C0,2),	0),
4431  SR_CORE ("osdtrtx_el1",	CPENC (2,0,C0,C3,2),	0),
4432  SR_CORE ("oseccr_el1",	CPENC (2,0,C0,C6,2),	0),
4433  SR_CORE ("dbgvcr32_el2",      CPENC (2,4,C0,C7,0),	0),
4434  SR_CORE ("dbgbvr0_el1",       CPENC (2,0,C0,C0,4),	0),
4435  SR_CORE ("dbgbvr1_el1",       CPENC (2,0,C0,C1,4),	0),
4436  SR_CORE ("dbgbvr2_el1",       CPENC (2,0,C0,C2,4),	0),
4437  SR_CORE ("dbgbvr3_el1",       CPENC (2,0,C0,C3,4),	0),
4438  SR_CORE ("dbgbvr4_el1",       CPENC (2,0,C0,C4,4),	0),
4439  SR_CORE ("dbgbvr5_el1",       CPENC (2,0,C0,C5,4),	0),
4440  SR_CORE ("dbgbvr6_el1",       CPENC (2,0,C0,C6,4),	0),
4441  SR_CORE ("dbgbvr7_el1",       CPENC (2,0,C0,C7,4),	0),
4442  SR_CORE ("dbgbvr8_el1",       CPENC (2,0,C0,C8,4),	0),
4443  SR_CORE ("dbgbvr9_el1",       CPENC (2,0,C0,C9,4),	0),
4444  SR_CORE ("dbgbvr10_el1",      CPENC (2,0,C0,C10,4),	0),
4445  SR_CORE ("dbgbvr11_el1",      CPENC (2,0,C0,C11,4),	0),
4446  SR_CORE ("dbgbvr12_el1",      CPENC (2,0,C0,C12,4),	0),
4447  SR_CORE ("dbgbvr13_el1",      CPENC (2,0,C0,C13,4),	0),
4448  SR_CORE ("dbgbvr14_el1",      CPENC (2,0,C0,C14,4),	0),
4449  SR_CORE ("dbgbvr15_el1",      CPENC (2,0,C0,C15,4),	0),
4450  SR_CORE ("dbgbcr0_el1",       CPENC (2,0,C0,C0,5),	0),
4451  SR_CORE ("dbgbcr1_el1",       CPENC (2,0,C0,C1,5),	0),
4452  SR_CORE ("dbgbcr2_el1",       CPENC (2,0,C0,C2,5),	0),
4453  SR_CORE ("dbgbcr3_el1",       CPENC (2,0,C0,C3,5),	0),
4454  SR_CORE ("dbgbcr4_el1",       CPENC (2,0,C0,C4,5),	0),
4455  SR_CORE ("dbgbcr5_el1",       CPENC (2,0,C0,C5,5),	0),
4456  SR_CORE ("dbgbcr6_el1",       CPENC (2,0,C0,C6,5),	0),
4457  SR_CORE ("dbgbcr7_el1",       CPENC (2,0,C0,C7,5),	0),
4458  SR_CORE ("dbgbcr8_el1",       CPENC (2,0,C0,C8,5),	0),
4459  SR_CORE ("dbgbcr9_el1",       CPENC (2,0,C0,C9,5),	0),
4460  SR_CORE ("dbgbcr10_el1",      CPENC (2,0,C0,C10,5),	0),
4461  SR_CORE ("dbgbcr11_el1",      CPENC (2,0,C0,C11,5),	0),
4462  SR_CORE ("dbgbcr12_el1",      CPENC (2,0,C0,C12,5),	0),
4463  SR_CORE ("dbgbcr13_el1",      CPENC (2,0,C0,C13,5),	0),
4464  SR_CORE ("dbgbcr14_el1",      CPENC (2,0,C0,C14,5),	0),
4465  SR_CORE ("dbgbcr15_el1",      CPENC (2,0,C0,C15,5),	0),
4466  SR_CORE ("dbgwvr0_el1",       CPENC (2,0,C0,C0,6),	0),
4467  SR_CORE ("dbgwvr1_el1",       CPENC (2,0,C0,C1,6),	0),
4468  SR_CORE ("dbgwvr2_el1",       CPENC (2,0,C0,C2,6),	0),
4469  SR_CORE ("dbgwvr3_el1",       CPENC (2,0,C0,C3,6),	0),
4470  SR_CORE ("dbgwvr4_el1",       CPENC (2,0,C0,C4,6),	0),
4471  SR_CORE ("dbgwvr5_el1",       CPENC (2,0,C0,C5,6),	0),
4472  SR_CORE ("dbgwvr6_el1",       CPENC (2,0,C0,C6,6),	0),
4473  SR_CORE ("dbgwvr7_el1",       CPENC (2,0,C0,C7,6),	0),
4474  SR_CORE ("dbgwvr8_el1",       CPENC (2,0,C0,C8,6),	0),
4475  SR_CORE ("dbgwvr9_el1",       CPENC (2,0,C0,C9,6),	0),
4476  SR_CORE ("dbgwvr10_el1",      CPENC (2,0,C0,C10,6),	0),
4477  SR_CORE ("dbgwvr11_el1",      CPENC (2,0,C0,C11,6),	0),
4478  SR_CORE ("dbgwvr12_el1",      CPENC (2,0,C0,C12,6),	0),
4479  SR_CORE ("dbgwvr13_el1",      CPENC (2,0,C0,C13,6),	0),
4480  SR_CORE ("dbgwvr14_el1",      CPENC (2,0,C0,C14,6),	0),
4481  SR_CORE ("dbgwvr15_el1",      CPENC (2,0,C0,C15,6),	0),
4482  SR_CORE ("dbgwcr0_el1",       CPENC (2,0,C0,C0,7),	0),
4483  SR_CORE ("dbgwcr1_el1",       CPENC (2,0,C0,C1,7),	0),
4484  SR_CORE ("dbgwcr2_el1",       CPENC (2,0,C0,C2,7),	0),
4485  SR_CORE ("dbgwcr3_el1",       CPENC (2,0,C0,C3,7),	0),
4486  SR_CORE ("dbgwcr4_el1",       CPENC (2,0,C0,C4,7),	0),
4487  SR_CORE ("dbgwcr5_el1",       CPENC (2,0,C0,C5,7),	0),
4488  SR_CORE ("dbgwcr6_el1",       CPENC (2,0,C0,C6,7),	0),
4489  SR_CORE ("dbgwcr7_el1",       CPENC (2,0,C0,C7,7),	0),
4490  SR_CORE ("dbgwcr8_el1",       CPENC (2,0,C0,C8,7),	0),
4491  SR_CORE ("dbgwcr9_el1",       CPENC (2,0,C0,C9,7),	0),
4492  SR_CORE ("dbgwcr10_el1",      CPENC (2,0,C0,C10,7),	0),
4493  SR_CORE ("dbgwcr11_el1",      CPENC (2,0,C0,C11,7),	0),
4494  SR_CORE ("dbgwcr12_el1",      CPENC (2,0,C0,C12,7),	0),
4495  SR_CORE ("dbgwcr13_el1",      CPENC (2,0,C0,C13,7),	0),
4496  SR_CORE ("dbgwcr14_el1",      CPENC (2,0,C0,C14,7),	0),
4497  SR_CORE ("dbgwcr15_el1",      CPENC (2,0,C0,C15,7),	0),
4498  SR_CORE ("mdrar_el1",		CPENC (2,0,C1,C0,0),	F_REG_READ),
4499  SR_CORE ("oslar_el1",		CPENC (2,0,C1,C0,4),	F_REG_WRITE),
4500  SR_CORE ("oslsr_el1",		CPENC (2,0,C1,C1,4),	F_REG_READ),
4501  SR_CORE ("osdlr_el1",		CPENC (2,0,C1,C3,4),	0),
4502  SR_CORE ("dbgprcr_el1",       CPENC (2,0,C1,C4,4),	0),
4503  SR_CORE ("dbgclaimset_el1",   CPENC (2,0,C7,C8,6),	0),
4504  SR_CORE ("dbgclaimclr_el1",   CPENC (2,0,C7,C9,6),	0),
4505  SR_CORE ("dbgauthstatus_el1", CPENC (2,0,C7,C14,6),	F_REG_READ),
4506  SR_PROFILE ("pmblimitr_el1",	CPENC (3,0,C9,C10,0),	0),
4507  SR_PROFILE ("pmbptr_el1",	CPENC (3,0,C9,C10,1),	0),
4508  SR_PROFILE ("pmbsr_el1",	CPENC (3,0,C9,C10,3),	0),
4509  SR_PROFILE ("pmbidr_el1",	CPENC (3,0,C9,C10,7),	F_REG_READ),
4510  SR_PROFILE ("pmscr_el1",	CPENC (3,0,C9,C9,0),	0),
4511  SR_PROFILE ("pmsicr_el1",	CPENC (3,0,C9,C9,2),	0),
4512  SR_PROFILE ("pmsirr_el1",	CPENC (3,0,C9,C9,3),	0),
4513  SR_PROFILE ("pmsfcr_el1",	CPENC (3,0,C9,C9,4),	0),
4514  SR_PROFILE ("pmsevfr_el1",	CPENC (3,0,C9,C9,5),	0),
4515  SR_PROFILE ("pmslatfr_el1",	CPENC (3,0,C9,C9,6),	0),
4516  SR_PROFILE ("pmsidr_el1",	CPENC (3,0,C9,C9,7),	F_REG_READ),
4517  SR_PROFILE ("pmscr_el2",	CPENC (3,4,C9,C9,0),	0),
4518  SR_PROFILE ("pmscr_el12",	CPENC (3,5,C9,C9,0),	0),
4519  SR_CORE ("pmcr_el0",		CPENC (3,3,C9,C12,0),	0),
4520  SR_CORE ("pmcntenset_el0",    CPENC (3,3,C9,C12,1),	0),
4521  SR_CORE ("pmcntenclr_el0",    CPENC (3,3,C9,C12,2),	0),
4522  SR_CORE ("pmovsclr_el0",      CPENC (3,3,C9,C12,3),	0),
4523  SR_CORE ("pmswinc_el0",       CPENC (3,3,C9,C12,4),	F_REG_WRITE),
4524  SR_CORE ("pmselr_el0",	CPENC (3,3,C9,C12,5),	0),
4525  SR_CORE ("pmceid0_el0",       CPENC (3,3,C9,C12,6),	F_REG_READ),
4526  SR_CORE ("pmceid1_el0",       CPENC (3,3,C9,C12,7),	F_REG_READ),
4527  SR_CORE ("pmccntr_el0",       CPENC (3,3,C9,C13,0),	0),
4528  SR_CORE ("pmxevtyper_el0",    CPENC (3,3,C9,C13,1),	0),
4529  SR_CORE ("pmxevcntr_el0",     CPENC (3,3,C9,C13,2),	0),
4530  SR_CORE ("pmuserenr_el0",     CPENC (3,3,C9,C14,0),	0),
4531  SR_CORE ("pmintenset_el1",    CPENC (3,0,C9,C14,1),	0),
4532  SR_CORE ("pmintenclr_el1",    CPENC (3,0,C9,C14,2),	0),
4533  SR_CORE ("pmovsset_el0",      CPENC (3,3,C9,C14,3),	0),
4534  SR_CORE ("pmevcntr0_el0",     CPENC (3,3,C14,C8,0),	0),
4535  SR_CORE ("pmevcntr1_el0",     CPENC (3,3,C14,C8,1),	0),
4536  SR_CORE ("pmevcntr2_el0",     CPENC (3,3,C14,C8,2),	0),
4537  SR_CORE ("pmevcntr3_el0",     CPENC (3,3,C14,C8,3),	0),
4538  SR_CORE ("pmevcntr4_el0",     CPENC (3,3,C14,C8,4),	0),
4539  SR_CORE ("pmevcntr5_el0",     CPENC (3,3,C14,C8,5),	0),
4540  SR_CORE ("pmevcntr6_el0",     CPENC (3,3,C14,C8,6),	0),
4541  SR_CORE ("pmevcntr7_el0",     CPENC (3,3,C14,C8,7),	0),
4542  SR_CORE ("pmevcntr8_el0",     CPENC (3,3,C14,C9,0),	0),
4543  SR_CORE ("pmevcntr9_el0",     CPENC (3,3,C14,C9,1),	0),
4544  SR_CORE ("pmevcntr10_el0",    CPENC (3,3,C14,C9,2),	0),
4545  SR_CORE ("pmevcntr11_el0",    CPENC (3,3,C14,C9,3),	0),
4546  SR_CORE ("pmevcntr12_el0",    CPENC (3,3,C14,C9,4),	0),
4547  SR_CORE ("pmevcntr13_el0",    CPENC (3,3,C14,C9,5),	0),
4548  SR_CORE ("pmevcntr14_el0",    CPENC (3,3,C14,C9,6),	0),
4549  SR_CORE ("pmevcntr15_el0",    CPENC (3,3,C14,C9,7),	0),
4550  SR_CORE ("pmevcntr16_el0",    CPENC (3,3,C14,C10,0),	0),
4551  SR_CORE ("pmevcntr17_el0",    CPENC (3,3,C14,C10,1),	0),
4552  SR_CORE ("pmevcntr18_el0",    CPENC (3,3,C14,C10,2),	0),
4553  SR_CORE ("pmevcntr19_el0",    CPENC (3,3,C14,C10,3),	0),
4554  SR_CORE ("pmevcntr20_el0",    CPENC (3,3,C14,C10,4),	0),
4555  SR_CORE ("pmevcntr21_el0",    CPENC (3,3,C14,C10,5),	0),
4556  SR_CORE ("pmevcntr22_el0",    CPENC (3,3,C14,C10,6),	0),
4557  SR_CORE ("pmevcntr23_el0",    CPENC (3,3,C14,C10,7),	0),
4558  SR_CORE ("pmevcntr24_el0",    CPENC (3,3,C14,C11,0),	0),
4559  SR_CORE ("pmevcntr25_el0",    CPENC (3,3,C14,C11,1),	0),
4560  SR_CORE ("pmevcntr26_el0",    CPENC (3,3,C14,C11,2),	0),
4561  SR_CORE ("pmevcntr27_el0",    CPENC (3,3,C14,C11,3),	0),
4562  SR_CORE ("pmevcntr28_el0",    CPENC (3,3,C14,C11,4),	0),
4563  SR_CORE ("pmevcntr29_el0",    CPENC (3,3,C14,C11,5),	0),
4564  SR_CORE ("pmevcntr30_el0",    CPENC (3,3,C14,C11,6),	0),
4565  SR_CORE ("pmevtyper0_el0",    CPENC (3,3,C14,C12,0),	0),
4566  SR_CORE ("pmevtyper1_el0",    CPENC (3,3,C14,C12,1),	0),
4567  SR_CORE ("pmevtyper2_el0",    CPENC (3,3,C14,C12,2),	0),
4568  SR_CORE ("pmevtyper3_el0",    CPENC (3,3,C14,C12,3),	0),
4569  SR_CORE ("pmevtyper4_el0",    CPENC (3,3,C14,C12,4),	0),
4570  SR_CORE ("pmevtyper5_el0",    CPENC (3,3,C14,C12,5),	0),
4571  SR_CORE ("pmevtyper6_el0",    CPENC (3,3,C14,C12,6),	0),
4572  SR_CORE ("pmevtyper7_el0",    CPENC (3,3,C14,C12,7),	0),
4573  SR_CORE ("pmevtyper8_el0",    CPENC (3,3,C14,C13,0),	0),
4574  SR_CORE ("pmevtyper9_el0",    CPENC (3,3,C14,C13,1),	0),
4575  SR_CORE ("pmevtyper10_el0",   CPENC (3,3,C14,C13,2),	0),
4576  SR_CORE ("pmevtyper11_el0",   CPENC (3,3,C14,C13,3),	0),
4577  SR_CORE ("pmevtyper12_el0",   CPENC (3,3,C14,C13,4),	0),
4578  SR_CORE ("pmevtyper13_el0",   CPENC (3,3,C14,C13,5),	0),
4579  SR_CORE ("pmevtyper14_el0",   CPENC (3,3,C14,C13,6),	0),
4580  SR_CORE ("pmevtyper15_el0",   CPENC (3,3,C14,C13,7),	0),
4581  SR_CORE ("pmevtyper16_el0",   CPENC (3,3,C14,C14,0),	0),
4582  SR_CORE ("pmevtyper17_el0",   CPENC (3,3,C14,C14,1),	0),
4583  SR_CORE ("pmevtyper18_el0",   CPENC (3,3,C14,C14,2),	0),
4584  SR_CORE ("pmevtyper19_el0",   CPENC (3,3,C14,C14,3),	0),
4585  SR_CORE ("pmevtyper20_el0",   CPENC (3,3,C14,C14,4),	0),
4586  SR_CORE ("pmevtyper21_el0",   CPENC (3,3,C14,C14,5),	0),
4587  SR_CORE ("pmevtyper22_el0",   CPENC (3,3,C14,C14,6),	0),
4588  SR_CORE ("pmevtyper23_el0",   CPENC (3,3,C14,C14,7),	0),
4589  SR_CORE ("pmevtyper24_el0",   CPENC (3,3,C14,C15,0),	0),
4590  SR_CORE ("pmevtyper25_el0",   CPENC (3,3,C14,C15,1),	0),
4591  SR_CORE ("pmevtyper26_el0",   CPENC (3,3,C14,C15,2),	0),
4592  SR_CORE ("pmevtyper27_el0",   CPENC (3,3,C14,C15,3),	0),
4593  SR_CORE ("pmevtyper28_el0",   CPENC (3,3,C14,C15,4),	0),
4594  SR_CORE ("pmevtyper29_el0",   CPENC (3,3,C14,C15,5),	0),
4595  SR_CORE ("pmevtyper30_el0",   CPENC (3,3,C14,C15,6),	0),
4596  SR_CORE ("pmccfiltr_el0",     CPENC (3,3,C14,C15,7),	0),
4597
4598  SR_V8_4 ("dit",		CPEN_ (3,C2,5),		0),
4599  SR_V8_4 ("trfcr_el1",		CPENC (3,0,C1,C2,1),	0),
4600  SR_V8_4 ("pmmir_el1",		CPENC (3,0,C9,C14,6),	F_REG_READ),
4601  SR_V8_4 ("trfcr_el2",		CPENC (3,4,C1,C2,1),	0),
4602  SR_V8_4 ("vstcr_el2",		CPENC (3,4,C2,C6,2),	0),
4603  SR_V8_4_A ("vsttbr_el2",	CPENC (3,4,C2,C6,0),	0),
4604  SR_V8_4 ("cnthvs_tval_el2",	CPENC (3,4,C14,C4,0),	0),
4605  SR_V8_4 ("cnthvs_cval_el2",	CPENC (3,4,C14,C4,2),	0),
4606  SR_V8_4 ("cnthvs_ctl_el2",	CPENC (3,4,C14,C4,1),	0),
4607  SR_V8_4 ("cnthps_tval_el2",	CPENC (3,4,C14,C5,0),	0),
4608  SR_V8_4 ("cnthps_cval_el2",	CPENC (3,4,C14,C5,2),	0),
4609  SR_V8_4 ("cnthps_ctl_el2",	CPENC (3,4,C14,C5,1),	0),
4610  SR_V8_4 ("sder32_el2",	CPENC (3,4,C1,C3,1),	0),
4611  SR_V8_4 ("vncr_el2",		CPENC (3,4,C2,C2,0),	0),
4612  SR_V8_4 ("trfcr_el12",	CPENC (3,5,C1,C2,1),	0),
4613
4614  SR_CORE ("mpam0_el1",		CPENC (3,0,C10,C5,1),	0),
4615  SR_CORE ("mpam1_el1",		CPENC (3,0,C10,C5,0),	0),
4616  SR_CORE ("mpam1_el12",	CPENC (3,5,C10,C5,0),	0),
4617  SR_CORE ("mpam2_el2",		CPENC (3,4,C10,C5,0),	0),
4618  SR_CORE ("mpam3_el3",		CPENC (3,6,C10,C5,0),	0),
4619  SR_CORE ("mpamhcr_el2",	CPENC (3,4,C10,C4,0),	0),
4620  SR_CORE ("mpamidr_el1",	CPENC (3,0,C10,C4,4),	F_REG_READ),
4621  SR_CORE ("mpamvpm0_el2",	CPENC (3,4,C10,C6,0),	0),
4622  SR_CORE ("mpamvpm1_el2",	CPENC (3,4,C10,C6,1),	0),
4623  SR_CORE ("mpamvpm2_el2",	CPENC (3,4,C10,C6,2),	0),
4624  SR_CORE ("mpamvpm3_el2",	CPENC (3,4,C10,C6,3),	0),
4625  SR_CORE ("mpamvpm4_el2",	CPENC (3,4,C10,C6,4),	0),
4626  SR_CORE ("mpamvpm5_el2",	CPENC (3,4,C10,C6,5),	0),
4627  SR_CORE ("mpamvpm6_el2",	CPENC (3,4,C10,C6,6),	0),
4628  SR_CORE ("mpamvpm7_el2",	CPENC (3,4,C10,C6,7),	0),
4629  SR_CORE ("mpamvpmv_el2",	CPENC (3,4,C10,C4,1),	0),
4630
4631  SR_V8_R ("mpuir_el1",		CPENC (3,0,C0,C0,4),	F_REG_READ),
4632  SR_V8_R ("mpuir_el2",		CPENC (3,4,C0,C0,4),	F_REG_READ),
4633  SR_V8_R ("prbar_el1",		CPENC (3,0,C6,C8,0),	0),
4634  SR_V8_R ("prbar_el2",		CPENC (3,4,C6,C8,0),	0),
4635
4636#define ENC_BARLAR(x,n,lar) \
4637  CPENC (3, (x-1) << 2, C6, 8 | (n >> 1), ((n & 1) << 2) | lar)
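
/* I.e. op1 is (x-1)*4 and selects the exception level, CRm together with
   bit 2 of op2 encodes the region number N, and the low bit of op2 picks
   the base register (PRBAR*, lar==0) or the limit register (PRLAR*,
   lar==1).  */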
4638
4639#define PRBARn_ELx(x,n) SR_V8_R ("prbar" #n "_el" #x, ENC_BARLAR (x,n,0), 0)
4640#define PRLARn_ELx(x,n) SR_V8_R ("prlar" #n "_el" #x, ENC_BARLAR (x,n,1), 0)
4641
4642  SR_EXPAND_EL12 (PRBARn_ELx)
4643  SR_V8_R ("prenr_el1",		CPENC (3,0,C6,C1,1),	0),
4644  SR_V8_R ("prenr_el2",		CPENC (3,4,C6,C1,1),	0),
4645  SR_V8_R ("prlar_el1",		CPENC (3,0,C6,C8,1),	0),
4646  SR_V8_R ("prlar_el2",		CPENC (3,4,C6,C8,1),	0),
4647  SR_EXPAND_EL12 (PRLARn_ELx)
4648  SR_V8_R ("prselr_el1",	CPENC (3,0,C6,C2,1),	0),
4649  SR_V8_R ("prselr_el2",	CPENC (3,4,C6,C2,1),	0),
4650  SR_V8_R ("vsctlr_el2",	CPENC (3,4,C2,C0,0),	0),
4651
  SR_CORE ("trbbaser_el1",	CPENC (3,0,C9,C11,2),	0),
  SR_CORE ("trbidr_el1",	CPENC (3,0,C9,C11,7),	F_REG_READ),
  SR_CORE ("trblimitr_el1",	CPENC (3,0,C9,C11,0),	0),
  SR_CORE ("trbmar_el1",	CPENC (3,0,C9,C11,4),	0),
  SR_CORE ("trbptr_el1",	CPENC (3,0,C9,C11,1),	0),
  SR_CORE ("trbsr_el1",		CPENC (3,0,C9,C11,3),	0),
  SR_CORE ("trbtrg_el1",	CPENC (3,0,C9,C11,6),	0),
4659
4660  SR_CORE ("trcauthstatus", CPENC (2,1,C7,C14,6), F_REG_READ),
4661  SR_CORE ("trccidr0",      CPENC (2,1,C7,C12,7), F_REG_READ),
4662  SR_CORE ("trccidr1",      CPENC (2,1,C7,C13,7), F_REG_READ),
4663  SR_CORE ("trccidr2",      CPENC (2,1,C7,C14,7), F_REG_READ),
4664  SR_CORE ("trccidr3",      CPENC (2,1,C7,C15,7), F_REG_READ),
4665  SR_CORE ("trcdevaff0",    CPENC (2,1,C7,C10,6), F_REG_READ),
4666  SR_CORE ("trcdevaff1",    CPENC (2,1,C7,C11,6), F_REG_READ),
4667  SR_CORE ("trcdevarch",    CPENC (2,1,C7,C15,6), F_REG_READ),
4668  SR_CORE ("trcdevid",      CPENC (2,1,C7,C2,7),  F_REG_READ),
4669  SR_CORE ("trcdevtype",    CPENC (2,1,C7,C3,7),  F_REG_READ),
4670  SR_CORE ("trcidr0",       CPENC (2,1,C0,C8,7),  F_REG_READ),
4671  SR_CORE ("trcidr1",       CPENC (2,1,C0,C9,7),  F_REG_READ),
4672  SR_CORE ("trcidr2",       CPENC (2,1,C0,C10,7), F_REG_READ),
4673  SR_CORE ("trcidr3",       CPENC (2,1,C0,C11,7), F_REG_READ),
4674  SR_CORE ("trcidr4",       CPENC (2,1,C0,C12,7), F_REG_READ),
4675  SR_CORE ("trcidr5",       CPENC (2,1,C0,C13,7), F_REG_READ),
4676  SR_CORE ("trcidr6",       CPENC (2,1,C0,C14,7), F_REG_READ),
4677  SR_CORE ("trcidr7",       CPENC (2,1,C0,C15,7), F_REG_READ),
4678  SR_CORE ("trcidr8",       CPENC (2,1,C0,C0,6),  F_REG_READ),
4679  SR_CORE ("trcidr9",       CPENC (2,1,C0,C1,6),  F_REG_READ),
4680  SR_CORE ("trcidr10",      CPENC (2,1,C0,C2,6),  F_REG_READ),
4681  SR_CORE ("trcidr11",      CPENC (2,1,C0,C3,6),  F_REG_READ),
4682  SR_CORE ("trcidr12",      CPENC (2,1,C0,C4,6),  F_REG_READ),
4683  SR_CORE ("trcidr13",      CPENC (2,1,C0,C5,6),  F_REG_READ),
4684  SR_CORE ("trclsr",        CPENC (2,1,C7,C13,6), F_REG_READ),
4685  SR_CORE ("trcoslsr",      CPENC (2,1,C1,C1,4),  F_REG_READ),
4686  SR_CORE ("trcpdsr",       CPENC (2,1,C1,C5,4),  F_REG_READ),
4687  SR_CORE ("trcpidr0",      CPENC (2,1,C7,C8,7),  F_REG_READ),
4688  SR_CORE ("trcpidr1",      CPENC (2,1,C7,C9,7),  F_REG_READ),
4689  SR_CORE ("trcpidr2",      CPENC (2,1,C7,C10,7), F_REG_READ),
4690  SR_CORE ("trcpidr3",      CPENC (2,1,C7,C11,7), F_REG_READ),
4691  SR_CORE ("trcpidr4",      CPENC (2,1,C7,C4,7),  F_REG_READ),
4692  SR_CORE ("trcpidr5",      CPENC (2,1,C7,C5,7),  F_REG_READ),
4693  SR_CORE ("trcpidr6",      CPENC (2,1,C7,C6,7),  F_REG_READ),
4694  SR_CORE ("trcpidr7",      CPENC (2,1,C7,C7,7),  F_REG_READ),
4695  SR_CORE ("trcstatr",      CPENC (2,1,C0,C3,0),  F_REG_READ),
4696  SR_CORE ("trcacatr0",     CPENC (2,1,C2,C0,2),  0),
4697  SR_CORE ("trcacatr1",     CPENC (2,1,C2,C2,2),  0),
4698  SR_CORE ("trcacatr2",     CPENC (2,1,C2,C4,2),  0),
4699  SR_CORE ("trcacatr3",     CPENC (2,1,C2,C6,2),  0),
4700  SR_CORE ("trcacatr4",     CPENC (2,1,C2,C8,2),  0),
4701  SR_CORE ("trcacatr5",     CPENC (2,1,C2,C10,2), 0),
4702  SR_CORE ("trcacatr6",     CPENC (2,1,C2,C12,2), 0),
4703  SR_CORE ("trcacatr7",     CPENC (2,1,C2,C14,2), 0),
4704  SR_CORE ("trcacatr8",     CPENC (2,1,C2,C0,3),  0),
4705  SR_CORE ("trcacatr9",     CPENC (2,1,C2,C2,3),  0),
4706  SR_CORE ("trcacatr10",    CPENC (2,1,C2,C4,3),  0),
4707  SR_CORE ("trcacatr11",    CPENC (2,1,C2,C6,3),  0),
4708  SR_CORE ("trcacatr12",    CPENC (2,1,C2,C8,3),  0),
4709  SR_CORE ("trcacatr13",    CPENC (2,1,C2,C10,3), 0),
4710  SR_CORE ("trcacatr14",    CPENC (2,1,C2,C12,3), 0),
4711  SR_CORE ("trcacatr15",    CPENC (2,1,C2,C14,3), 0),
4712  SR_CORE ("trcacvr0",      CPENC (2,1,C2,C0,0),  0),
4713  SR_CORE ("trcacvr1",      CPENC (2,1,C2,C2,0),  0),
4714  SR_CORE ("trcacvr2",      CPENC (2,1,C2,C4,0),  0),
4715  SR_CORE ("trcacvr3",      CPENC (2,1,C2,C6,0),  0),
4716  SR_CORE ("trcacvr4",      CPENC (2,1,C2,C8,0),  0),
4717  SR_CORE ("trcacvr5",      CPENC (2,1,C2,C10,0), 0),
4718  SR_CORE ("trcacvr6",      CPENC (2,1,C2,C12,0), 0),
4719  SR_CORE ("trcacvr7",      CPENC (2,1,C2,C14,0), 0),
4720  SR_CORE ("trcacvr8",      CPENC (2,1,C2,C0,1),  0),
4721  SR_CORE ("trcacvr9",      CPENC (2,1,C2,C2,1),  0),
4722  SR_CORE ("trcacvr10",     CPENC (2,1,C2,C4,1),  0),
4723  SR_CORE ("trcacvr11",     CPENC (2,1,C2,C6,1),  0),
4724  SR_CORE ("trcacvr12",     CPENC (2,1,C2,C8,1),  0),
4725  SR_CORE ("trcacvr13",     CPENC (2,1,C2,C10,1), 0),
4726  SR_CORE ("trcacvr14",     CPENC (2,1,C2,C12,1), 0),
4727  SR_CORE ("trcacvr15",     CPENC (2,1,C2,C14,1), 0),
4728  SR_CORE ("trcauxctlr",    CPENC (2,1,C0,C6,0),  0),
4729  SR_CORE ("trcbbctlr",     CPENC (2,1,C0,C15,0), 0),
4730  SR_CORE ("trcccctlr",     CPENC (2,1,C0,C14,0), 0),
4731  SR_CORE ("trccidcctlr0",  CPENC (2,1,C3,C0,2),  0),
4732  SR_CORE ("trccidcctlr1",  CPENC (2,1,C3,C1,2),  0),
4733  SR_CORE ("trccidcvr0",    CPENC (2,1,C3,C0,0),  0),
4734  SR_CORE ("trccidcvr1",    CPENC (2,1,C3,C2,0),  0),
4735  SR_CORE ("trccidcvr2",    CPENC (2,1,C3,C4,0),  0),
4736  SR_CORE ("trccidcvr3",    CPENC (2,1,C3,C6,0),  0),
4737  SR_CORE ("trccidcvr4",    CPENC (2,1,C3,C8,0),  0),
4738  SR_CORE ("trccidcvr5",    CPENC (2,1,C3,C10,0), 0),
4739  SR_CORE ("trccidcvr6",    CPENC (2,1,C3,C12,0), 0),
4740  SR_CORE ("trccidcvr7",    CPENC (2,1,C3,C14,0), 0),
4741  SR_CORE ("trcclaimclr",   CPENC (2,1,C7,C9,6),  0),
4742  SR_CORE ("trcclaimset",   CPENC (2,1,C7,C8,6),  0),
4743  SR_CORE ("trccntctlr0",   CPENC (2,1,C0,C4,5),  0),
4744  SR_CORE ("trccntctlr1",   CPENC (2,1,C0,C5,5),  0),
4745  SR_CORE ("trccntctlr2",   CPENC (2,1,C0,C6,5),  0),
4746  SR_CORE ("trccntctlr3",   CPENC (2,1,C0,C7,5),  0),
4747  SR_CORE ("trccntrldvr0",  CPENC (2,1,C0,C0,5),  0),
4748  SR_CORE ("trccntrldvr1",  CPENC (2,1,C0,C1,5),  0),
4749  SR_CORE ("trccntrldvr2",  CPENC (2,1,C0,C2,5),  0),
4750  SR_CORE ("trccntrldvr3",  CPENC (2,1,C0,C3,5),  0),
4751  SR_CORE ("trccntvr0",     CPENC (2,1,C0,C8,5),  0),
4752  SR_CORE ("trccntvr1",     CPENC (2,1,C0,C9,5),  0),
4753  SR_CORE ("trccntvr2",     CPENC (2,1,C0,C10,5), 0),
4754  SR_CORE ("trccntvr3",     CPENC (2,1,C0,C11,5), 0),
4755  SR_CORE ("trcconfigr",    CPENC (2,1,C0,C4,0),  0),
4756  SR_CORE ("trcdvcmr0",     CPENC (2,1,C2,C0,6),  0),
4757  SR_CORE ("trcdvcmr1",     CPENC (2,1,C2,C4,6),  0),
4758  SR_CORE ("trcdvcmr2",     CPENC (2,1,C2,C8,6),  0),
4759  SR_CORE ("trcdvcmr3",     CPENC (2,1,C2,C12,6), 0),
4760  SR_CORE ("trcdvcmr4",     CPENC (2,1,C2,C0,7),  0),
4761  SR_CORE ("trcdvcmr5",     CPENC (2,1,C2,C4,7),  0),
4762  SR_CORE ("trcdvcmr6",     CPENC (2,1,C2,C8,7),  0),
4763  SR_CORE ("trcdvcmr7",     CPENC (2,1,C2,C12,7), 0),
4764  SR_CORE ("trcdvcvr0",     CPENC (2,1,C2,C0,4),  0),
4765  SR_CORE ("trcdvcvr1",     CPENC (2,1,C2,C4,4),  0),
4766  SR_CORE ("trcdvcvr2",     CPENC (2,1,C2,C8,4),  0),
4767  SR_CORE ("trcdvcvr3",     CPENC (2,1,C2,C12,4), 0),
4768  SR_CORE ("trcdvcvr4",     CPENC (2,1,C2,C0,5),  0),
4769  SR_CORE ("trcdvcvr5",     CPENC (2,1,C2,C4,5),  0),
4770  SR_CORE ("trcdvcvr6",     CPENC (2,1,C2,C8,5),  0),
4771  SR_CORE ("trcdvcvr7",     CPENC (2,1,C2,C12,5), 0),
4772  SR_CORE ("trceventctl0r", CPENC (2,1,C0,C8,0),  0),
4773  SR_CORE ("trceventctl1r", CPENC (2,1,C0,C9,0),  0),
4774  SR_CORE ("trcextinselr0", CPENC (2,1,C0,C8,4),  0),
4775  SR_CORE ("trcextinselr",  CPENC (2,1,C0,C8,4),  0),
4776  SR_CORE ("trcextinselr1", CPENC (2,1,C0,C9,4),  0),
4777  SR_CORE ("trcextinselr2", CPENC (2,1,C0,C10,4), 0),
4778  SR_CORE ("trcextinselr3", CPENC (2,1,C0,C11,4), 0),
4779  SR_CORE ("trcimspec0",    CPENC (2,1,C0,C0,7),  0),
4780  SR_CORE ("trcimspec1",    CPENC (2,1,C0,C1,7),  0),
4781  SR_CORE ("trcimspec2",    CPENC (2,1,C0,C2,7),  0),
4782  SR_CORE ("trcimspec3",    CPENC (2,1,C0,C3,7),  0),
4783  SR_CORE ("trcimspec4",    CPENC (2,1,C0,C4,7),  0),
4784  SR_CORE ("trcimspec5",    CPENC (2,1,C0,C5,7),  0),
4785  SR_CORE ("trcimspec6",    CPENC (2,1,C0,C6,7),  0),
4786  SR_CORE ("trcimspec7",    CPENC (2,1,C0,C7,7),  0),
4787  SR_CORE ("trcitctrl",     CPENC (2,1,C7,C0,4),  0),
4788  SR_CORE ("trcpdcr",       CPENC (2,1,C1,C4,4),  0),
4789  SR_CORE ("trcprgctlr",    CPENC (2,1,C0,C1,0),  0),
4790  SR_CORE ("trcprocselr",   CPENC (2,1,C0,C2,0),  0),
4791  SR_CORE ("trcqctlr",      CPENC (2,1,C0,C1,1),  0),
4792  SR_CORE ("trcrsr",        CPENC (2,1,C0,C10,0), 0),
4793  SR_CORE ("trcrsctlr2",    CPENC (2,1,C1,C2,0),  0),
4794  SR_CORE ("trcrsctlr3",    CPENC (2,1,C1,C3,0),  0),
4795  SR_CORE ("trcrsctlr4",    CPENC (2,1,C1,C4,0),  0),
4796  SR_CORE ("trcrsctlr5",    CPENC (2,1,C1,C5,0),  0),
4797  SR_CORE ("trcrsctlr6",    CPENC (2,1,C1,C6,0),  0),
4798  SR_CORE ("trcrsctlr7",    CPENC (2,1,C1,C7,0),  0),
4799  SR_CORE ("trcrsctlr8",    CPENC (2,1,C1,C8,0),  0),
4800  SR_CORE ("trcrsctlr9",    CPENC (2,1,C1,C9,0),  0),
4801  SR_CORE ("trcrsctlr10",   CPENC (2,1,C1,C10,0), 0),
4802  SR_CORE ("trcrsctlr11",   CPENC (2,1,C1,C11,0), 0),
4803  SR_CORE ("trcrsctlr12",   CPENC (2,1,C1,C12,0), 0),
4804  SR_CORE ("trcrsctlr13",   CPENC (2,1,C1,C13,0), 0),
4805  SR_CORE ("trcrsctlr14",   CPENC (2,1,C1,C14,0), 0),
4806  SR_CORE ("trcrsctlr15",   CPENC (2,1,C1,C15,0), 0),
4807  SR_CORE ("trcrsctlr16",   CPENC (2,1,C1,C0,1),  0),
4808  SR_CORE ("trcrsctlr17",   CPENC (2,1,C1,C1,1),  0),
4809  SR_CORE ("trcrsctlr18",   CPENC (2,1,C1,C2,1),  0),
4810  SR_CORE ("trcrsctlr19",   CPENC (2,1,C1,C3,1),  0),
4811  SR_CORE ("trcrsctlr20",   CPENC (2,1,C1,C4,1),  0),
4812  SR_CORE ("trcrsctlr21",   CPENC (2,1,C1,C5,1),  0),
4813  SR_CORE ("trcrsctlr22",   CPENC (2,1,C1,C6,1),  0),
4814  SR_CORE ("trcrsctlr23",   CPENC (2,1,C1,C7,1),  0),
4815  SR_CORE ("trcrsctlr24",   CPENC (2,1,C1,C8,1),  0),
4816  SR_CORE ("trcrsctlr25",   CPENC (2,1,C1,C9,1),  0),
4817  SR_CORE ("trcrsctlr26",   CPENC (2,1,C1,C10,1), 0),
4818  SR_CORE ("trcrsctlr27",   CPENC (2,1,C1,C11,1), 0),
4819  SR_CORE ("trcrsctlr28",   CPENC (2,1,C1,C12,1), 0),
4820  SR_CORE ("trcrsctlr29",   CPENC (2,1,C1,C13,1), 0),
4821  SR_CORE ("trcrsctlr30",   CPENC (2,1,C1,C14,1), 0),
4822  SR_CORE ("trcrsctlr31",   CPENC (2,1,C1,C15,1), 0),
4823  SR_CORE ("trcseqevr0",    CPENC (2,1,C0,C0,4),  0),
4824  SR_CORE ("trcseqevr1",    CPENC (2,1,C0,C1,4),  0),
4825  SR_CORE ("trcseqevr2",    CPENC (2,1,C0,C2,4),  0),
4826  SR_CORE ("trcseqrstevr",  CPENC (2,1,C0,C6,4),  0),
4827  SR_CORE ("trcseqstr",     CPENC (2,1,C0,C7,4),  0),
4828  SR_CORE ("trcssccr0",     CPENC (2,1,C1,C0,2),  0),
4829  SR_CORE ("trcssccr1",     CPENC (2,1,C1,C1,2),  0),
4830  SR_CORE ("trcssccr2",     CPENC (2,1,C1,C2,2),  0),
4831  SR_CORE ("trcssccr3",     CPENC (2,1,C1,C3,2),  0),
4832  SR_CORE ("trcssccr4",     CPENC (2,1,C1,C4,2),  0),
4833  SR_CORE ("trcssccr5",     CPENC (2,1,C1,C5,2),  0),
4834  SR_CORE ("trcssccr6",     CPENC (2,1,C1,C6,2),  0),
4835  SR_CORE ("trcssccr7",     CPENC (2,1,C1,C7,2),  0),
4836  SR_CORE ("trcsscsr0",     CPENC (2,1,C1,C8,2),  0),
4837  SR_CORE ("trcsscsr1",     CPENC (2,1,C1,C9,2),  0),
4838  SR_CORE ("trcsscsr2",     CPENC (2,1,C1,C10,2), 0),
4839  SR_CORE ("trcsscsr3",     CPENC (2,1,C1,C11,2), 0),
4840  SR_CORE ("trcsscsr4",     CPENC (2,1,C1,C12,2), 0),
4841  SR_CORE ("trcsscsr5",     CPENC (2,1,C1,C13,2), 0),
4842  SR_CORE ("trcsscsr6",     CPENC (2,1,C1,C14,2), 0),
4843  SR_CORE ("trcsscsr7",     CPENC (2,1,C1,C15,2), 0),
4844  SR_CORE ("trcsspcicr0",   CPENC (2,1,C1,C0,3),  0),
4845  SR_CORE ("trcsspcicr1",   CPENC (2,1,C1,C1,3),  0),
4846  SR_CORE ("trcsspcicr2",   CPENC (2,1,C1,C2,3),  0),
4847  SR_CORE ("trcsspcicr3",   CPENC (2,1,C1,C3,3),  0),
4848  SR_CORE ("trcsspcicr4",   CPENC (2,1,C1,C4,3),  0),
4849  SR_CORE ("trcsspcicr5",   CPENC (2,1,C1,C5,3),  0),
4850  SR_CORE ("trcsspcicr6",   CPENC (2,1,C1,C6,3),  0),
4851  SR_CORE ("trcsspcicr7",   CPENC (2,1,C1,C7,3),  0),
4852  SR_CORE ("trcstallctlr",  CPENC (2,1,C0,C11,0), 0),
4853  SR_CORE ("trcsyncpr",     CPENC (2,1,C0,C13,0), 0),
4854  SR_CORE ("trctraceidr",   CPENC (2,1,C0,C0,1),  0),
4855  SR_CORE ("trctsctlr",     CPENC (2,1,C0,C12,0), 0),
4856  SR_CORE ("trcvdarcctlr",  CPENC (2,1,C0,C10,2), 0),
4857  SR_CORE ("trcvdctlr",     CPENC (2,1,C0,C8,2),  0),
4858  SR_CORE ("trcvdsacctlr",  CPENC (2,1,C0,C9,2),  0),
4859  SR_CORE ("trcvictlr",     CPENC (2,1,C0,C0,2),  0),
4860  SR_CORE ("trcviiectlr",   CPENC (2,1,C0,C1,2),  0),
4861  SR_CORE ("trcvipcssctlr", CPENC (2,1,C0,C3,2),  0),
4862  SR_CORE ("trcvissctlr",   CPENC (2,1,C0,C2,2),  0),
4863  SR_CORE ("trcvmidcctlr0", CPENC (2,1,C3,C2,2),  0),
4864  SR_CORE ("trcvmidcctlr1", CPENC (2,1,C3,C3,2),  0),
4865  SR_CORE ("trcvmidcvr0",   CPENC (2,1,C3,C0,1),  0),
4866  SR_CORE ("trcvmidcvr1",   CPENC (2,1,C3,C2,1),  0),
4867  SR_CORE ("trcvmidcvr2",   CPENC (2,1,C3,C4,1),  0),
4868  SR_CORE ("trcvmidcvr3",   CPENC (2,1,C3,C6,1),  0),
4869  SR_CORE ("trcvmidcvr4",   CPENC (2,1,C3,C8,1),  0),
4870  SR_CORE ("trcvmidcvr5",   CPENC (2,1,C3,C10,1), 0),
4871  SR_CORE ("trcvmidcvr6",   CPENC (2,1,C3,C12,1), 0),
4872  SR_CORE ("trcvmidcvr7",   CPENC (2,1,C3,C14,1), 0),
4873  SR_CORE ("trclar",        CPENC (2,1,C7,C12,6), F_REG_WRITE),
4874  SR_CORE ("trcoslar",      CPENC (2,1,C1,C0,4),  F_REG_WRITE),
4875
4876  SR_CORE ("csrcr_el0",     CPENC (2,3,C8,C0,0),  0),
4877  SR_CORE ("csrptr_el0",    CPENC (2,3,C8,C0,1),  0),
4878  SR_CORE ("csridr_el0",    CPENC (2,3,C8,C0,2),  F_REG_READ),
4879  SR_CORE ("csrptridx_el0", CPENC (2,3,C8,C0,3),  F_REG_READ),
4880  SR_CORE ("csrcr_el1",     CPENC (2,0,C8,C0,0),  0),
4881  SR_CORE ("csrcr_el12",    CPENC (2,5,C8,C0,0),  0),
4882  SR_CORE ("csrptr_el1",    CPENC (2,0,C8,C0,1),  0),
4883  SR_CORE ("csrptr_el12",   CPENC (2,5,C8,C0,1),  0),
4884  SR_CORE ("csrptridx_el1", CPENC (2,0,C8,C0,3),  F_REG_READ),
4885  SR_CORE ("csrcr_el2",     CPENC (2,4,C8,C0,0),  0),
4886  SR_CORE ("csrptr_el2",    CPENC (2,4,C8,C0,1),  0),
4887  SR_CORE ("csrptridx_el2", CPENC (2,4,C8,C0,3),  F_REG_READ),
4888
4889  SR_LOR ("lorid_el1",      CPENC (3,0,C10,C4,7),  F_REG_READ),
4890  SR_LOR ("lorc_el1",       CPENC (3,0,C10,C4,3),  0),
4891  SR_LOR ("lorea_el1",      CPENC (3,0,C10,C4,1),  0),
4892  SR_LOR ("lorn_el1",       CPENC (3,0,C10,C4,2),  0),
4893  SR_LOR ("lorsa_el1",      CPENC (3,0,C10,C4,0),  0),
4894
4895  SR_CORE ("icc_ctlr_el3",  CPENC (3,6,C12,C12,4), 0),
4896  SR_CORE ("icc_sre_el1",   CPENC (3,0,C12,C12,5), 0),
4897  SR_CORE ("icc_sre_el2",   CPENC (3,4,C12,C9,5),  0),
4898  SR_CORE ("icc_sre_el3",   CPENC (3,6,C12,C12,5), 0),
4899  SR_CORE ("ich_vtr_el2",   CPENC (3,4,C12,C11,1), F_REG_READ),
4900
4901  SR_CORE ("brbcr_el1",     CPENC (2,1,C9,C0,0),  0),
4902  SR_CORE ("brbcr_el12",    CPENC (2,5,C9,C0,0),  0),
4903  SR_CORE ("brbfcr_el1",    CPENC (2,1,C9,C0,1),  0),
4904  SR_CORE ("brbts_el1",     CPENC (2,1,C9,C0,2),  0),
4905  SR_CORE ("brbinfinj_el1", CPENC (2,1,C9,C1,0),  0),
4906  SR_CORE ("brbsrcinj_el1", CPENC (2,1,C9,C1,1),  0),
4907  SR_CORE ("brbtgtinj_el1", CPENC (2,1,C9,C1,2),  0),
4908  SR_CORE ("brbidr0_el1",   CPENC (2,1,C9,C2,0),  F_REG_READ),
4909  SR_CORE ("brbcr_el2",     CPENC (2,4,C9,C0,0),  0),
4910  SR_CORE ("brbsrc0_el1",   CPENC (2,1,C8,C0,1),  F_REG_READ),
4911  SR_CORE ("brbsrc1_el1",   CPENC (2,1,C8,C1,1),  F_REG_READ),
4912  SR_CORE ("brbsrc2_el1",   CPENC (2,1,C8,C2,1),  F_REG_READ),
4913  SR_CORE ("brbsrc3_el1",   CPENC (2,1,C8,C3,1),  F_REG_READ),
4914  SR_CORE ("brbsrc4_el1",   CPENC (2,1,C8,C4,1),  F_REG_READ),
4915  SR_CORE ("brbsrc5_el1",   CPENC (2,1,C8,C5,1),  F_REG_READ),
4916  SR_CORE ("brbsrc6_el1",   CPENC (2,1,C8,C6,1),  F_REG_READ),
4917  SR_CORE ("brbsrc7_el1",   CPENC (2,1,C8,C7,1),  F_REG_READ),
4918  SR_CORE ("brbsrc8_el1",   CPENC (2,1,C8,C8,1),  F_REG_READ),
4919  SR_CORE ("brbsrc9_el1",   CPENC (2,1,C8,C9,1),  F_REG_READ),
4920  SR_CORE ("brbsrc10_el1",  CPENC (2,1,C8,C10,1), F_REG_READ),
4921  SR_CORE ("brbsrc11_el1",  CPENC (2,1,C8,C11,1), F_REG_READ),
4922  SR_CORE ("brbsrc12_el1",  CPENC (2,1,C8,C12,1), F_REG_READ),
4923  SR_CORE ("brbsrc13_el1",  CPENC (2,1,C8,C13,1), F_REG_READ),
4924  SR_CORE ("brbsrc14_el1",  CPENC (2,1,C8,C14,1), F_REG_READ),
4925  SR_CORE ("brbsrc15_el1",  CPENC (2,1,C8,C15,1), F_REG_READ),
4926  SR_CORE ("brbsrc16_el1",  CPENC (2,1,C8,C0,5),  F_REG_READ),
4927  SR_CORE ("brbsrc17_el1",  CPENC (2,1,C8,C1,5),  F_REG_READ),
4928  SR_CORE ("brbsrc18_el1",  CPENC (2,1,C8,C2,5),  F_REG_READ),
4929  SR_CORE ("brbsrc19_el1",  CPENC (2,1,C8,C3,5),  F_REG_READ),
4930  SR_CORE ("brbsrc20_el1",  CPENC (2,1,C8,C4,5),  F_REG_READ),
4931  SR_CORE ("brbsrc21_el1",  CPENC (2,1,C8,C5,5),  F_REG_READ),
4932  SR_CORE ("brbsrc22_el1",  CPENC (2,1,C8,C6,5),  F_REG_READ),
4933  SR_CORE ("brbsrc23_el1",  CPENC (2,1,C8,C7,5),  F_REG_READ),
4934  SR_CORE ("brbsrc24_el1",  CPENC (2,1,C8,C8,5),  F_REG_READ),
4935  SR_CORE ("brbsrc25_el1",  CPENC (2,1,C8,C9,5),  F_REG_READ),
4936  SR_CORE ("brbsrc26_el1",  CPENC (2,1,C8,C10,5), F_REG_READ),
4937  SR_CORE ("brbsrc27_el1",  CPENC (2,1,C8,C11,5), F_REG_READ),
4938  SR_CORE ("brbsrc28_el1",  CPENC (2,1,C8,C12,5), F_REG_READ),
4939  SR_CORE ("brbsrc29_el1",  CPENC (2,1,C8,C13,5), F_REG_READ),
4940  SR_CORE ("brbsrc30_el1",  CPENC (2,1,C8,C14,5), F_REG_READ),
4941  SR_CORE ("brbsrc31_el1",  CPENC (2,1,C8,C15,5), F_REG_READ),
4942  SR_CORE ("brbtgt0_el1",   CPENC (2,1,C8,C0,2),  F_REG_READ),
4943  SR_CORE ("brbtgt1_el1",   CPENC (2,1,C8,C1,2),  F_REG_READ),
4944  SR_CORE ("brbtgt2_el1",   CPENC (2,1,C8,C2,2),  F_REG_READ),
4945  SR_CORE ("brbtgt3_el1",   CPENC (2,1,C8,C3,2),  F_REG_READ),
4946  SR_CORE ("brbtgt4_el1",   CPENC (2,1,C8,C4,2),  F_REG_READ),
4947  SR_CORE ("brbtgt5_el1",   CPENC (2,1,C8,C5,2),  F_REG_READ),
4948  SR_CORE ("brbtgt6_el1",   CPENC (2,1,C8,C6,2),  F_REG_READ),
4949  SR_CORE ("brbtgt7_el1",   CPENC (2,1,C8,C7,2),  F_REG_READ),
4950  SR_CORE ("brbtgt8_el1",   CPENC (2,1,C8,C8,2),  F_REG_READ),
4951  SR_CORE ("brbtgt9_el1",   CPENC (2,1,C8,C9,2),  F_REG_READ),
4952  SR_CORE ("brbtgt10_el1",  CPENC (2,1,C8,C10,2), F_REG_READ),
4953  SR_CORE ("brbtgt11_el1",  CPENC (2,1,C8,C11,2), F_REG_READ),
4954  SR_CORE ("brbtgt12_el1",  CPENC (2,1,C8,C12,2), F_REG_READ),
4955  SR_CORE ("brbtgt13_el1",  CPENC (2,1,C8,C13,2), F_REG_READ),
4956  SR_CORE ("brbtgt14_el1",  CPENC (2,1,C8,C14,2), F_REG_READ),
4957  SR_CORE ("brbtgt15_el1",  CPENC (2,1,C8,C15,2), F_REG_READ),
4958  SR_CORE ("brbtgt16_el1",  CPENC (2,1,C8,C0,6),  F_REG_READ),
4959  SR_CORE ("brbtgt17_el1",  CPENC (2,1,C8,C1,6),  F_REG_READ),
4960  SR_CORE ("brbtgt18_el1",  CPENC (2,1,C8,C2,6),  F_REG_READ),
4961  SR_CORE ("brbtgt19_el1",  CPENC (2,1,C8,C3,6),  F_REG_READ),
4962  SR_CORE ("brbtgt20_el1",  CPENC (2,1,C8,C4,6),  F_REG_READ),
4963  SR_CORE ("brbtgt21_el1",  CPENC (2,1,C8,C5,6),  F_REG_READ),
4964  SR_CORE ("brbtgt22_el1",  CPENC (2,1,C8,C6,6),  F_REG_READ),
4965  SR_CORE ("brbtgt23_el1",  CPENC (2,1,C8,C7,6),  F_REG_READ),
4966  SR_CORE ("brbtgt24_el1",  CPENC (2,1,C8,C8,6),  F_REG_READ),
4967  SR_CORE ("brbtgt25_el1",  CPENC (2,1,C8,C9,6),  F_REG_READ),
4968  SR_CORE ("brbtgt26_el1",  CPENC (2,1,C8,C10,6), F_REG_READ),
4969  SR_CORE ("brbtgt27_el1",  CPENC (2,1,C8,C11,6), F_REG_READ),
4970  SR_CORE ("brbtgt28_el1",  CPENC (2,1,C8,C12,6), F_REG_READ),
4971  SR_CORE ("brbtgt29_el1",  CPENC (2,1,C8,C13,6), F_REG_READ),
4972  SR_CORE ("brbtgt30_el1",  CPENC (2,1,C8,C14,6), F_REG_READ),
4973  SR_CORE ("brbtgt31_el1",  CPENC (2,1,C8,C15,6), F_REG_READ),
4974  SR_CORE ("brbinf0_el1",   CPENC (2,1,C8,C0,0),  F_REG_READ),
4975  SR_CORE ("brbinf1_el1",   CPENC (2,1,C8,C1,0),  F_REG_READ),
4976  SR_CORE ("brbinf2_el1",   CPENC (2,1,C8,C2,0),  F_REG_READ),
4977  SR_CORE ("brbinf3_el1",   CPENC (2,1,C8,C3,0),  F_REG_READ),
4978  SR_CORE ("brbinf4_el1",   CPENC (2,1,C8,C4,0),  F_REG_READ),
4979  SR_CORE ("brbinf5_el1",   CPENC (2,1,C8,C5,0),  F_REG_READ),
4980  SR_CORE ("brbinf6_el1",   CPENC (2,1,C8,C6,0),  F_REG_READ),
4981  SR_CORE ("brbinf7_el1",   CPENC (2,1,C8,C7,0),  F_REG_READ),
4982  SR_CORE ("brbinf8_el1",   CPENC (2,1,C8,C8,0),  F_REG_READ),
4983  SR_CORE ("brbinf9_el1",   CPENC (2,1,C8,C9,0),  F_REG_READ),
4984  SR_CORE ("brbinf10_el1",  CPENC (2,1,C8,C10,0), F_REG_READ),
4985  SR_CORE ("brbinf11_el1",  CPENC (2,1,C8,C11,0), F_REG_READ),
4986  SR_CORE ("brbinf12_el1",  CPENC (2,1,C8,C12,0), F_REG_READ),
4987  SR_CORE ("brbinf13_el1",  CPENC (2,1,C8,C13,0), F_REG_READ),
4988  SR_CORE ("brbinf14_el1",  CPENC (2,1,C8,C14,0), F_REG_READ),
4989  SR_CORE ("brbinf15_el1",  CPENC (2,1,C8,C15,0), F_REG_READ),
4990  SR_CORE ("brbinf16_el1",  CPENC (2,1,C8,C0,4),  F_REG_READ),
4991  SR_CORE ("brbinf17_el1",  CPENC (2,1,C8,C1,4),  F_REG_READ),
4992  SR_CORE ("brbinf18_el1",  CPENC (2,1,C8,C2,4),  F_REG_READ),
4993  SR_CORE ("brbinf19_el1",  CPENC (2,1,C8,C3,4),  F_REG_READ),
4994  SR_CORE ("brbinf20_el1",  CPENC (2,1,C8,C4,4),  F_REG_READ),
4995  SR_CORE ("brbinf21_el1",  CPENC (2,1,C8,C5,4),  F_REG_READ),
4996  SR_CORE ("brbinf22_el1",  CPENC (2,1,C8,C6,4),  F_REG_READ),
4997  SR_CORE ("brbinf23_el1",  CPENC (2,1,C8,C7,4),  F_REG_READ),
4998  SR_CORE ("brbinf24_el1",  CPENC (2,1,C8,C8,4),  F_REG_READ),
4999  SR_CORE ("brbinf25_el1",  CPENC (2,1,C8,C9,4),  F_REG_READ),
5000  SR_CORE ("brbinf26_el1",  CPENC (2,1,C8,C10,4), F_REG_READ),
5001  SR_CORE ("brbinf27_el1",  CPENC (2,1,C8,C11,4), F_REG_READ),
5002  SR_CORE ("brbinf28_el1",  CPENC (2,1,C8,C12,4), F_REG_READ),
5003  SR_CORE ("brbinf29_el1",  CPENC (2,1,C8,C13,4), F_REG_READ),
5004  SR_CORE ("brbinf30_el1",  CPENC (2,1,C8,C14,4), F_REG_READ),
5005  SR_CORE ("brbinf31_el1",  CPENC (2,1,C8,C15,4), F_REG_READ),
5006
5007  SR_CORE ("accdata_el1",   CPENC (3,0,C13,C0,5), 0),
5008
5009  SR_CORE ("mfar_el3",      CPENC (3,6,C6,C0,5), 0),
5010  SR_CORE ("gpccr_el3",     CPENC (3,6,C2,C1,6), 0),
5011  SR_CORE ("gptbr_el3",     CPENC (3,6,C2,C1,4), 0),
5012
5013  SR_SME ("svcr",             CPENC (3,3,C4,C2,2),  0),
5014  SR_SME ("id_aa64smfr0_el1", CPENC (3,0,C0,C4,5),  F_REG_READ),
5015  SR_SME ("smcr_el1",         CPENC (3,0,C1,C2,6),  0),
5016  SR_SME ("smcr_el12",        CPENC (3,5,C1,C2,6),  0),
5017  SR_SME ("smcr_el2",         CPENC (3,4,C1,C2,6),  0),
5018  SR_SME ("smcr_el3",         CPENC (3,6,C1,C2,6),  0),
5019  SR_SME ("smpri_el1",        CPENC (3,0,C1,C2,4),  0),
5020  SR_SME ("smprimap_el2",     CPENC (3,4,C1,C2,5),  0),
5021  SR_SME ("smidr_el1",        CPENC (3,1,C0,C0,6),  F_REG_READ),
5022  SR_SME ("tpidr2_el0",       CPENC (3,3,C13,C0,5), 0),
5023  SR_SME ("mpamsm_el1",       CPENC (3,0,C10,C5,3), 0),
5024
5025  SR_AMU ("amcr_el0",           CPENC (3,3,C13,C2,0),   0),
5026  SR_AMU ("amcfgr_el0",         CPENC (3,3,C13,C2,1),   F_REG_READ),
5027  SR_AMU ("amcgcr_el0",         CPENC (3,3,C13,C2,2),   F_REG_READ),
5028  SR_AMU ("amuserenr_el0",      CPENC (3,3,C13,C2,3),   0),
5029  SR_AMU ("amcntenclr0_el0",    CPENC (3,3,C13,C2,4),   0),
5030  SR_AMU ("amcntenset0_el0",    CPENC (3,3,C13,C2,5),   0),
5031  SR_AMU ("amcntenclr1_el0",    CPENC (3,3,C13,C3,0),   0),
5032  SR_AMU ("amcntenset1_el0",    CPENC (3,3,C13,C3,1),   0),
5033  SR_AMU ("amevcntr00_el0",     CPENC (3,3,C13,C4,0),   0),
5034  SR_AMU ("amevcntr01_el0",     CPENC (3,3,C13,C4,1),   0),
5035  SR_AMU ("amevcntr02_el0",     CPENC (3,3,C13,C4,2),   0),
5036  SR_AMU ("amevcntr03_el0",     CPENC (3,3,C13,C4,3),   0),
5037  SR_AMU ("amevtyper00_el0",    CPENC (3,3,C13,C6,0),   F_REG_READ),
5038  SR_AMU ("amevtyper01_el0",    CPENC (3,3,C13,C6,1),   F_REG_READ),
5039  SR_AMU ("amevtyper02_el0",    CPENC (3,3,C13,C6,2),   F_REG_READ),
5040  SR_AMU ("amevtyper03_el0",    CPENC (3,3,C13,C6,3),   F_REG_READ),
5041  SR_AMU ("amevcntr10_el0",     CPENC (3,3,C13,C12,0),  0),
5042  SR_AMU ("amevcntr11_el0",     CPENC (3,3,C13,C12,1),  0),
5043  SR_AMU ("amevcntr12_el0",     CPENC (3,3,C13,C12,2),  0),
5044  SR_AMU ("amevcntr13_el0",     CPENC (3,3,C13,C12,3),  0),
5045  SR_AMU ("amevcntr14_el0",     CPENC (3,3,C13,C12,4),  0),
5046  SR_AMU ("amevcntr15_el0",     CPENC (3,3,C13,C12,5),  0),
5047  SR_AMU ("amevcntr16_el0",     CPENC (3,3,C13,C12,6),  0),
5048  SR_AMU ("amevcntr17_el0",     CPENC (3,3,C13,C12,7),  0),
5049  SR_AMU ("amevcntr18_el0",     CPENC (3,3,C13,C13,0),  0),
5050  SR_AMU ("amevcntr19_el0",     CPENC (3,3,C13,C13,1),  0),
5051  SR_AMU ("amevcntr110_el0",    CPENC (3,3,C13,C13,2),  0),
5052  SR_AMU ("amevcntr111_el0",    CPENC (3,3,C13,C13,3),  0),
5053  SR_AMU ("amevcntr112_el0",    CPENC (3,3,C13,C13,4),  0),
5054  SR_AMU ("amevcntr113_el0",    CPENC (3,3,C13,C13,5),  0),
5055  SR_AMU ("amevcntr114_el0",    CPENC (3,3,C13,C13,6),  0),
5056  SR_AMU ("amevcntr115_el0",    CPENC (3,3,C13,C13,7),  0),
5057  SR_AMU ("amevtyper10_el0",    CPENC (3,3,C13,C14,0),  0),
5058  SR_AMU ("amevtyper11_el0",    CPENC (3,3,C13,C14,1),  0),
5059  SR_AMU ("amevtyper12_el0",    CPENC (3,3,C13,C14,2),  0),
5060  SR_AMU ("amevtyper13_el0",    CPENC (3,3,C13,C14,3),  0),
5061  SR_AMU ("amevtyper14_el0",    CPENC (3,3,C13,C14,4),  0),
5062  SR_AMU ("amevtyper15_el0",    CPENC (3,3,C13,C14,5),  0),
5063  SR_AMU ("amevtyper16_el0",    CPENC (3,3,C13,C14,6),  0),
5064  SR_AMU ("amevtyper17_el0",    CPENC (3,3,C13,C14,7),  0),
5065  SR_AMU ("amevtyper18_el0",    CPENC (3,3,C13,C15,0),  0),
5066  SR_AMU ("amevtyper19_el0",    CPENC (3,3,C13,C15,1),  0),
5067  SR_AMU ("amevtyper110_el0",   CPENC (3,3,C13,C15,2),  0),
5068  SR_AMU ("amevtyper111_el0",   CPENC (3,3,C13,C15,3),  0),
5069  SR_AMU ("amevtyper112_el0",   CPENC (3,3,C13,C15,4),  0),
5070  SR_AMU ("amevtyper113_el0",   CPENC (3,3,C13,C15,5),  0),
5071  SR_AMU ("amevtyper114_el0",   CPENC (3,3,C13,C15,6),  0),
5072  SR_AMU ("amevtyper115_el0",   CPENC (3,3,C13,C15,7),  0),
5073
5074  SR_GIC ("icc_pmr_el1",        CPENC (3,0,C4,C6,0),    0),
5075  SR_GIC ("icc_iar0_el1",       CPENC (3,0,C12,C8,0),   F_REG_READ),
5076  SR_GIC ("icc_eoir0_el1",      CPENC (3,0,C12,C8,1),   F_REG_WRITE),
5077  SR_GIC ("icc_hppir0_el1",     CPENC (3,0,C12,C8,2),   F_REG_READ),
5078  SR_GIC ("icc_bpr0_el1",       CPENC (3,0,C12,C8,3),   0),
5079  SR_GIC ("icc_ap0r0_el1",      CPENC (3,0,C12,C8,4),   0),
5080  SR_GIC ("icc_ap0r1_el1",      CPENC (3,0,C12,C8,5),   0),
5081  SR_GIC ("icc_ap0r2_el1",      CPENC (3,0,C12,C8,6),   0),
5082  SR_GIC ("icc_ap0r3_el1",      CPENC (3,0,C12,C8,7),   0),
5083  SR_GIC ("icc_ap1r0_el1",      CPENC (3,0,C12,C9,0),   0),
5084  SR_GIC ("icc_ap1r1_el1",      CPENC (3,0,C12,C9,1),   0),
5085  SR_GIC ("icc_ap1r2_el1",      CPENC (3,0,C12,C9,2),   0),
5086  SR_GIC ("icc_ap1r3_el1",      CPENC (3,0,C12,C9,3),   0),
5087  SR_GIC ("icc_dir_el1",        CPENC (3,0,C12,C11,1),  F_REG_WRITE),
5088  SR_GIC ("icc_rpr_el1",        CPENC (3,0,C12,C11,3),  F_REG_READ),
5089  SR_GIC ("icc_sgi1r_el1",      CPENC (3,0,C12,C11,5),  F_REG_WRITE),
5090  SR_GIC ("icc_asgi1r_el1",     CPENC (3,0,C12,C11,6),  F_REG_WRITE),
5091  SR_GIC ("icc_sgi0r_el1",      CPENC (3,0,C12,C11,7),  F_REG_WRITE),
5092  SR_GIC ("icc_iar1_el1",       CPENC (3,0,C12,C12,0),  F_REG_READ),
5093  SR_GIC ("icc_eoir1_el1",      CPENC (3,0,C12,C12,1),  F_REG_WRITE),
5094  SR_GIC ("icc_hppir1_el1",     CPENC (3,0,C12,C12,2),  F_REG_READ),
5095  SR_GIC ("icc_bpr1_el1",       CPENC (3,0,C12,C12,3),  0),
5096  SR_GIC ("icc_ctlr_el1",       CPENC (3,0,C12,C12,4),  0),
5097  SR_GIC ("icc_igrpen0_el1",    CPENC (3,0,C12,C12,6),  0),
5098  SR_GIC ("icc_igrpen1_el1",    CPENC (3,0,C12,C12,7),  0),
5099  SR_GIC ("ich_ap0r0_el2",      CPENC (3,4,C12,C8,0),   0),
5100  SR_GIC ("ich_ap0r1_el2",      CPENC (3,4,C12,C8,1),   0),
5101  SR_GIC ("ich_ap0r2_el2",      CPENC (3,4,C12,C8,2),   0),
5102  SR_GIC ("ich_ap0r3_el2",      CPENC (3,4,C12,C8,3),   0),
5103  SR_GIC ("ich_ap1r0_el2",      CPENC (3,4,C12,C9,0),   0),
5104  SR_GIC ("ich_ap1r1_el2",      CPENC (3,4,C12,C9,1),   0),
5105  SR_GIC ("ich_ap1r2_el2",      CPENC (3,4,C12,C9,2),   0),
5106  SR_GIC ("ich_ap1r3_el2",      CPENC (3,4,C12,C9,3),   0),
5107  SR_GIC ("ich_hcr_el2",        CPENC (3,4,C12,C11,0),  0),
5108  SR_GIC ("ich_misr_el2",       CPENC (3,4,C12,C11,2),  F_REG_READ),
5109  SR_GIC ("ich_eisr_el2",       CPENC (3,4,C12,C11,3),  F_REG_READ),
5110  SR_GIC ("ich_elrsr_el2",      CPENC (3,4,C12,C11,5),  F_REG_READ),
5111  SR_GIC ("ich_vmcr_el2",       CPENC (3,4,C12,C11,7),  0),
5112  SR_GIC ("ich_lr0_el2",        CPENC (3,4,C12,C12,0),  0),
5113  SR_GIC ("ich_lr1_el2",        CPENC (3,4,C12,C12,1),  0),
5114  SR_GIC ("ich_lr2_el2",        CPENC (3,4,C12,C12,2),  0),
5115  SR_GIC ("ich_lr3_el2",        CPENC (3,4,C12,C12,3),  0),
5116  SR_GIC ("ich_lr4_el2",        CPENC (3,4,C12,C12,4),  0),
5117  SR_GIC ("ich_lr5_el2",        CPENC (3,4,C12,C12,5),  0),
5118  SR_GIC ("ich_lr6_el2",        CPENC (3,4,C12,C12,6),  0),
5119  SR_GIC ("ich_lr7_el2",        CPENC (3,4,C12,C12,7),  0),
5120  SR_GIC ("ich_lr8_el2",        CPENC (3,4,C12,C13,0),  0),
5121  SR_GIC ("ich_lr9_el2",        CPENC (3,4,C12,C13,1),  0),
5122  SR_GIC ("ich_lr10_el2",       CPENC (3,4,C12,C13,2),  0),
5123  SR_GIC ("ich_lr11_el2",       CPENC (3,4,C12,C13,3),  0),
5124  SR_GIC ("ich_lr12_el2",       CPENC (3,4,C12,C13,4),  0),
5125  SR_GIC ("ich_lr13_el2",       CPENC (3,4,C12,C13,5),  0),
5126  SR_GIC ("ich_lr14_el2",       CPENC (3,4,C12,C13,6),  0),
5127  SR_GIC ("ich_lr15_el2",       CPENC (3,4,C12,C13,7),  0),
5128  SR_GIC ("icc_igrpen1_el3",    CPENC (3,6,C12,C12,7),  0),
5129
5130  SR_V8_6 ("amcg1idr_el0",      CPENC (3,3,C13,C2,6),   F_REG_READ),
5131  SR_V8_6 ("cntpctss_el0",      CPENC (3,3,C14,C0,5),   F_REG_READ),
5132  SR_V8_6 ("cntvctss_el0",      CPENC (3,3,C14,C0,6),   F_REG_READ),
5133  SR_V8_6 ("hfgrtr_el2",        CPENC (3,4,C1,C1,4),    0),
5134  SR_V8_6 ("hfgwtr_el2",        CPENC (3,4,C1,C1,5),    0),
5135  SR_V8_6 ("hfgitr_el2",        CPENC (3,4,C1,C1,6),    0),
5136  SR_V8_6 ("hdfgrtr_el2",       CPENC (3,4,C3,C1,4),    0),
5137  SR_V8_6 ("hdfgwtr_el2",       CPENC (3,4,C3,C1,5),    0),
5138  SR_V8_6 ("hafgrtr_el2",       CPENC (3,4,C3,C1,6),    0),
5139  SR_V8_6 ("amevcntvoff00_el2", CPENC (3,4,C13,C8,0),   0),
5140  SR_V8_6 ("amevcntvoff01_el2", CPENC (3,4,C13,C8,1),   0),
5141  SR_V8_6 ("amevcntvoff02_el2", CPENC (3,4,C13,C8,2),   0),
5142  SR_V8_6 ("amevcntvoff03_el2", CPENC (3,4,C13,C8,3),   0),
5143  SR_V8_6 ("amevcntvoff04_el2", CPENC (3,4,C13,C8,4),   0),
5144  SR_V8_6 ("amevcntvoff05_el2", CPENC (3,4,C13,C8,5),   0),
5145  SR_V8_6 ("amevcntvoff06_el2", CPENC (3,4,C13,C8,6),   0),
5146  SR_V8_6 ("amevcntvoff07_el2", CPENC (3,4,C13,C8,7),   0),
5147  SR_V8_6 ("amevcntvoff08_el2", CPENC (3,4,C13,C9,0),   0),
5148  SR_V8_6 ("amevcntvoff09_el2", CPENC (3,4,C13,C9,1),   0),
5149  SR_V8_6 ("amevcntvoff010_el2", CPENC (3,4,C13,C9,2),  0),
5150  SR_V8_6 ("amevcntvoff011_el2", CPENC (3,4,C13,C9,3),  0),
5151  SR_V8_6 ("amevcntvoff012_el2", CPENC (3,4,C13,C9,4),  0),
5152  SR_V8_6 ("amevcntvoff013_el2", CPENC (3,4,C13,C9,5),  0),
5153  SR_V8_6 ("amevcntvoff014_el2", CPENC (3,4,C13,C9,6),  0),
5154  SR_V8_6 ("amevcntvoff015_el2", CPENC (3,4,C13,C9,7),  0),
5155  SR_V8_6 ("amevcntvoff10_el2", CPENC (3,4,C13,C10,0),  0),
5156  SR_V8_6 ("amevcntvoff11_el2", CPENC (3,4,C13,C10,1),  0),
5157  SR_V8_6 ("amevcntvoff12_el2", CPENC (3,4,C13,C10,2),  0),
5158  SR_V8_6 ("amevcntvoff13_el2", CPENC (3,4,C13,C10,3),  0),
5159  SR_V8_6 ("amevcntvoff14_el2", CPENC (3,4,C13,C10,4),  0),
5160  SR_V8_6 ("amevcntvoff15_el2", CPENC (3,4,C13,C10,5),  0),
5161  SR_V8_6 ("amevcntvoff16_el2", CPENC (3,4,C13,C10,6),  0),
5162  SR_V8_6 ("amevcntvoff17_el2", CPENC (3,4,C13,C10,7),  0),
5163  SR_V8_6 ("amevcntvoff18_el2", CPENC (3,4,C13,C11,0),  0),
5164  SR_V8_6 ("amevcntvoff19_el2", CPENC (3,4,C13,C11,1),  0),
5165  SR_V8_6 ("amevcntvoff110_el2", CPENC (3,4,C13,C11,2), 0),
5166  SR_V8_6 ("amevcntvoff111_el2", CPENC (3,4,C13,C11,3), 0),
5167  SR_V8_6 ("amevcntvoff112_el2", CPENC (3,4,C13,C11,4), 0),
5168  SR_V8_6 ("amevcntvoff113_el2", CPENC (3,4,C13,C11,5), 0),
5169  SR_V8_6 ("amevcntvoff114_el2", CPENC (3,4,C13,C11,6), 0),
5170  SR_V8_6 ("amevcntvoff115_el2", CPENC (3,4,C13,C11,7), 0),
5171  SR_V8_6 ("cntpoff_el2",       CPENC (3,4,C14,C0,6),   0),
5172
5173  SR_V8_7 ("pmsnevfr_el1",      CPENC (3,0,C9,C9,1),    0),
5174  SR_V8_7 ("hcrx_el2",          CPENC (3,4,C1,C2,2),    0),
5175
5176  SR_V8_8 ("allint",            CPENC (3,0,C4,C3,0),    0),
5177  SR_V8_8 ("icc_nmiar1_el1",    CPENC (3,0,C12,C9,5),   F_REG_READ),
5178
5179  { 0, CPENC (0,0,0,0,0), 0, 0 }
5180};
5181
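/* Return TRUE if a system register carrying the flags REG_FLAGS is
   deprecated.  */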
5182bool
5183aarch64_sys_reg_deprecated_p (const uint32_t reg_flags)
5184{
5185  return (reg_flags & F_DEPRECATED) != 0;
5186}
5187
5188/* The CPENC below is fairly misleading: the values here are not in CPENC
5189   form, they are in op1:op2 form.  The fields are encoded by
5190   ins_pstatefield, which simply shifts the value by the width of each field
5191   in a loop, so if you CPENC them only the first field would be set and the
5192   rest would be masked out to 0.  As an example, op1 = 3, op2 = 2 (the "dit"
5193   entry below): CPENC would produce a value of 0b110000000001000000 (0x30040)
5194   while what you want is 0b011010 (0x1a).  */
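/* For instance, the "daifset" entry below stores 0x1e (0b011110), i.e.
   op1 = 0b011 and op2 = 0b110, and F_REG_MAX_VALUE (15) limits the accepted
   immediate to 15, matching the 4-bit DAIFSet field.  */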
5195const aarch64_sys_reg aarch64_pstatefields [] =
5196{
5197  SR_CORE ("spsel",	  0x05,	F_REG_MAX_VALUE (1)),
5198  SR_CORE ("daifset",	  0x1e,	F_REG_MAX_VALUE (15)),
5199  SR_CORE ("daifclr",	  0x1f,	F_REG_MAX_VALUE (15)),
5200  SR_PAN  ("pan",	  0x04, F_REG_MAX_VALUE (1)),
5201  SR_V8_2 ("uao",	  0x03, F_REG_MAX_VALUE (1)),
5202  SR_SSBS ("ssbs",	  0x19, F_REG_MAX_VALUE (1)),
5203  SR_V8_4 ("dit",	  0x1a,	F_REG_MAX_VALUE (1)),
5204  SR_MEMTAG ("tco",	  0x1c,	F_REG_MAX_VALUE (1)),
5205  SR_SME  ("svcrsm",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x2,0x1)
5206				| F_REG_MAX_VALUE (1)),
5207  SR_SME  ("svcrza",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x4,0x1)
5208				| F_REG_MAX_VALUE (1)),
5209  SR_SME  ("svcrsmza",	  0x1b, PSTATE_ENCODE_CRM_AND_IMM(0x6,0x1)
5210				| F_REG_MAX_VALUE (1)),
5211  SR_V8_8 ("allint",	  0x08,	F_REG_MAX_VALUE (1)),
5212  { 0,	  CPENC (0,0,0,0,0), 0, 0 },
5213};
5214
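/* Return TRUE if the PSTATE field described by the table entry REG is
   supported by the architecture feature set FEATURES.  */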
5215bool
5216aarch64_pstatefield_supported_p (const aarch64_feature_set features,
5217				 const aarch64_sys_reg *reg)
5218{
5219  if (!(reg->flags & F_ARCHEXT))
5220    return true;
5221
5222  return AARCH64_CPU_HAS_ALL_FEATURES (features, reg->features);
5223}
5224
5225const aarch64_sys_ins_reg aarch64_sys_regs_ic[] =
5226{
5227    { "ialluis", CPENS(0,C7,C1,0), 0 },
5228    { "iallu",   CPENS(0,C7,C5,0), 0 },
5229    { "ivau",    CPENS (3, C7, C5, 1), F_HASXT },
5230    { 0, CPENS(0,0,0,0), 0 }
5231};
5232
5233const aarch64_sys_ins_reg aarch64_sys_regs_dc[] =
5234{
5235    { "zva",	    CPENS (3, C7, C4, 1),  F_HASXT },
5236    { "gva",	    CPENS (3, C7, C4, 3),  F_HASXT | F_ARCHEXT },
5237    { "gzva",	    CPENS (3, C7, C4, 4),  F_HASXT | F_ARCHEXT },
5238    { "ivac",       CPENS (0, C7, C6, 1),  F_HASXT },
5239    { "igvac",      CPENS (0, C7, C6, 3),  F_HASXT | F_ARCHEXT },
5240    { "igsw",       CPENS (0, C7, C6, 4),  F_HASXT | F_ARCHEXT },
5241    { "isw",	    CPENS (0, C7, C6, 2),  F_HASXT },
5242    { "igdvac",	    CPENS (0, C7, C6, 5),  F_HASXT | F_ARCHEXT },
5243    { "igdsw",	    CPENS (0, C7, C6, 6),  F_HASXT | F_ARCHEXT },
5244    { "cvac",       CPENS (3, C7, C10, 1), F_HASXT },
5245    { "cgvac",      CPENS (3, C7, C10, 3), F_HASXT | F_ARCHEXT },
5246    { "cgdvac",     CPENS (3, C7, C10, 5), F_HASXT | F_ARCHEXT },
5247    { "csw",	    CPENS (0, C7, C10, 2), F_HASXT },
5248    { "cgsw",       CPENS (0, C7, C10, 4), F_HASXT | F_ARCHEXT },
5249    { "cgdsw",	    CPENS (0, C7, C10, 6), F_HASXT | F_ARCHEXT },
5250    { "cvau",       CPENS (3, C7, C11, 1), F_HASXT },
5251    { "cvap",       CPENS (3, C7, C12, 1), F_HASXT | F_ARCHEXT },
5252    { "cgvap",      CPENS (3, C7, C12, 3), F_HASXT | F_ARCHEXT },
5253    { "cgdvap",     CPENS (3, C7, C12, 5), F_HASXT | F_ARCHEXT },
5254    { "cvadp",      CPENS (3, C7, C13, 1), F_HASXT | F_ARCHEXT },
5255    { "cgvadp",     CPENS (3, C7, C13, 3), F_HASXT | F_ARCHEXT },
5256    { "cgdvadp",    CPENS (3, C7, C13, 5), F_HASXT | F_ARCHEXT },
5257    { "civac",      CPENS (3, C7, C14, 1), F_HASXT },
5258    { "cigvac",     CPENS (3, C7, C14, 3), F_HASXT | F_ARCHEXT },
5259    { "cigdvac",    CPENS (3, C7, C14, 5), F_HASXT | F_ARCHEXT },
5260    { "cisw",       CPENS (0, C7, C14, 2), F_HASXT },
5261    { "cigsw",      CPENS (0, C7, C14, 4), F_HASXT | F_ARCHEXT },
5262    { "cigdsw",     CPENS (0, C7, C14, 6), F_HASXT | F_ARCHEXT },
5263    { "cipapa",     CPENS (6, C7, C14, 1), F_HASXT },
5264    { "cigdpapa",   CPENS (6, C7, C14, 5), F_HASXT },
5265    { 0,       CPENS(0,0,0,0), 0 }
5266};
5267
5268const aarch64_sys_ins_reg aarch64_sys_regs_at[] =
5269{
5270    { "s1e1r",      CPENS (0, C7, C8, 0), F_HASXT },
5271    { "s1e1w",      CPENS (0, C7, C8, 1), F_HASXT },
5272    { "s1e0r",      CPENS (0, C7, C8, 2), F_HASXT },
5273    { "s1e0w",      CPENS (0, C7, C8, 3), F_HASXT },
5274    { "s12e1r",     CPENS (4, C7, C8, 4), F_HASXT },
5275    { "s12e1w",     CPENS (4, C7, C8, 5), F_HASXT },
5276    { "s12e0r",     CPENS (4, C7, C8, 6), F_HASXT },
5277    { "s12e0w",     CPENS (4, C7, C8, 7), F_HASXT },
5278    { "s1e2r",      CPENS (4, C7, C8, 0), F_HASXT },
5279    { "s1e2w",      CPENS (4, C7, C8, 1), F_HASXT },
5280    { "s1e3r",      CPENS (6, C7, C8, 0), F_HASXT },
5281    { "s1e3w",      CPENS (6, C7, C8, 1), F_HASXT },
5282    { "s1e1rp",     CPENS (0, C7, C9, 0), F_HASXT | F_ARCHEXT },
5283    { "s1e1wp",     CPENS (0, C7, C9, 1), F_HASXT | F_ARCHEXT },
5284    { 0,       CPENS(0,0,0,0), 0 }
5285};
5286
5287const aarch64_sys_ins_reg aarch64_sys_regs_tlbi[] =
5288{
5289    { "vmalle1",   CPENS(0,C8,C7,0), 0 },
5290    { "vae1",      CPENS (0, C8, C7, 1), F_HASXT },
5291    { "aside1",    CPENS (0, C8, C7, 2), F_HASXT },
5292    { "vaae1",     CPENS (0, C8, C7, 3), F_HASXT },
5293    { "vmalle1is", CPENS(0,C8,C3,0), 0 },
5294    { "vae1is",    CPENS (0, C8, C3, 1), F_HASXT },
5295    { "aside1is",  CPENS (0, C8, C3, 2), F_HASXT },
5296    { "vaae1is",   CPENS (0, C8, C3, 3), F_HASXT },
5297    { "ipas2e1is", CPENS (4, C8, C0, 1), F_HASXT },
5298    { "ipas2le1is",CPENS (4, C8, C0, 5), F_HASXT },
5299    { "ipas2e1",   CPENS (4, C8, C4, 1), F_HASXT },
5300    { "ipas2le1",  CPENS (4, C8, C4, 5), F_HASXT },
5301    { "vae2",      CPENS (4, C8, C7, 1), F_HASXT },
5302    { "vae2is",    CPENS (4, C8, C3, 1), F_HASXT },
5303    { "vmalls12e1",CPENS(4,C8,C7,6), 0 },
5304    { "vmalls12e1is",CPENS(4,C8,C3,6), 0 },
5305    { "vae3",      CPENS (6, C8, C7, 1), F_HASXT },
5306    { "vae3is",    CPENS (6, C8, C3, 1), F_HASXT },
5307    { "alle2",     CPENS(4,C8,C7,0), 0 },
5308    { "alle2is",   CPENS(4,C8,C3,0), 0 },
5309    { "alle1",     CPENS(4,C8,C7,4), 0 },
5310    { "alle1is",   CPENS(4,C8,C3,4), 0 },
5311    { "alle3",     CPENS(6,C8,C7,0), 0 },
5312    { "alle3is",   CPENS(6,C8,C3,0), 0 },
5313    { "vale1is",   CPENS (0, C8, C3, 5), F_HASXT },
5314    { "vale2is",   CPENS (4, C8, C3, 5), F_HASXT },
5315    { "vale3is",   CPENS (6, C8, C3, 5), F_HASXT },
5316    { "vaale1is",  CPENS (0, C8, C3, 7), F_HASXT },
5317    { "vale1",     CPENS (0, C8, C7, 5), F_HASXT },
5318    { "vale2",     CPENS (4, C8, C7, 5), F_HASXT },
5319    { "vale3",     CPENS (6, C8, C7, 5), F_HASXT },
5320    { "vaale1",    CPENS (0, C8, C7, 7), F_HASXT },
5321
5322    { "vmalle1os",    CPENS (0, C8, C1, 0), F_ARCHEXT },
5323    { "vae1os",       CPENS (0, C8, C1, 1), F_HASXT | F_ARCHEXT },
5324    { "aside1os",     CPENS (0, C8, C1, 2), F_HASXT | F_ARCHEXT },
5325    { "vaae1os",      CPENS (0, C8, C1, 3), F_HASXT | F_ARCHEXT },
5326    { "vale1os",      CPENS (0, C8, C1, 5), F_HASXT | F_ARCHEXT },
5327    { "vaale1os",     CPENS (0, C8, C1, 7), F_HASXT | F_ARCHEXT },
5328    { "ipas2e1os",    CPENS (4, C8, C4, 0), F_HASXT | F_ARCHEXT },
5329    { "ipas2le1os",   CPENS (4, C8, C4, 4), F_HASXT | F_ARCHEXT },
5330    { "vae2os",       CPENS (4, C8, C1, 1), F_HASXT | F_ARCHEXT },
5331    { "vale2os",      CPENS (4, C8, C1, 5), F_HASXT | F_ARCHEXT },
5332    { "vmalls12e1os", CPENS (4, C8, C1, 6), F_ARCHEXT },
5333    { "vae3os",       CPENS (6, C8, C1, 1), F_HASXT | F_ARCHEXT },
5334    { "vale3os",      CPENS (6, C8, C1, 5), F_HASXT | F_ARCHEXT },
5335    { "alle2os",      CPENS (4, C8, C1, 0), F_ARCHEXT },
5336    { "alle1os",      CPENS (4, C8, C1, 4), F_ARCHEXT },
5337    { "alle3os",      CPENS (6, C8, C1, 0), F_ARCHEXT },
5338
5339    { "rvae1",      CPENS (0, C8, C6, 1), F_HASXT | F_ARCHEXT },
5340    { "rvaae1",     CPENS (0, C8, C6, 3), F_HASXT | F_ARCHEXT },
5341    { "rvale1",     CPENS (0, C8, C6, 5), F_HASXT | F_ARCHEXT },
5342    { "rvaale1",    CPENS (0, C8, C6, 7), F_HASXT | F_ARCHEXT },
5343    { "rvae1is",    CPENS (0, C8, C2, 1), F_HASXT | F_ARCHEXT },
5344    { "rvaae1is",   CPENS (0, C8, C2, 3), F_HASXT | F_ARCHEXT },
5345    { "rvale1is",   CPENS (0, C8, C2, 5), F_HASXT | F_ARCHEXT },
5346    { "rvaale1is",  CPENS (0, C8, C2, 7), F_HASXT | F_ARCHEXT },
5347    { "rvae1os",    CPENS (0, C8, C5, 1), F_HASXT | F_ARCHEXT },
5348    { "rvaae1os",   CPENS (0, C8, C5, 3), F_HASXT | F_ARCHEXT },
5349    { "rvale1os",   CPENS (0, C8, C5, 5), F_HASXT | F_ARCHEXT },
5350    { "rvaale1os",  CPENS (0, C8, C5, 7), F_HASXT | F_ARCHEXT },
5351    { "ripas2e1is", CPENS (4, C8, C0, 2), F_HASXT | F_ARCHEXT },
5352    { "ripas2le1is",CPENS (4, C8, C0, 6), F_HASXT | F_ARCHEXT },
5353    { "ripas2e1",   CPENS (4, C8, C4, 2), F_HASXT | F_ARCHEXT },
5354    { "ripas2le1",  CPENS (4, C8, C4, 6), F_HASXT | F_ARCHEXT },
5355    { "ripas2e1os", CPENS (4, C8, C4, 3), F_HASXT | F_ARCHEXT },
5356    { "ripas2le1os",CPENS (4, C8, C4, 7), F_HASXT | F_ARCHEXT },
5357    { "rvae2",      CPENS (4, C8, C6, 1), F_HASXT | F_ARCHEXT },
5358    { "rvale2",     CPENS (4, C8, C6, 5), F_HASXT | F_ARCHEXT },
5359    { "rvae2is",    CPENS (4, C8, C2, 1), F_HASXT | F_ARCHEXT },
5360    { "rvale2is",   CPENS (4, C8, C2, 5), F_HASXT | F_ARCHEXT },
5361    { "rvae2os",    CPENS (4, C8, C5, 1), F_HASXT | F_ARCHEXT },
5362    { "rvale2os",   CPENS (4, C8, C5, 5), F_HASXT | F_ARCHEXT },
5363    { "rvae3",      CPENS (6, C8, C6, 1), F_HASXT | F_ARCHEXT },
5364    { "rvale3",     CPENS (6, C8, C6, 5), F_HASXT | F_ARCHEXT },
5365    { "rvae3is",    CPENS (6, C8, C2, 1), F_HASXT | F_ARCHEXT },
5366    { "rvale3is",   CPENS (6, C8, C2, 5), F_HASXT | F_ARCHEXT },
5367    { "rvae3os",    CPENS (6, C8, C5, 1), F_HASXT | F_ARCHEXT },
5368    { "rvale3os",   CPENS (6, C8, C5, 5), F_HASXT | F_ARCHEXT },
5369
5370    { "rpaos",      CPENS (6, C8, C4, 3), F_HASXT },
5371    { "rpalos",     CPENS (6, C8, C4, 7), F_HASXT },
5372    { "paallos",    CPENS (6, C8, C1, 4), 0},
5373    { "paall",      CPENS (6, C8, C7, 4), 0},
5374
5375    { 0,       CPENS(0,0,0,0), 0 }
5376};
5377
5378const aarch64_sys_ins_reg aarch64_sys_regs_sr[] =
5379{
5380    /* RCTX is somewhat unique in that it has different values
5381       (op2) depending on the instruction in which it is used (cfp/dvp/cpp).
5382       Thus op2 is masked out here and instead encoded directly in the
5383       aarch64_opcode_table entries for the respective instructions.  */
5384    { "rctx",   CPENS(3,C7,C3,0), F_HASXT | F_ARCHEXT | F_REG_WRITE}, /* WO */
5385
5386    { 0,       CPENS(0,0,0,0), 0 }
5387};
5388
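/* Return TRUE if the system instruction operand SYS_INS_REG takes a
   general-purpose register argument (<Xt>).  */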
5389bool
5390aarch64_sys_ins_reg_has_xt (const aarch64_sys_ins_reg *sys_ins_reg)
5391{
5392  return (sys_ins_reg->flags & F_HASXT) != 0;
5393}
5394
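/* Return TRUE if the system register or system instruction operand described
   by REG_NAME, REG_VALUE, REG_FLAGS and REG_FEATURES is supported by the
   architecture feature set FEATURES.  */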
5395extern bool
5396aarch64_sys_ins_reg_supported_p (const aarch64_feature_set features,
5397				 const char *reg_name,
5398				 aarch64_insn reg_value,
5399				 uint32_t reg_flags,
5400				 aarch64_feature_set reg_features)
5401{
5402  /* Armv8-R has no EL3.  */
5403  if (AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_R))
5404    {
5405      const char *suffix = strrchr (reg_name, '_');
5406      if (suffix && !strcmp (suffix, "_el3"))
5407	return false;
5408    }
5409
5410  if (!(reg_flags & F_ARCHEXT))
5411    return true;
5412
5413  if (reg_features
5414      && AARCH64_CPU_HAS_ALL_FEATURES (features, reg_features))
5415    return true;
5416
5417  /* ARMv8.4 TLB instructions.  */
5418  if ((reg_value == CPENS (0, C8, C1, 0)
5419       || reg_value == CPENS (0, C8, C1, 1)
5420       || reg_value == CPENS (0, C8, C1, 2)
5421       || reg_value == CPENS (0, C8, C1, 3)
5422       || reg_value == CPENS (0, C8, C1, 5)
5423       || reg_value == CPENS (0, C8, C1, 7)
5424       || reg_value == CPENS (4, C8, C4, 0)
5425       || reg_value == CPENS (4, C8, C4, 4)
5426       || reg_value == CPENS (4, C8, C1, 1)
5427       || reg_value == CPENS (4, C8, C1, 5)
5428       || reg_value == CPENS (4, C8, C1, 6)
5429       || reg_value == CPENS (6, C8, C1, 1)
5430       || reg_value == CPENS (6, C8, C1, 5)
5431       || reg_value == CPENS (4, C8, C1, 0)
5432       || reg_value == CPENS (4, C8, C1, 4)
5433       || reg_value == CPENS (6, C8, C1, 0)
5434       || reg_value == CPENS (0, C8, C6, 1)
5435       || reg_value == CPENS (0, C8, C6, 3)
5436       || reg_value == CPENS (0, C8, C6, 5)
5437       || reg_value == CPENS (0, C8, C6, 7)
5438       || reg_value == CPENS (0, C8, C2, 1)
5439       || reg_value == CPENS (0, C8, C2, 3)
5440       || reg_value == CPENS (0, C8, C2, 5)
5441       || reg_value == CPENS (0, C8, C2, 7)
5442       || reg_value == CPENS (0, C8, C5, 1)
5443       || reg_value == CPENS (0, C8, C5, 3)
5444       || reg_value == CPENS (0, C8, C5, 5)
5445       || reg_value == CPENS (0, C8, C5, 7)
5446       || reg_value == CPENS (4, C8, C0, 2)
5447       || reg_value == CPENS (4, C8, C0, 6)
5448       || reg_value == CPENS (4, C8, C4, 2)
5449       || reg_value == CPENS (4, C8, C4, 6)
5450       || reg_value == CPENS (4, C8, C4, 3)
5451       || reg_value == CPENS (4, C8, C4, 7)
5452       || reg_value == CPENS (4, C8, C6, 1)
5453       || reg_value == CPENS (4, C8, C6, 5)
5454       || reg_value == CPENS (4, C8, C2, 1)
5455       || reg_value == CPENS (4, C8, C2, 5)
5456       || reg_value == CPENS (4, C8, C5, 1)
5457       || reg_value == CPENS (4, C8, C5, 5)
5458       || reg_value == CPENS (6, C8, C6, 1)
5459       || reg_value == CPENS (6, C8, C6, 5)
5460       || reg_value == CPENS (6, C8, C2, 1)
5461       || reg_value == CPENS (6, C8, C2, 5)
5462       || reg_value == CPENS (6, C8, C5, 1)
5463       || reg_value == CPENS (6, C8, C5, 5))
5464      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_4))
5465    return true;
5466
5467  /* DC CVAP.  Values are from aarch64_sys_regs_dc.  */
5468  if (reg_value == CPENS (3, C7, C12, 1)
5469      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5470    return true;
5471
5472  /* DC CVADP.  Values are from aarch64_sys_regs_dc.  */
5473  if (reg_value == CPENS (3, C7, C13, 1)
5474      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_CVADP))
5475    return true;
5476
5477  /* DC <dc_op> for ARMv8.5-A Memory Tagging Extension.  */
5478  if ((reg_value == CPENS (0, C7, C6, 3)
5479       || reg_value == CPENS (0, C7, C6, 4)
5480       || reg_value == CPENS (0, C7, C10, 4)
5481       || reg_value == CPENS (0, C7, C14, 4)
5482       || reg_value == CPENS (3, C7, C10, 3)
5483       || reg_value == CPENS (3, C7, C12, 3)
5484       || reg_value == CPENS (3, C7, C13, 3)
5485       || reg_value == CPENS (3, C7, C14, 3)
5486       || reg_value == CPENS (3, C7, C4, 3)
5487       || reg_value == CPENS (0, C7, C6, 5)
5488       || reg_value == CPENS (0, C7, C6, 6)
5489       || reg_value == CPENS (0, C7, C10, 6)
5490       || reg_value == CPENS (0, C7, C14, 6)
5491       || reg_value == CPENS (3, C7, C10, 5)
5492       || reg_value == CPENS (3, C7, C12, 5)
5493       || reg_value == CPENS (3, C7, C13, 5)
5494       || reg_value == CPENS (3, C7, C14, 5)
5495       || reg_value == CPENS (3, C7, C4, 4))
5496      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_MEMTAG))
5497    return true;
5498
5499  /* AT S1E1RP, AT S1E1WP.  Values are from aarch64_sys_regs_at.  */
5500  if ((reg_value == CPENS (0, C7, C9, 0)
5501       || reg_value == CPENS (0, C7, C9, 1))
5502      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_V8_2))
5503    return true;
5504
5505  /* CFP/DVP/CPP RCTX.  Values are from aarch64_sys_regs_sr.  */
5506  if (reg_value == CPENS (3, C7, C3, 0)
5507      && AARCH64_CPU_HAS_FEATURE (features, AARCH64_FEATURE_PREDRES))
5508    return true;
5509
5510  return false;
5511}
5512
5513#undef C0
5514#undef C1
5515#undef C2
5516#undef C3
5517#undef C4
5518#undef C5
5519#undef C6
5520#undef C7
5521#undef C8
5522#undef C9
5523#undef C10
5524#undef C11
5525#undef C12
5526#undef C13
5527#undef C14
5528#undef C15
5529
5530#define BIT(INSN,BT)     (((INSN) >> (BT)) & 1)
5531#define BITS(INSN,HI,LO) (((INSN) >> (LO)) & ((1 << (((HI) - (LO)) + 1)) - 1))
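/* For example, BITS (insn, 9, 5) expands to ((insn) >> 5) & 0x1f, i.e. it
   extracts the five-bit field at bits [9:5] (the Rn field in the check
   below).  */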
5532
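/* Verify an LDPSW instruction INSN: when writeback is enabled (bit 23) the
   base register must differ from both transfer registers, and for a load
   (bit 22) the two transfer registers must differ from each other.  Return
   ERR_UND if either constraint is violated.  */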
5533static enum err_type
5534verify_ldpsw (const struct aarch64_inst *inst ATTRIBUTE_UNUSED,
5535	      const aarch64_insn insn, bfd_vma pc ATTRIBUTE_UNUSED,
5536	      bool encoding ATTRIBUTE_UNUSED,
5537	      aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5538	      aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5539{
5540  int t  = BITS (insn, 4, 0);
5541  int n  = BITS (insn, 9, 5);
5542  int t2 = BITS (insn, 14, 10);
5543
5544  if (BIT (insn, 23))
5545    {
5546      /* Write back enabled.  */
5547      if ((t == n || t2 == n) && n != 31)
5548	return ERR_UND;
5549    }
5550
5551  if (BIT (insn, 22))
5552    {
5553      /* Load.  */
5554      if (t == t2)
5555	return ERR_UND;
5556    }
5557
5558  return ERR_OK;
5559}
5560
5561/* Verifier for vector-by-element (3 operand) instructions where the
5562   condition `if sz:L == 11 then UNDEFINED` holds.  */
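/* In these encodings sz selects single versus double precision and L is an
   index bit; for double-precision elements L must be zero, so sz:L == 0b11
   has no valid meaning.  */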
5563
5564static enum err_type
5565verify_elem_sd (const struct aarch64_inst *inst, const aarch64_insn insn,
5566		bfd_vma pc ATTRIBUTE_UNUSED, bool encoding,
5567		aarch64_operand_error *mismatch_detail ATTRIBUTE_UNUSED,
5568		aarch64_instr_sequence *insn_sequence ATTRIBUTE_UNUSED)
5569{
5570  const aarch64_insn undef_pattern = 0x3;
5571  aarch64_insn value;
5572
5573  assert (inst->opcode);
5574  assert (inst->opcode->operands[2] == AARCH64_OPND_Em);
5575  value = encoding ? inst->value : insn;
5576  assert (value);
5577
5578  if (undef_pattern == extract_fields (value, 0, 2, FLD_sz, FLD_L))
5579    return ERR_UND;
5580
5581  return ERR_OK;
5582}
5583
5584/* Check an instruction that takes three register operands and that
5585   requires the register numbers to be distinct from one another.  */
5586
5587static enum err_type
5588verify_three_different_regs (const struct aarch64_inst *inst,
5589			     const aarch64_insn insn ATTRIBUTE_UNUSED,
5590			     bfd_vma pc ATTRIBUTE_UNUSED,
5591			     bool encoding ATTRIBUTE_UNUSED,
5592			     aarch64_operand_error *mismatch_detail,
5594			     aarch64_instr_sequence *insn_sequence
5595			       ATTRIBUTE_UNUSED)
5596{
5597  int rd, rs, rn;
5598
5599  rd = inst->operands[0].reg.regno;
5600  rs = inst->operands[1].reg.regno;
5601  rn = inst->operands[2].reg.regno;
5602  if (rd == rs || rd == rn || rs == rn)
5603    {
5604      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5605      mismatch_detail->error
5606	= _("the three register operands must be distinct from one another");
5607      mismatch_detail->index = -1;
5608      return ERR_UND;
5609    }
5610
5611  return ERR_OK;
5612}
5613
5614/* Add INST to the end of INSN_SEQUENCE.  */
5615
5616static void
5617add_insn_to_sequence (const struct aarch64_inst *inst,
5618		      aarch64_instr_sequence *insn_sequence)
5619{
5620  insn_sequence->instr[insn_sequence->num_added_insns++] = *inst;
5621}
5622
5623/* Initialize an instruction sequence insn_sequence with the instruction INST.
5624   If INST is NULL the given insn_sequence is cleared, i.e. it is left empty
5625   with no constraint sequence open.  */
5626
5627void
5628init_insn_sequence (const struct aarch64_inst *inst,
5629		    aarch64_instr_sequence *insn_sequence)
5630{
5631  int num_req_entries = 0;
5632
5633  if (insn_sequence->instr)
5634    {
5635      XDELETE (insn_sequence->instr);
5636      insn_sequence->instr = NULL;
5637    }
5638
5639  /* Handle all the cases here.  May need to think of something smarter than
5640     a giant if/else chain if this grows.  At that time, a lookup table may be
5641     best.  */
5642  if (inst && inst->opcode->constraints & C_SCAN_MOVPRFX)
5643    num_req_entries = 1;
5644  if (inst && (inst->opcode->constraints & C_SCAN_MOPS_PME) == C_SCAN_MOPS_P)
5645    num_req_entries = 2;
5646
5647  insn_sequence->num_added_insns = 0;
5648  insn_sequence->num_allocated_insns = num_req_entries;
5649
5650  if (num_req_entries != 0)
5651    {
5652      insn_sequence->instr = XCNEWVEC (aarch64_inst, num_req_entries);
5653      add_insn_to_sequence (inst, insn_sequence);
5654    }
5655}
5656
5657/* Subroutine of verify_constraints.  Check whether the instruction
5658   is part of a MOPS P/M/E sequence and, if so, whether sequencing
5659   expectations are met.  Return true if the check passes, otherwise
5660   describe the problem in MISMATCH_DETAIL.
5661
5662   IS_NEW_SECTION is true if INST is assumed to start a new section.
5663   The other arguments are as for verify_constraints.  */
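/* For example, a MOPS memory copy must be assembled as three consecutive
   prologue/main/epilogue instructions with matching destination, source and
   size registers:

	cpyfp	[x0]!, [x1]!, x2!
	cpyfm	[x0]!, [x1]!, x2!
	cpyfe	[x0]!, [x1]!, x2!

   Any deviation is reported through the non-fatal diagnostics below.  */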
5664
5665static bool
5666verify_mops_pme_sequence (const struct aarch64_inst *inst,
5667			  bool is_new_section,
5668			  aarch64_operand_error *mismatch_detail,
5669			  aarch64_instr_sequence *insn_sequence)
5670{
5671  const struct aarch64_opcode *opcode;
5672  const struct aarch64_inst *prev_insn;
5673  int i;
5674
5675  opcode = inst->opcode;
5676  if (insn_sequence->instr)
5677    prev_insn = insn_sequence->instr + (insn_sequence->num_added_insns - 1);
5678  else
5679    prev_insn = NULL;
5680
5681  if (prev_insn
5682      && (prev_insn->opcode->constraints & C_SCAN_MOPS_PME)
5683      && prev_insn->opcode != opcode - 1)
5684    {
5685      mismatch_detail->kind = AARCH64_OPDE_EXPECTED_A_AFTER_B;
5686      mismatch_detail->error = NULL;
5687      mismatch_detail->index = -1;
5688      mismatch_detail->data[0].s = prev_insn->opcode[1].name;
5689      mismatch_detail->data[1].s = prev_insn->opcode->name;
5690      mismatch_detail->non_fatal = true;
5691      return false;
5692    }
5693
5694  if (opcode->constraints & C_SCAN_MOPS_PME)
5695    {
5696      if (is_new_section || !prev_insn || prev_insn->opcode != opcode - 1)
5697	{
5698	  mismatch_detail->kind = AARCH64_OPDE_A_SHOULD_FOLLOW_B;
5699	  mismatch_detail->error = NULL;
5700	  mismatch_detail->index = -1;
5701	  mismatch_detail->data[0].s = opcode->name;
5702	  mismatch_detail->data[1].s = opcode[-1].name;
5703	  mismatch_detail->non_fatal = true;
5704	  return false;
5705	}
5706
5707      for (i = 0; i < 3; ++i)
5708	/* There's no specific requirement for the data register to be
5709	   the same between consecutive SET* instructions.  */
5710	if ((opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd
5711	     || opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs
5712	     || opcode->operands[i] == AARCH64_OPND_MOPS_WB_Rn)
5713	    && prev_insn->operands[i].reg.regno != inst->operands[i].reg.regno)
5714	  {
5715	    mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5716	    if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rd)
5717	      mismatch_detail->error = _("destination register differs from "
5718					 "preceding instruction");
5719	    else if (opcode->operands[i] == AARCH64_OPND_MOPS_ADDR_Rs)
5720	      mismatch_detail->error = _("source register differs from "
5721					 "preceding instruction");
5722	    else
5723	      mismatch_detail->error = _("size register differs from "
5724					 "preceding instruction");
5725	    mismatch_detail->index = i;
5726	    mismatch_detail->non_fatal = true;
5727	    return false;
5728	  }
5729    }
5730
5731  return true;
5732}
5733
5734/*  This function verifies that the instruction INST adheres to its specified
5735    constraints.  If it does then ERR_OK is returned; if not then ERR_VFI is
5736    returned and MISMATCH_DETAIL contains the reason why verification failed.
5737
5738    The function is called both during assembly and disassembly.  If assembling
5739    then ENCODING will be TRUE, else FALSE.  If disassembling, PC will be set
5740    and will contain the PC of the current instruction w.r.t. the section.
5741
5742    If not ENCODING (i.e. disassembling) and PC == 0, then we are at the start
5743    of a section.  The constraints are verified against the given state
5744    INSN_SEQUENCE, which is updated as it transitions through the verification.  */
5745
5746enum err_type
5747verify_constraints (const struct aarch64_inst *inst,
5748		    const aarch64_insn insn ATTRIBUTE_UNUSED,
5749		    bfd_vma pc,
5750		    bool encoding,
5751		    aarch64_operand_error *mismatch_detail,
5752		    aarch64_instr_sequence *insn_sequence)
5753{
5754  assert (inst);
5755  assert (inst->opcode);
5756
5757  const struct aarch64_opcode *opcode = inst->opcode;
5758  if (!opcode->constraints && !insn_sequence->instr)
5759    return ERR_OK;
5760
5761  assert (insn_sequence);
5762
5763  enum err_type res = ERR_OK;
5764
5765  /* This instruction puts a constraint on the insn_sequence.  */
5766  if (opcode->flags & F_SCAN)
5767    {
5768      if (insn_sequence->instr)
5769	{
5770	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
5771	  mismatch_detail->error = _("instruction opens new dependency "
5772				     "sequence without ending previous one");
5773	  mismatch_detail->index = -1;
5774	  mismatch_detail->non_fatal = true;
5775	  res = ERR_VFI;
5776	}
5777
5778      init_insn_sequence (inst, insn_sequence);
5779      return res;
5780    }
5781
5782  bool is_new_section = (!encoding && pc == 0);
5783  if (!verify_mops_pme_sequence (inst, is_new_section, mismatch_detail,
5784				 insn_sequence))
5785    {
5786      res = ERR_VFI;
      if ((opcode->constraints & C_SCAN_MOPS_PME) != C_SCAN_MOPS_M)
	init_insn_sequence (NULL, insn_sequence);
    }

  /* Verify constraints on an existing sequence.  */
  if (insn_sequence->instr)
    {
      const struct aarch64_opcode* inst_opcode = insn_sequence->instr->opcode;
      /* If we're decoding and we hit PC=0 with a sequence still open, then a
	 previous sequence was not closed as it should have been.  */
      if (is_new_section && res == ERR_OK)
	{
	  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	  mismatch_detail->error = _("previous `movprfx' sequence not closed");
	  mismatch_detail->index = -1;
	  mismatch_detail->non_fatal = true;
	  res = ERR_VFI;
	  /* Reset the sequence.  */
	  init_insn_sequence (NULL, insn_sequence);
	  return res;
	}

      /* Validate C_SCAN_MOVPRFX constraints.  TODO: move this to a lookup
	 table.  */
      if (inst_opcode->constraints & C_SCAN_MOVPRFX)
	{
	  /* Check whether the instruction that follows MOVPRFX is an SVE
	     instruction at all, to give a better error message.  */
	  if (!opcode->avariant
	      || !(*opcode->avariant &
		   (AARCH64_FEATURE_SVE | AARCH64_FEATURE_SVE2)))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE instruction expected after "
					 "`movprfx'");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Check whether the instruction that follows MOVPRFX is one that
	     is allowed to be used with a MOVPRFX.  */
	  if (!(opcode->constraints & C_SCAN_MOVPRFX))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("SVE `movprfx' compatible instruction "
					 "expected");
	      mismatch_detail->index = -1;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Next check for usage of the predicate register.  */
	  aarch64_opnd_info blk_dest = insn_sequence->instr->operands[0];
	  aarch64_opnd_info blk_pred, inst_pred;
	  memset (&blk_pred, 0, sizeof (aarch64_opnd_info));
	  memset (&inst_pred, 0, sizeof (aarch64_opnd_info));
	  bool predicated = false;
	  assert (blk_dest.type == AARCH64_OPND_SVE_Zd);

	  /* Determine if the movprfx instruction used is predicated or not.  */
	  if (insn_sequence->instr->operands[1].type == AARCH64_OPND_SVE_Pg3)
	    {
	      predicated = true;
	      blk_pred = insn_sequence->instr->operands[1];
	    }

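	  /* Scan the operands of the current instruction: count how many
	     times (and where) the movprfx destination register is used,
	     record the widest vector element size seen, and note the
	     governing predicate operand, if any.  */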
	  unsigned char max_elem_size = 0;
	  unsigned char current_elem_size;
	  int num_op_used = 0, last_op_usage = 0;
	  int i, inst_pred_idx = -1;
	  int num_ops = aarch64_num_of_operands (opcode);
	  for (i = 0; i < num_ops; i++)
	    {
	      aarch64_opnd_info inst_op = inst->operands[i];
	      switch (inst_op.type)
		{
		  case AARCH64_OPND_SVE_Zd:
		  case AARCH64_OPND_SVE_Zm_5:
		  case AARCH64_OPND_SVE_Zm_16:
		  case AARCH64_OPND_SVE_Zn:
		  case AARCH64_OPND_SVE_Zt:
		  case AARCH64_OPND_SVE_Vm:
		  case AARCH64_OPND_SVE_Vn:
		  case AARCH64_OPND_Va:
		  case AARCH64_OPND_Vn:
		  case AARCH64_OPND_Vm:
		  case AARCH64_OPND_Sn:
		  case AARCH64_OPND_Sm:
		    if (inst_op.reg.regno == blk_dest.reg.regno)
		      {
			num_op_used++;
			last_op_usage = i;
		      }
		    current_elem_size
		      = aarch64_get_qualifier_esize (inst_op.qualifier);
		    if (current_elem_size > max_elem_size)
		      max_elem_size = current_elem_size;
		    break;
		  case AARCH64_OPND_SVE_Pd:
		  case AARCH64_OPND_SVE_Pg3:
		  case AARCH64_OPND_SVE_Pg4_5:
		  case AARCH64_OPND_SVE_Pg4_10:
		  case AARCH64_OPND_SVE_Pg4_16:
		  case AARCH64_OPND_SVE_Pm:
		  case AARCH64_OPND_SVE_Pn:
		  case AARCH64_OPND_SVE_Pt:
		  case AARCH64_OPND_SME_Pm:
		    inst_pred = inst_op;
		    inst_pred_idx = i;
		    break;
		  default:
		    break;
		}
	    }

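	  /* A `movprfx'-compatible SVE instruction always has at least one
	     vector register operand, so the loop above must have set
	     max_elem_size.  */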
	  assert (max_elem_size != 0);
	  aarch64_opnd_info inst_dest = inst->operands[0];
	  /* Determine the size that should be used to compare against the
	     movprfx size.  */
	  current_elem_size
	    = opcode->constraints & C_MAX_ELEM
	      ? max_elem_size
	      : aarch64_get_qualifier_esize (inst_dest.qualifier);

	  /* If movprfx is predicated do some extra checks.  */
	  if (predicated)
	    {
	      /* The instruction must be predicated.  */
	      if (inst_pred_idx < 0)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicated instruction expected "
					     "after `movprfx'");
		  mismatch_detail->index = -1;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The instruction must have a merging predicate.  */
	      if (inst_pred.qualifier != AARCH64_OPND_QLF_P_M)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("merging predicate expected due "
					     "to preceding `movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}

	      /* The same predicate register must be used in the instruction.  */
	      if (blk_pred.reg.regno != inst_pred.reg.regno)
		{
		  mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
		  mismatch_detail->error = _("predicate register differs "
					     "from that in preceding "
					     "`movprfx'");
		  mismatch_detail->index = inst_pred_idx;
		  mismatch_detail->non_fatal = true;
		  res = ERR_VFI;
		  goto done;
		}
	    }

	  /* Destructive operations by definition allow one extra use of the
	     destination register, since it also acts as a source.  */
	  int allowed_usage
	    = aarch64_is_destructive_by_operands (opcode) ? 2 : 1;

	  /* Operand is not used at all.  */
	  if (num_op_used == 0)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' not used in current "
					 "instruction");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* We now know it's used; determine exactly where it's used.  */
	  if (blk_dest.reg.regno != inst_dest.reg.regno)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' expected as output");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* The register is used more times than this opcode type allows.  */
	  if (num_op_used > allowed_usage)
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("output register of preceding "
					 "`movprfx' used as input");
	      mismatch_detail->index = last_op_usage;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }

	  /* Now the only thing left is the qualifier checks.  The register
	     must have the same maximum element size.  */
	  if (inst_dest.qualifier
	      && blk_dest.qualifier
	      && current_elem_size
		 != aarch64_get_qualifier_esize (blk_dest.qualifier))
	    {
	      mismatch_detail->kind = AARCH64_OPDE_SYNTAX_ERROR;
	      mismatch_detail->error = _("register size not compatible with "
					 "previous `movprfx'");
	      mismatch_detail->index = 0;
	      mismatch_detail->non_fatal = true;
	      res = ERR_VFI;
	      goto done;
	    }
	}

    done:
      if (insn_sequence->num_added_insns == insn_sequence->num_allocated_insns)
	/* We've checked the last instruction in the sequence and so
	   don't need the sequence any more.  */
	init_insn_sequence (NULL, insn_sequence);
      else
	add_insn_to_sequence (inst, insn_sequence);
    }

  return res;
}


/* Return true if UVALUE cannot be moved into an SVE register using DUP
   (with any element size, not just ESIZE) and if using DUPM would
   therefore be OK.  ESIZE is the number of bytes in the immediate.  */

bool
aarch64_sve_dupm_mov_immediate_p (uint64_t uvalue, int esize)
{
  int64_t svalue = uvalue;
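  /* Mask of the bits above the low ESIZE bytes.  Shifting twice by
     ESIZE * 4 avoids an undefined shift by 64 when ESIZE is 8 (in which
     case UPPER is simply 0).  */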
  uint64_t upper = (uint64_t) -1 << (esize * 4) << (esize * 4);

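  /* Reject values that are not a zero- or sign-extension of an ESIZE-byte
     quantity.  */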
  if ((uvalue & ~upper) != uvalue && (uvalue | upper) != uvalue)
    return false;
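  /* Narrow SVALUE to the smallest element size that still replicates to
     UVALUE.  If it narrows all the way down to a byte, DUP can always
     encode it.  */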
  if (esize <= 4 || (uint32_t) uvalue == (uint32_t) (uvalue >> 32))
    {
      svalue = (int32_t) uvalue;
      if (esize <= 2 || (uint16_t) uvalue == (uint16_t) (uvalue >> 16))
	{
	  svalue = (int16_t) uvalue;
	  if (esize == 1 || (uint8_t) uvalue == (uint8_t) (uvalue >> 8))
	    return false;
	}
    }
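  /* DUP can encode a signed 8-bit immediate, optionally shifted left by
     eight bits.  Prefer DUPM only if SVALUE falls outside that range.  */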
  if ((svalue & 0xff) == 0)
    svalue /= 256;
  return svalue < -128 || svalue >= 128;
}

/* Include the opcode description table as well as the operand description
   table.  */
#define VERIFIER(x) verify_##x
#include "aarch64-tbl.h"
