/* ACLE support for AArch64 SVE (function shapes)
   Copyright (C) 2018-2020 Free Software Foundation, Inc.

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful, but
   WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
   General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "tree.h"
#include "rtl.h"
#include "tm_p.h"
#include "memmodel.h"
#include "insn-codes.h"
#include "optabs.h"
#include "aarch64-sve-builtins.h"
#include "aarch64-sve-builtins-shapes.h"

/* In the comments below, _t0 represents the first type suffix and _t1
   represents the second.  Square brackets enclose characters that are
   present in only the full name, not the overloaded name.  Governing
   predicate arguments and predicate suffixes are not shown, since they
   depend on the predication type, which is a separate piece of
   information from the shape.

   Non-overloaded functions may have additional suffixes beyond the
   ones shown, if those suffixes don't affect the types in the type
   signature.  E.g. the predicate form of svtrn1 has a _b<bits> suffix,
   but this does not affect the prototype, which is always
   "svbool_t(svbool_t, svbool_t)".  */

namespace aarch64_sve {

/* Return a representation of "const T *".  */
static tree
build_const_pointer (tree t)
{
  return build_pointer_type (build_qualified_type (t, TYPE_QUAL_CONST));
}

/* If INSTANCE has a governing predicate, add it to the list of argument
   types in ARGUMENT_TYPES.  RETURN_TYPE is the type returned by the
   function.  */
static void
apply_predication (const function_instance &instance, tree return_type,
		   vec<tree> &argument_types)
{
  if (instance.pred != PRED_none)
    {
      argument_types.quick_insert (0, get_svbool_t ());
      /* For unary merge operations, the first argument is a vector with
	 the same type as the result.  For unary_convert_narrowt it also
	 provides the "bottom" half of active elements, and is present
	 for all types of predication.  */
      if ((argument_types.length () == 2 && instance.pred == PRED_m)
	  || instance.shape == shapes::unary_convert_narrowt)
	argument_types.quick_insert (0, return_type);
    }
}
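
/* A worked example, using svabs as a typical unary merge function:
   the unpredicated signature of svabs_f32_m is
   "svfloat32_t (svfloat32_t)".  apply_predication first inserts the
   governing svbool_t and then, because this is a unary _m operation,
   an initial "inactive" argument of the return type, giving the ACLE
   prototype "svfloat32_t (svfloat32_t, svbool_t, svfloat32_t)".  */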

/* Parse and move past an element type in FORMAT and return it as a type
   suffix.  The format is:

   [01]    - the element type in type suffix 0 or 1 of INSTANCE
   f<bits> - a floating-point type with the given number of bits
   f[01]   - a floating-point type with the same width as type suffix 0 or 1
   B       - bfloat16_t
   h<elt>  - a half-sized version of <elt>
   p       - a predicate (represented as TYPE_SUFFIX_b)
   q<elt>  - a quarter-sized version of <elt>
   s<bits> - a signed type with the given number of bits
   s[01]   - a signed type with the same width as type suffix 0 or 1
   u<bits> - an unsigned type with the given number of bits
   u[01]   - an unsigned type with the same width as type suffix 0 or 1
   w<elt>  - a 64-bit version of <elt> if <elt> is integral, otherwise <elt>

   where <elt> is another element type.  */
static type_suffix_index
parse_element_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == 'f' || ch == 's' || ch == 'u')
    {
      type_class_index tclass = (ch == 'f' ? TYPE_float
				 : ch == 's' ? TYPE_signed
				 : TYPE_unsigned);
      char *end;
      unsigned int bits = strtol (format, &end, 10);
      format = end;
      if (bits == 0 || bits == 1)
	bits = instance.type_suffix (bits).element_bits;
      return find_type_suffix (tclass, bits);
    }

  if (ch == 'w')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      if (type_suffixes[suffix].integer_p)
	return find_type_suffix (type_suffixes[suffix].tclass, 64);
      return suffix;
    }

  if (ch == 'p')
    return TYPE_SUFFIX_b;

  if (ch == 'B')
    return TYPE_SUFFIX_bf16;

  if (ch == 'q')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return find_type_suffix (type_suffixes[suffix].tclass,
			       type_suffixes[suffix].element_bits / 4);
    }

  if (ch == 'h')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      /* Widening and narrowing doesn't change the type for predicates;
	 everything's still an svbool_t.  */
      if (suffix == TYPE_SUFFIX_b)
	return suffix;
      return find_type_suffix (type_suffixes[suffix].tclass,
			       type_suffixes[suffix].element_bits / 2);
    }

  if (ch == '0' || ch == '1')
    return instance.type_suffix_ids[ch - '0'];

  gcc_unreachable ();
}
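
/* The codes compose recursively.  For example, if type suffix 0 of
   INSTANCE is s16, then "u0" parses as u16 (an unsigned type of the
   same width), "hu0" as u8 (a half-sized unsigned version) and "w0"
   as s64 (the 64-bit version of an integral type); these are the kinds
   of expansion assumed by signatures such as "vhu0,v0,su64" below.  */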

/* Read and return a type from FORMAT for function INSTANCE.  Advance
   FORMAT beyond the type string.  The format is:

   _       - void
   al      - array pointer for loads
   ap      - array pointer for prefetches
   as      - array pointer for stores
   b       - base vector type (from a _<m0>base suffix)
   d       - displacement vector type (from a _<m1>index or _<m1>offset suffix)
   e<name> - an enum with the given name
   s<elt>  - a scalar type with the given element suffix
   t<elt>  - a vector or tuple type with given element suffix [*1]
   v<elt>  - a vector with the given element suffix
   where <elt> has the format described in the comment above
   parse_element_type.

   [*1] the vectors_per_tuple function indicates whether the type should
        be a tuple, and if so, how many vectors it should contain.  */
static tree
parse_type (const function_instance &instance, const char *&format)
{
  int ch = *format++;

  if (ch == '_')
    return void_type_node;

  if (ch == 'a')
    {
      ch = *format++;
      if (ch == 'l')
	return build_const_pointer (instance.memory_scalar_type ());
      if (ch == 'p')
	return const_ptr_type_node;
      if (ch == 's')
	return build_pointer_type (instance.memory_scalar_type ());
      gcc_unreachable ();
    }

  if (ch == 'b')
    return instance.base_vector_type ();

  if (ch == 'd')
    return instance.displacement_vector_type ();

  if (ch == 'e')
    {
      if (strncmp (format, "pattern", 7) == 0)
	{
	  format += 7;
	  return acle_svpattern;
	}
      if (strncmp (format, "prfop", 5) == 0)
	{
	  format += 5;
	  return acle_svprfop;
	}
      gcc_unreachable ();
    }

  if (ch == 's')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return scalar_types[type_suffixes[suffix].vector_type];
    }

  if (ch == 't')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      vector_type_index vector_type = type_suffixes[suffix].vector_type;
      unsigned int num_vectors = instance.vectors_per_tuple ();
      return acle_vector_types[num_vectors - 1][vector_type];
    }

  if (ch == 'v')
    {
      type_suffix_index suffix = parse_element_type (instance, format);
      return acle_vector_types[0][type_suffixes[suffix].vector_type];
    }

  gcc_unreachable ();
}
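
/* For example, with INSTANCE being svld1_u32 (type suffix 0 = u32),
   "v0" resolves to svuint32_t and "al" to "const uint32_t *"; in the
   comparison shapes below, "vp" resolves to svbool_t via the 'p'
   element code.  */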

/* Read and move past any argument count at FORMAT for the function
   signature of INSTANCE.  The counts are:

   *q: one argument per element in a 128-bit quadword (as for svdupq)
   *t: one argument per vector in a tuple (as for svcreate)

   Otherwise the count is 1.  */
static unsigned int
parse_count (const function_instance &instance, const char *&format)
{
  if (format[0] == '*' && format[1] == 'q')
    {
      format += 2;
      return instance.elements_per_vq (0);
    }
  if (format[0] == '*' && format[1] == 't')
    {
      format += 2;
      return instance.vectors_per_tuple ();
    }
  return 1;
}
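
/* For example, svdupq_n_u8 has 16 elements per 128-bit quadword, so a
   "*q"-counted argument in its signature expands to sixteen uint8_t
   arguments, while a "*t" count for an svcreate3 variant expands to
   three vector arguments.  */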

/* Read a type signature for INSTANCE from FORMAT.  Add the argument types
   to ARGUMENT_TYPES and return the return type.

   The format is a comma-separated list of types (as for parse_type),
   with the first type being the return type and the rest being the
   argument types.  Each argument type can be followed by an optional
   count (as for parse_count).  */
static tree
parse_signature (const function_instance &instance, const char *format,
		 vec<tree> &argument_types)
{
  tree return_type = parse_type (instance, format);
  while (format[0] == ',')
    {
      format += 1;
      tree argument_type = parse_type (instance, format);
      unsigned int count = parse_count (instance, format);
      for (unsigned int i = 0; i < count; ++i)
	argument_types.quick_push (argument_type);
    }
  gcc_assert (format[0] == 0);
  return return_type;
}
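
/* For example, the signature "v0,v0,s0" with type suffix 0 = u32
   produces return type svuint32_t and argument types
   { svuint32_t, uint32_t }; any governing predicate is added
   afterwards by apply_predication rather than by the signature
   string itself.  */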

/* Add one function instance for GROUP, using mode suffix MODE_SUFFIX_ID,
   the type suffixes at index TI and the predication suffix at index PI.
   The other arguments are as for build_all.  */
static void
build_one (function_builder &b, const char *signature,
	   const function_group_info &group, mode_suffix_index mode_suffix_id,
	   unsigned int ti, unsigned int pi, bool force_direct_overloads)
{
  /* Byte forms of svdupq take 16 arguments.  */
  auto_vec<tree, 16> argument_types;
  function_instance instance (group.base_name, *group.base, *group.shape,
			      mode_suffix_id, group.types[ti],
			      group.preds[pi]);
  tree return_type = parse_signature (instance, signature, argument_types);
  apply_predication (instance, return_type, argument_types);
  b.add_unique_function (instance, return_type, argument_types,
			 group.required_extensions, force_direct_overloads);
}

/* GROUP describes some sort of gather or scatter operation.  There are
   two cases:

   - If the function has any type suffixes (as for loads and stores), the
     first function type suffix specifies either a 32-bit or a 64-bit type,
     which in turn selects either MODE32 or MODE64 as the addressing mode.
     Add a function instance for every type and predicate combination
     in GROUP for which the associated addressing mode is not MODE_none.

   - If the function has no type suffixes (as for prefetches), add one
     MODE32 form and one MODE64 form for each predication type.

   The other arguments are as for build_all.  */
static void
build_32_64 (function_builder &b, const char *signature,
	     const function_group_info &group, mode_suffix_index mode32,
	     mode_suffix_index mode64, bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    if (group.types[0][0] == NUM_TYPE_SUFFIXES)
      {
	gcc_assert (mode32 != MODE_none && mode64 != MODE_none);
	build_one (b, signature, group, mode32, 0, pi,
		   force_direct_overloads);
	build_one (b, signature, group, mode64, 0, pi,
		   force_direct_overloads);
      }
    else
      for (unsigned int ti = 0; group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
	{
	  unsigned int bits = type_suffixes[group.types[ti][0]].element_bits;
	  gcc_assert (bits == 32 || bits == 64);
	  mode_suffix_index mode = bits == 32 ? mode32 : mode64;
	  if (mode != MODE_none)
	    build_one (b, signature, group, mode, ti, pi,
		       force_direct_overloads);
	}
}

/* For every type and predicate combination in GROUP, add one function
   that takes a scalar (pointer) base and a signed vector array index,
   and another that instead takes an unsigned vector array index.
   The vector array index has the same element size as the first
   function type suffix.  SIGNATURE is as for build_all.  */
static void
build_sv_index (function_builder &b, const char *signature,
		const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32index, MODE_s64index);
  build_32_64 (b, signature, group, MODE_u32index, MODE_u64index);
}

/* Like build_sv_index, but only handle 64-bit types.  */
static void
build_sv_index64 (function_builder &b, const char *signature,
		  const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64index);
  build_32_64 (b, signature, group, MODE_none, MODE_u64index);
}

/* Like build_sv_index, but taking vector byte offsets instead of vector
   array indices.  */
static void
build_sv_offset (function_builder &b, const char *signature,
		 const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_s32offset, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* Like build_sv_offset, but exclude offsets that must be interpreted
   as signed (i.e. s32offset).  */
static void
build_sv_uint_offset (function_builder &b, const char *signature,
		      const function_group_info &group)
{
  build_32_64 (b, signature, group, MODE_none, MODE_s64offset);
  build_32_64 (b, signature, group, MODE_u32offset, MODE_u64offset);
}

/* For every type and predicate combination in GROUP, add a function
   that takes a vector base address and no displacement.  The vector
   base has the same element size as the first type suffix.

   The other arguments are as for build_all.  */
static void
build_v_base (function_builder &b, const char *signature,
	      const function_group_info &group,
	      bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base, MODE_u64base,
	       force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar array
   index.  */
static void
build_vs_index (function_builder &b, const char *signature,
		const function_group_info &group,
		bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_index, MODE_u64base_index,
	       force_direct_overloads);
}

/* Like build_v_base, but for functions that also take a scalar byte
   offset.  */
static void
build_vs_offset (function_builder &b, const char *signature,
		 const function_group_info &group,
		 bool force_direct_overloads = false)
{
  build_32_64 (b, signature, group, MODE_u32base_offset, MODE_u64base_offset,
	       force_direct_overloads);
}

/* Add a function instance for every type and predicate combination
   in GROUP.  Take the function base name from GROUP and the mode suffix
   from MODE_SUFFIX_ID.  Use SIGNATURE to construct the function signature
   without a governing predicate, then use apply_predication to add in the
   predicate.  FORCE_DIRECT_OVERLOADS is true if there is a one-to-one
   mapping between "short" and "full" names, and if standard overload
   resolution therefore isn't necessary.  */
static void
build_all (function_builder &b, const char *signature,
	   const function_group_info &group, mode_suffix_index mode_suffix_id,
	   bool force_direct_overloads = false)
{
  for (unsigned int pi = 0; group.preds[pi] != NUM_PREDS; ++pi)
    for (unsigned int ti = 0;
	 ti == 0 || group.types[ti][0] != NUM_TYPE_SUFFIXES; ++ti)
      build_one (b, signature, group, mode_suffix_id, ti, pi,
		 force_direct_overloads);
}

/* TYPE is the largest type suffix associated with the arguments of R,
   but the result is twice as wide.  Return the associated type suffix
   if it exists, otherwise report an appropriate error and return
   NUM_TYPE_SUFFIXES.  */
static type_suffix_index
long_type_suffix (function_resolver &r, type_suffix_index type)
{
  unsigned int element_bits = type_suffixes[type].element_bits;
  if (type_suffixes[type].integer_p && element_bits < 64)
    return find_type_suffix (type_suffixes[type].tclass, element_bits * 2);

  r.report_no_such_form (type);
  return NUM_TYPE_SUFFIXES;
}
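
/* For example, long_type_suffix maps s16 to s32 and u32 to u64, while
   a 64-bit or floating-point suffix such as s64 or f32 has no wider
   integer form here and takes the error path.  */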

/* Declare the function shape NAME, pointing it to an instance
   of class <NAME>_def.  */
#define SHAPE(NAME) \
  static CONSTEXPR const NAME##_def NAME##_obj; \
  namespace shapes { const function_shape *const NAME = &NAME##_obj; }

/* Base class for functions that are not overloaded.  */
struct nonoverloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int) const OVERRIDE
  {
    return true;
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    gcc_unreachable ();
  }
};

/* Base class for overloaded functions.  Bit N of EXPLICIT_MASK is true
   if type suffix N appears in the overloaded name.  */
template<unsigned int EXPLICIT_MASK>
struct overloaded_base : public function_shape
{
  bool
  explicit_type_suffix_p (unsigned int i) const OVERRIDE
  {
    return (EXPLICIT_MASK >> i) & 1;
  }
};

/* Base class for adr_index and adr_offset.  */
struct adr_base : public overloaded_base<0>
{
  /* The function takes two arguments: a vector base and a vector displacement
     (either an index or an offset).  Resolve based on them both.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
	|| (mode = r.resolve_adr_address (0)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode);
  }
};

/* Base class for narrowing bottom binary functions that take an
   immediate second operand.  The result is half the size of the input
   and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
		   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }
};
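
/* For example, with CLASS == TYPE_unsigned the "vhu0,v0,su64"
   signature corresponds to ACLE prototypes such as:

     svuint8_t svqshrunb[_n_s16](svint16_t, uint64_t)

   where the result is a half-sized unsigned version of the _s16
   suffix.  */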

/* The top equivalent of binary_imm_narrowb_base.  It takes three arguments,
   with the first being the values of the even elements, which are typically
   the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct binary_imm_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
		   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0,su64", group, MODE_n);
    else
      build_all (b, "vh0,vh0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE)
	|| !r.require_integer_immediate (i + 2))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Base class for long (i.e. narrow op narrow -> wide) binary functions
   that take an immediate second operand.  The type suffix specifies
   the wider type.  */
struct binary_imm_long_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,vh0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_integer_immediate (i + 1)
	|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }
};
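
/* For example, the "v0,vh0,su64" signature corresponds to ACLE
   prototypes such as:

     svint16_t svshllb[_n_s16](svint8_t, uint64_t)

   where the _s16 suffix names the wide result and the first argument
   has the half-width type.  */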

/* Base class for inc_dec and inc_dec_pat.  */
struct inc_dec_base : public overloaded_base<0>
{
  CONSTEXPR inc_dec_base (bool pat_p) : m_pat_p (pat_p) {}

  /* Resolve based on the first argument only, which must be either a
     scalar or a vector.  If it's a scalar, it must be a 32-bit or
     64-bit integer.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    if (!r.check_gp_argument (m_pat_p ? 3 : 2, i, nargs)
	|| !r.require_vector_or_scalar_type (i))
      return error_mark_node;

    mode_suffix_index mode;
    type_suffix_index type;
    if (r.scalar_argument_p (i))
      {
	mode = MODE_n;
	type = r.infer_integer_scalar_type (i);
      }
    else
      {
	mode = MODE_none;
	type = r.infer_vector_type (i);
      }
    if (type == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    for (++i; i < nargs; ++i)
      if (!r.require_integer_immediate (i))
	return error_mark_node;

    return r.resolve_to (mode, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_range (m_pat_p ? 2 : 1, 1, 16);
  }

  bool m_pat_p;
};
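
/* For example, the scalar form of this shape covers ACLE functions
   such as:

     int32_t svqincw[_n_s32](int32_t, uint64_t)

   where the final argument must be an integer constant expression in
   the range [1, 16].  */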

/* Base class for load and load_replicate.  */
struct load_contiguous_base : public overloaded_base<0>
{
  /* Resolve a call based purely on a pointer argument.  The other arguments
     are a governing predicate and (for MODE_vnum) a vnum offset.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool vnum_p = r.mode_suffix_id == MODE_vnum;
    gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);

    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (vnum_p ? 2 : 1, i, nargs)
	|| (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
	|| (vnum_p && !r.require_scalar_type (i + 1, "int64_t")))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
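
/* For example, a call such as svld1 (pg, ptr) with ptr of type
   "const int32_t *" resolves via the pointer target type to
   svld1_s32, while the MODE_vnum form svld1_vnum (pg, ptr, vnum)
   takes an extra int64_t vnum argument.  */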

/* Base class for gather loads that take a scalar base and a vector
   displacement (either an offset or an index).  */
struct load_gather_sv_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_pointer_type (i, true)) == NUM_TYPE_SUFFIXES
	|| (mode = r.resolve_sv_displacement (i + 1, type, true),
	    mode == MODE_none))
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for load_ext_gather_index and load_ext_gather_offset,
   which differ only in the units of the displacement.  */
struct load_ext_gather_base : public overloaded_base<1>
{
  /* Resolve a gather load that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The function has an explicit type suffix that determines the type
     of the loaded data.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    /* No resolution is needed for a vector base with no displacement;
       there's a one-to-one mapping between short and long names.  */
    gcc_assert (r.displacement_units () != UNITS_none);

    type_suffix_index type = r.type_suffix_ids[0];

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (2, i, nargs)
	|| (mode = r.resolve_gather_address (i, type, true)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0:quarter>_t,
		       sv<t0:quarter>_t)  (for integer t0)
   sv<t0>_t svmmla[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)  (for floating-point t0)

   The functions act like the equivalent of "ternary_qq" for integer elements
   and normal vector-only ternary functions for floating-point elements.  */
struct mmla_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* svmmla is distributed over several extensions.  Allow the common
       denominator to define the overloaded svmmla function without
       defining any specific versions.  */
    if (group.types[0][0] != NUM_TYPE_SUFFIXES)
      {
	if (type_suffixes[group.types[0][0]].float_p)
	  build_all (b, "v0,v0,v0,v0", group, MODE_none);
	else
	  build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
      }
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* Make sure that the function exists now, since not all forms
       follow a set pattern after this point.  */
    tree res = r.resolve_to (r.mode_suffix_id, type);
    if (res == error_mark_node)
      return res;

    bool float_p = type_suffixes[type].float_p;
    unsigned int modifier = float_p ? r.SAME_SIZE : r.QUARTER_SIZE;
    if (!r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
					modifier)
	|| !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
					   modifier))
      return error_mark_node;

    return res;
  }
};
SHAPE (mmla)
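
/* Under this shape, ACLE defines for example:

     svint32_t svmmla[_s32](svint32_t, svint8_t, svint8_t)
     svfloat32_t svmmla[_f32](svfloat32_t, svfloat32_t, svfloat32_t)

   with the integer forms following the quarter-size pattern and the
   floating-point forms being uniform.  */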

/* Base class for prefetch_gather_index and prefetch_gather_offset,
   which differ only in the units of the displacement.  */
struct prefetch_gather_base : public overloaded_base<0>
{
  /* Resolve a gather prefetch that takes one of:

     - a scalar pointer base (const void *) and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The prefetch operation is the final argument.  This is purely a
     mode-based resolution; there are no type suffixes.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
	|| (mode = r.resolve_gather_address (i, NUM_TYPE_SUFFIXES,
					     false)) == MODE_none
	|| !r.require_integer_immediate (nargs - 1))
      return error_mark_node;

    return r.resolve_to (mode);
  }
};

/* Wraps BASE to provide a narrowing shift right function.  Argument N
   is an immediate shift amount in the range [1, sizeof(<t0>_t) * 4].  */
template<typename BASE, unsigned int N>
struct shift_right_imm_narrow_wrapper : public BASE
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits / 2;
    return c.require_immediate_range (N, 1, bits);
  }
};
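
/* For example, for an _n_s16 narrowing right shift the result has
   8-bit elements, so the wrapper checks that the immediate is in the
   range [1, 8].  */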

/* Base class for store_scatter_index and store_scatter_offset,
   which differ only in the units of the displacement.  */
struct store_scatter_base : public overloaded_base<0>
{
  /* Resolve a scatter store that takes one of:

     - a scalar pointer base and a vector displacement
     - a vector base with no displacement or
     - a vector base and a scalar displacement

     The stored data is the final argument, and it determines the
     type suffix.  */
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool has_displacement_p = r.displacement_units () != UNITS_none;

    unsigned int i, nargs;
    mode_suffix_index mode;
    type_suffix_index type;
    if (!r.check_gp_argument (has_displacement_p ? 3 : 2, i, nargs)
	|| (type = r.infer_sd_vector_type (nargs - 1)) == NUM_TYPE_SUFFIXES
	|| (mode = r.resolve_gather_address (i, type, false)) == MODE_none)
      return error_mark_node;

    return r.resolve_to (mode, type);
  }
};

/* Base class for ternary operations in which the final argument is an
   immediate shift amount.  The derived class should check the range.  */
struct ternary_shift_imm_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }
};

/* Base class for ternary operations in which the first argument has the
   same element type as the result, and in which the second and third
   arguments have an element type that is derived from the first.

   MODIFIER is the number of element bits in the second and third
   arguments, or a function_resolver modifier that says how this
   precision is derived from the first argument's elements.

   TYPE_CLASS2 and TYPE_CLASS3 are the type classes of the second and
   third arguments, or function_resolver::SAME_TYPE_CLASS if the type
   class is the same as the first argument.  */
template<unsigned int MODIFIER,
	 type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
	 type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_opt_n_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
					   MODIFIER))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i, type, TYPE_CLASS3, MODIFIER);
  }
};

/* Like ternary_resize2_opt_n_base, but for functions that don't take
   a final scalar argument.  */
template<unsigned int MODIFIER,
	 type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
	 type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
					   MODIFIER)
	|| !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
					   MODIFIER))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* Like ternary_resize2_opt_n_base, but for functions that take a final
   lane argument.  */
template<unsigned int MODIFIER,
	 type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
	 type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_resize2_lane_base : public overloaded_base<0>
{
  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (4, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, TYPE_CLASS2,
					   MODIFIER)
	|| !r.require_derived_vector_type (i + 2, i, type, TYPE_CLASS3,
					   MODIFIER)
	|| !r.require_integer_immediate (i + 3))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* A specialization of ternary_resize2_lane_base for bfloat16 elements,
   indexed in groups of N elements.  */
template<unsigned int N>
struct ternary_bfloat_lane_base
  : public ternary_resize2_lane_base<16, TYPE_bfloat, TYPE_bfloat>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vB,vB,su64", group, MODE_none);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3, N);
  }
};

/* A specialization of ternary_resize2_lane_base for quarter-sized
   elements.  */
template<type_class_index TYPE_CLASS2 = function_resolver::SAME_TYPE_CLASS,
	 type_class_index TYPE_CLASS3 = function_resolver::SAME_TYPE_CLASS>
struct ternary_qq_lane_base
  : public ternary_resize2_lane_base<function_resolver::QUARTER_SIZE,
				     TYPE_CLASS2, TYPE_CLASS3>
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3, 4);
  }
};

/* Base class for narrowing bottom unary functions.  The result is half
   the size of the input and has class CLASS.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowb_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
		   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (CLASS, r.HALF_SIZE);
  }
};

/* The top equivalent of unary_narrowb_base.  All forms take the values
   of the even elements as an extra argument, before any governing predicate.
   These even elements are typically the result of the narrowb operation.  */
template<type_class_index CLASS = function_resolver::SAME_TYPE_CLASS>
struct unary_narrowt_base : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    STATIC_ASSERT (CLASS == function_resolver::SAME_TYPE_CLASS
		   || CLASS == TYPE_unsigned);
    if (CLASS == TYPE_unsigned)
      build_all (b, "vhu0,vhu0,v0", group, MODE_none);
    else
      build_all (b, "vh0,vh0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i, i + 1, type, CLASS, r.HALF_SIZE))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};

/* sv<m0>_t svfoo[_m0base]_[m1]index(sv<m0>_t, sv<m1>_t)

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_index_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_all (b, "b,b,d", group, MODE_u32base_s32index);
    build_all (b, "b,b,d", group, MODE_u32base_u32index);
    build_all (b, "b,b,d", group, MODE_u64base_s64index);
    build_all (b, "b,b,d", group, MODE_u64base_u64index);
  }
};
SHAPE (adr_index)
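
/* For example, this shape describes ACLE functions such as:

     svuint32_t svadrh[_u32base]_[s32]index(svuint32_t, svint32_t)

   where the displacement vector can be signed or unsigned but must
   have the same element width as the base vector.  */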

/* sv<m0>_t svfoo[_m0base]_[m1]offset(sv<m0>_t, sv<m1>_t)

   for all valid combinations of vector base type <m0> and vector
   displacement type <m1>.  */
struct adr_offset_def : public adr_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_all (b, "b,b,d", group, MODE_u32base_s32offset);
    build_all (b, "b,b,d", group, MODE_u32base_u32offset);
    build_all (b, "b,b,d", group, MODE_u64base_s64offset);
    build_all (b, "b,b,d", group, MODE_u64base_u64offset);
  }
};
SHAPE (adr_offset)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)

   i.e. a binary operation with uniform types, but with no scalar form.  */
struct binary_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:int>_t).

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always a signed integer.  */
struct binary_int_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vs0", group, MODE_none);
    build_all (b, "v0,v0,ss0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_signed);
  }
};
SHAPE (binary_int_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 16 / sizeof (<t0>_t) - 1].  */
struct binary_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_lane)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 32 / sizeof (<t0>_t) - 1].  */
struct binary_long_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_matching_vector_type (i + 1, type)
	|| !r.require_integer_immediate (i + 2)
	|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
      return res;

    return r.report_no_such_form (type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (2);
  }
};
SHAPE (binary_long_lane)

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, <t0:half>_t).  */
struct binary_long_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0,vh0", group, MODE_none);
    build_all (b, "v0,vh0,sh0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type, result_type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
				      r.SAME_SIZE, result_type);
  }
};
SHAPE (binary_long_opt_n)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t).

   i.e. a binary operation in which the final argument is always a scalar
   rather than a vector.  */
struct binary_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_scalar_type (i + 1, r.SAME_TYPE_CLASS))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. a version of binary_opt_n in which the output elements are half the
   width of the input elements.  */
struct binary_narrowb_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_narrowb_opt_n)

/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t, sv<t0>_t)
   sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, <t0>_t)

   This is the "top" counterpart to binary_narrowb_opt_n.  */
struct binary_narrowt_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vh0,vh0,v0,v0", group, MODE_none);
    build_all (b, "vh0,vh0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i, i + 1, type, r.SAME_TYPE_CLASS,
					   r.HALF_SIZE))
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 2, i + 1, type);
  }
};
SHAPE (binary_narrowt_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0>_t)

   i.e. the standard shape for binary operations that operate on
   uniform types.  */
struct binary_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
    /* _b functions do not have an _n form, but are classified as
       binary_opt_n so that they can be overloaded with vector
       functions.  */
    if (group.types[0][0] == TYPE_SUFFIX_b)
      gcc_assert (group.types[0][1] == NUM_TYPE_SUFFIXES);
    else
      build_all (b, "v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (binary_opt_n)
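
/* For example, svadd uses this shape, giving ACLE prototypes such as:

     svint32_t svadd[_s32]_m(svbool_t, svint32_t, svint32_t)
     svint32_t svadd[_n_s32]_m(svbool_t, svint32_t, int32_t)

   with the governing predicate added by apply_predication.  */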

/* svbool_t svfoo(svbool_t, svbool_t).  */
struct binary_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,v0,v0", group, MODE_none);
  }
};
SHAPE (binary_pred)

/* sv<t0>_t svfoo[_<t0>](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be 90 or 270.  */
struct binary_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_either_or (2, 90, 270);
  }
};
SHAPE (binary_rotate)

/* sv<t0>_t svfoo_t0(<t0>_t, <t0>_t)

   i.e. a binary function that takes two scalars and returns a vector.
   An explicit type suffix is required.  */
struct binary_scalar_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,s0,s0", group, MODE_none);
  }
};
SHAPE (binary_scalar)

/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t, sv<t0>_t).

   i.e. a version of "binary" that returns unsigned integers.  */
struct binary_to_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vu0,v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2);
  }
};
SHAPE (binary_to_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)

   i.e. a version of "binary" in which the final argument is always an
   unsigned integer.  */
struct binary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of binary_n in which the final argument is always an
   unsigned integer.  */
struct binary_uint_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_scalar_type (i + 1, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:uint>_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always an unsigned integer.  */
struct binary_uint_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu0", group, MODE_none);
    build_all (b, "v0,v0,su0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned);
  }
};
SHAPE (binary_uint_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t).

   i.e. a version of binary_n in which the final argument is always
   a 64-bit unsigned integer.  */
struct binary_uint64_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_scalar_type (i + 1, "uint64_t"))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_uint64_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svuint64_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   i.e. a version of the standard binary shape binary_opt_n in which
   the final argument is always a uint64_t.  */
struct binary_uint64_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vu64", group, MODE_none);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, TYPE_unsigned, 64);
  }
};
SHAPE (binary_uint64_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t).  */
struct binary_wide_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
					   r.HALF_SIZE))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (binary_wide)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, <t0:half>_t).  */
struct binary_wide_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0", group, MODE_none);
    build_all (b, "v0,v0,sh0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS,
				      r.HALF_SIZE);
  }
};
SHAPE (binary_wide_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
   <t0>_t svfoo[_n_t0](<t0>_t, sv<t0>_t).  */
struct clast_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0", group, MODE_none);
    build_all (b, "s0,s0,v0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    if (!r.check_gp_argument (2, i, nargs)
	|| !r.require_vector_or_scalar_type (i))
      return error_mark_node;

    if (r.scalar_argument_p (i))
      {
	type_suffix_index type;
	if (!r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
	    || (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
	  return error_mark_node;
	return r.resolve_to (MODE_n, type);
      }
    else
      {
	type_suffix_index type;
	if ((type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	    || !r.require_matching_vector_type (i + 1, type))
	  return error_mark_node;
	return r.resolve_to (MODE_none, type);
      }
  }
};
SHAPE (clast)
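
/* For example, svclasta uses this shape, giving ACLE prototypes
   such as:

     svint32_t svclasta[_s32](svbool_t, svint32_t, svint32_t)
     int32_t svclasta[_n_s32](svbool_t, int32_t, svint32_t)

   where the scalar form takes and returns a scalar fallback value.  */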
1613
1614/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t).  */
1615struct compare_def : public overloaded_base<0>
1616{
1617  void
1618  build (function_builder &b, const function_group_info &group) const OVERRIDE
1619  {
1620    b.add_overloaded_functions (group, MODE_none);
1621    build_all (b, "vp,v0,v0", group, MODE_none);
1622  }
1623
1624  tree
1625  resolve (function_resolver &r) const OVERRIDE
1626  {
1627    return r.resolve_uniform (2);
1628  }
1629};
1630SHAPE (compare)
1631
1632/* svbool_t svfoo[_t0](sv<t0>_t, sv<t0>_t)
1633   svbool_t svfoo[_n_t0](sv<t0>_t, <t0>_t)
1634
1635   i.e. a comparison between two vectors, or between a vector and a scalar.  */
struct compare_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,v0,v0", group, MODE_none);
    build_all (b, "vp,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (2);
  }
};
SHAPE (compare_opt_n)

/* svbool_t svfoo[_t0](const <t0>_t *, const <t0>_t *).  */
struct compare_ptr_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,al,al", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_pointer_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_matching_pointer_type (i + 1, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (compare_ptr)

/* svbool_t svfoo_t0[_t1](<t1>_t, <t1>_t)

   where _t0 is a _b<bits> suffix that describes the predicate result.
   There is no direct relationship between the element sizes of _t0
   and _t1.  */
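/* For example, svwhilelt is a presumed client: svwhilelt_b16[_s32]
   takes two int32_t arguments but returns a predicate suitable for
   16-bit elements, so _t0 (_b16) and _t1 (_s32) are independent.  */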
struct compare_scalar_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,s1,s1", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_matching_integer_scalar_type (i + 1, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, r.type_suffix_ids[0], type);
  }
};
SHAPE (compare_scalar)

/* svbool_t svfoo[_t0](sv<t0>_t, svint64_t)  (for signed t0)
   svbool_t svfoo[_n_t0](sv<t0>_t, int64_t)  (for signed t0)
   svbool_t svfoo[_t0](sv<t0>_t, svuint64_t)  (for unsigned t0)
   svbool_t svfoo[_n_t0](sv<t0>_t, uint64_t)  (for unsigned t0)

   i.e. a comparison in which the second argument is 64 bits.  */
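/* For example, for t0 == s8 this gives:

     svbool_t svfoo[_s8](svint8_t, svint64_t)
     svbool_t svfoo[_n_s8](svint8_t, int64_t)

   The svcmp*_wide comparisons are presumed clients.  */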
struct compare_wide_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vp,v0,vw0", group, MODE_none);
    build_all (b, "vp,v0,sw0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.finish_opt_n_resolution (i + 1, i, type, r.SAME_TYPE_CLASS, 64);
  }
};
SHAPE (compare_wide_opt_n)

/* uint64_t svfoo().  */
struct count_inherent_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64", group, MODE_none);
  }
};
SHAPE (count_inherent)

/* uint64_t svfoo(enum svpattern).  */
struct count_pat_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64,epattern", group, MODE_none);
  }
};
SHAPE (count_pat)

/* uint64_t svfoo(svbool_t).  */
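/* svcntp is a presumed client; in its case the governing predicate
   (not shown above, as usual) appears as a leading svbool_t argument,
   giving e.g. "uint64_t svcntp_b8(svbool_t, svbool_t)".  */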
struct count_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "su64,vp", group, MODE_none);
  }
};
SHAPE (count_pred)

/* uint64_t svfoo[_t0](sv<t0>_t).  */
struct count_vector_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "su64,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (count_vector)

/* sv<t0>xN_t svfoo[_t0](sv<t0>_t, ..., sv<t0>_t)

   where there are N arguments in total.  */
struct create_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,v0*t", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (r.vectors_per_tuple ());
  }
};
SHAPE (create)

/* sv<t0>_t svfoo[_n]_t0(<t0>_t, ..., <t0>_t)

   where there are enough arguments to fill 128 bits of data (or to
   control 128 bits of data in the case of predicates).  */
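/* For example, for t0 == s32 it takes four 32-bit values to fill
   128 bits:

     svint32_t svfoo[_n]_s32(int32_t, int32_t, int32_t, int32_t)

   svdupq is a presumed client.  */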
struct dupq_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The "_n" suffix is optional; the full name has it, but the short
       name doesn't.  */
    build_all (b, "v0,s0*q", group, MODE_n, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short forms just make "_n" implicit, so no resolution is needed.  */
    gcc_unreachable ();
  }
};
SHAPE (dupq)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression that,
   when multiplied by the number of bytes in t0, is in the range
   [0, 255].  */
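/* For example, for t0 == s32 each element occupies 4 bytes, so the
   check () function below restricts the final argument to the range
   [0, 256 / 4 - 1] == [0, 63].  svext is a presumed client.  */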
struct ext_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bytes = c.type_suffix (0).element_bytes;
    return c.require_immediate_range (2, 0, 256 / bytes - 1);
  }
};
SHAPE (ext)

/* <t0>_t svfoo[_t0](<t0>_t, sv<t0>_t).  */
struct fold_left_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "s0,s0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| !r.require_derived_scalar_type (i, r.SAME_TYPE_CLASS)
	|| (type = r.infer_vector_type (i + 1)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (fold_left)

/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, uint64_t)

   where the final argument is an integer constant expression in
   the range [0, N - 1].  */
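/* For example, for N == 2 and t0 == s32:

     svint32_t svfoo[_s32](svint32x2_t, uint64_t)

   where the final argument must be 0 or 1.  svget2 is a presumed
   client.  */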
struct get_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,t0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_integer_immediate (i + 1))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int nvectors = c.vectors_per_tuple ();
    return c.require_immediate_range (1, 0, nvectors - 1);
  }
};
SHAPE (get)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, uint64_t)
   <t0>_t svfoo[_n_t0](<t0>_t, uint64_t)

   where the t0 in the vector form is a signed or unsigned integer
   whose size is tied to the [bhwd] suffix of "svfoo".  */
struct inc_dec_def : public inc_dec_base
{
  CONSTEXPR inc_dec_def () : inc_dec_base (false) {}

  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* These functions are unusual in that the type suffixes for
       the scalar and vector forms are not related.  The vector
       form always has exactly two potential suffixes while the
       scalar form always has four.  */
    if (group.types[2][0] == NUM_TYPE_SUFFIXES)
      build_all (b, "v0,v0,su64", group, MODE_none);
    else
      build_all (b, "s0,s0,su64", group, MODE_n);
  }
};
SHAPE (inc_dec)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, enum svpattern, uint64_t)
   <t0>_t svfoo[_n_t0](<t0>_t, enum svpattern, uint64_t)

   where the t0 in the vector form is a signed or unsigned integer
   whose size is tied to the [bhwd] suffix of "svfoo".  */
struct inc_dec_pat_def : public inc_dec_base
{
  CONSTEXPR inc_dec_pat_def () : inc_dec_base (true) {}

  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    /* These functions are unusual in that the type suffixes for
       the scalar and vector forms are not related.  The vector
       form always has exactly two potential suffixes while the
       scalar form always has four.  */
    if (group.types[2][0] == NUM_TYPE_SUFFIXES)
      build_all (b, "v0,v0,epattern,su64", group, MODE_none);
    else
      build_all (b, "s0,s0,epattern,su64", group, MODE_n);
  }
};
SHAPE (inc_dec_pat)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbool_t).  */
struct inc_dec_pred_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vp", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (inc_dec_pred)

/* <t0>_t svfoo[_n_t0]_t1(<t0>_t, svbool_t)

   where _t1 is a _b<bits> suffix that describes the svbool_t argument.  */
struct inc_dec_pred_scalar_def : public overloaded_base<2>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "s0,s0,vp", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_integer_scalar_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_vector_type (i + 1, VECTOR_TYPE_svbool_t))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type, r.type_suffix_ids[1]);
  }
};
SHAPE (inc_dec_pred_scalar)

/* sv<t0>[xN]_t svfoo_t0().  */
struct inherent_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "t0", group, MODE_none);
  }
};
SHAPE (inherent)

/* svbool_t svfoo[_b]().  */
struct inherent_b_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The "_b" suffix is optional; the full name has it, but the short
       name doesn't.  */
    build_all (b, "v0", group, MODE_none, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short forms just make "_b" implicit, so no resolution is needed.  */
    gcc_unreachable ();
  }
};
SHAPE (inherent_b)

/* sv<t0>[xN]_t svfoo[_t0](const <t0>_t *)
   sv<t0>[xN]_t svfoo_vnum[_t0](const <t0>_t *, int64_t).  */
struct load_def : public load_contiguous_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_vnum);
    build_all (b, "t0,al", group, MODE_none);
    build_all (b, "t0,al,ss64", group, MODE_vnum);
  }
};
SHAPE (load)

/* sv<t0>_t svfoo_t0(const <X>_t *)
   sv<t0>_t svfoo_vnum_t0(const <X>_t *, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "t0,al", group, MODE_none);
    build_all (b, "t0,al,ss64", group, MODE_vnum);
  }
};
SHAPE (load_ext)

/* sv<t0>_t svfoo_[s32]index_t0(const <X>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]index_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_gather_index_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "t0,al,d", group);
    build_vs_index (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_index)

/* sv<t0>_t svfoo_[s64]index_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u64]index_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  This is a
   version of load_ext_gather_index that doesn't support 32-bit vector
   indices.  */
struct load_ext_gather_index_restricted_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index64 (b, "t0,al,d", group);
    build_vs_index (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_index_restricted)

/* sv<t0>_t svfoo_[s32]offset_t0(const <X>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  */
struct load_ext_gather_offset_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "t0,al,d", group);
    build_v_base (b, "t0,b", group, true);
    build_vs_offset (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_offset)

/* sv<t0>_t svfoo_[s64]offset_t0(const <X>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset_t0(const <X>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset_t0(const <X>_t *, svuint64_t)

   sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t)

   where <X> is determined by the function base name.  This is
   load_ext_gather_offset without the s32 vector offset form.  */
struct load_ext_gather_offset_restricted_def : public load_ext_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_uint_offset (b, "t0,al,d", group);
    build_v_base (b, "t0,b", group, true);
    build_vs_offset (b, "t0,b,ss64", group);
  }
};
SHAPE (load_ext_gather_offset_restricted)

/* sv<t0>_t svfoo_[s32]index[_t0](const <t0>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]index[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)

   sv<t0>_t svfoo_[s32]offset[_t0](const <t0>_t *, svint32_t)
   sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t).  */
struct load_gather_sv_def : public load_gather_sv_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_index (b, "t0,al,d", group);
    build_sv_offset (b, "t0,al,d", group);
  }
};
SHAPE (load_gather_sv)

/* sv<t0>_t svfoo_[u32]index[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]index[_t0](const <t0>_t *, svuint64_t)

   sv<t0>_t svfoo_[s64]offset[_t0](const <t0>_t *, svint64_t)
   sv<t0>_t svfoo_[u32]offset[_t0](const <t0>_t *, svuint32_t)
   sv<t0>_t svfoo_[u64]offset[_t0](const <t0>_t *, svuint64_t)

   This is load_gather_sv without the 32-bit vector index forms and
   without the s32 vector offset form.  */
struct load_gather_sv_restricted_def : public load_gather_sv_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_index64 (b, "t0,al,d", group);
    build_sv_uint_offset (b, "t0,al,d", group);
  }
};
SHAPE (load_gather_sv_restricted)

/* sv<t0>_t svfoo[_u32base]_t0(svuint32_t)
   sv<t0>_t svfoo[_u64base]_t0(svuint64_t)

   sv<t0>_t svfoo[_u32base]_index_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_index_t0(svuint64_t, int64_t)

   sv<t0>_t svfoo[_u32base]_offset_t0(svuint32_t, int64_t)
   sv<t0>_t svfoo[_u64base]_offset_t0(svuint64_t, int64_t).  */
struct load_gather_vs_def : public overloaded_base<1>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    /* The base vector mode is optional; the full name has it but the
       short name doesn't.  There is no ambiguity with SHAPE_load_gather_sv
       because the latter uses an implicit type suffix.  */
    build_v_base (b, "t0,b", group, true);
    build_vs_index (b, "t0,b,ss64", group, true);
    build_vs_offset (b, "t0,b,ss64", group, true);
  }

  tree
  resolve (function_resolver &) const OVERRIDE
  {
    /* The short name just makes the base vector mode implicit;
       no resolution is needed.  */
    gcc_unreachable ();
  }
};
SHAPE (load_gather_vs)

/* sv<t0>_t svfoo[_t0](const <t0>_t *)

   The only difference from "load" is that this shape has no vnum form.  */
struct load_replicate_def : public load_contiguous_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,al", group, MODE_none);
  }
};
SHAPE (load_replicate)

/* svbool_t svfoo(enum svpattern).  */
struct pattern_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "vp,epattern", group, MODE_none);
  }
};
SHAPE (pattern_pred)

/* void svfoo(const void *, svprfop)
   void svfoo_vnum(const void *, int64_t, svprfop).  */
struct prefetch_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "_,ap,eprfop", group, MODE_none);
    build_all (b, "_,ap,ss64,eprfop", group, MODE_vnum);
  }
};
SHAPE (prefetch)

/* void svfoo_[s32]index(const void *, svint32_t, svprfop)
   void svfoo_[s64]index(const void *, svint64_t, svprfop)
   void svfoo_[u32]index(const void *, svuint32_t, svprfop)
   void svfoo_[u64]index(const void *, svuint64_t, svprfop)

   void svfoo[_u32base](svuint32_t, svprfop)
   void svfoo[_u64base](svuint64_t, svprfop)

   void svfoo[_u32base]_index(svuint32_t, int64_t, svprfop)
   void svfoo[_u64base]_index(svuint64_t, int64_t, svprfop).  */
struct prefetch_gather_index_def : public prefetch_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "_,ap,d,eprfop", group);
    build_v_base (b, "_,b,eprfop", group);
    build_vs_index (b, "_,b,ss64,eprfop", group);
  }
};
SHAPE (prefetch_gather_index)

/* void svfoo_[s32]offset(const void *, svint32_t, svprfop)
   void svfoo_[s64]offset(const void *, svint64_t, svprfop)
   void svfoo_[u32]offset(const void *, svuint32_t, svprfop)
   void svfoo_[u64]offset(const void *, svuint64_t, svprfop)

   void svfoo[_u32base](svuint32_t, svprfop)
   void svfoo[_u64base](svuint64_t, svprfop)

   void svfoo[_u32base]_offset(svuint32_t, int64_t, svprfop)
   void svfoo[_u64base]_offset(svuint64_t, int64_t, svprfop).  */
struct prefetch_gather_offset_def : public prefetch_gather_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "_,ap,d,eprfop", group);
    build_v_base (b, "_,b,eprfop", group);
    build_vs_offset (b, "_,b,ss64,eprfop", group);
  }
};
SHAPE (prefetch_gather_offset)

/* bool svfoo(svbool_t).  */
struct ptest_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "sp,vp", group, MODE_none);
  }
};
SHAPE (ptest)

/* svbool_t svfoo().  */
struct rdffr_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "vp", group, MODE_none);
  }
};
SHAPE (rdffr)

/* <t0>_t svfoo[_t0](sv<t0>_t).  */
struct reduction_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "s0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (reduction)

/* int64_t svfoo[_t0](sv<t0>_t)  (for signed t0)
   uint64_t svfoo[_t0](sv<t0>_t)  (for unsigned t0)
   <t0>_t svfoo[_t0](sv<t0>_t)  (for floating-point t0)

   i.e. a version of "reduction" in which the return type for integers
   always has 64 bits.  */
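/* For example, svaddv is a presumed client: for t0 == s8 it has the
   prototype "int64_t svaddv[_s8](svint8_t)", with the result widened
   to 64 bits even though the input elements are only 8 bits wide.  */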
struct reduction_wide_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "sw0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1);
  }
};
SHAPE (reduction_wide)

/* sv<t0>xN_t svfoo[_t0](sv<t0>xN_t, uint64_t, sv<t0>_t)

   where the second argument is an integer constant expression in the
   range [0, N - 1].  */
struct set_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "t0,t0,su64,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_integer_immediate (i + 1)
	|| !r.require_derived_vector_type (i + 2, i, type))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int nvectors = c.vectors_per_tuple ();
    return c.require_immediate_range (1, 0, nvectors - 1);
  }
};
SHAPE (set)

/* void svfoo().  */
struct setffr_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "_", group, MODE_none);
  }
};
SHAPE (setffr)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct shift_left_imm_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (1, 0, bits - 1);
  }
};
SHAPE (shift_left_imm)

/* sv<t0>_t svfoo[_n_t0](sv<t0:half>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 4 - 1].  */
struct shift_left_imm_long_def : public binary_imm_long_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits / 2;
    return c.require_immediate_range (1, 0, bits - 1);
  }
};
SHAPE (shift_left_imm_long)

/* sv<t0:uint>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct shift_left_imm_to_uint_def : public shift_left_imm_def
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "vu0,v0,su64", group, MODE_n);
  }
};
SHAPE (shift_left_imm_to_uint)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 8].  */
struct shift_right_imm_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_n);
    build_all (b, "v0,v0,su64", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (1, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (1, 1, bits);
  }
};
SHAPE (shift_right_imm)

/* sv<t0:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base<>, 1>
  shift_right_imm_narrowb_def;
SHAPE (shift_right_imm_narrowb)

/* sv<t0:half>_t svfoo[_n_t0](sv<t0:half>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base<>, 2>
  shift_right_imm_narrowt_def;
SHAPE (shift_right_imm_narrowt)

/* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef binary_imm_narrowb_base<TYPE_unsigned>
  binary_imm_narrowb_base_unsigned;
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowb_base_unsigned, 1>
  shift_right_imm_narrowb_to_uint_def;
SHAPE (shift_right_imm_narrowb_to_uint)

/* sv<t0:uint:half>_t svfoo[_n_t0](sv<t0:uint:half>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 4].  */
typedef binary_imm_narrowt_base<TYPE_unsigned>
  binary_imm_narrowt_base_unsigned;
typedef shift_right_imm_narrow_wrapper<binary_imm_narrowt_base_unsigned, 2>
  shift_right_imm_narrowt_to_uint_def;
SHAPE (shift_right_imm_narrowt_to_uint)

/* void svfoo[_t0](<X>_t *, sv<t0>[xN]_t)
   void svfoo_vnum[_t0](<X>_t *, int64_t, sv<t0>[xN]_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
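/* For example, for a non-truncating store such as svst1 (a presumed
   client), t0 == s32 gives "void svfoo[_s32](int32_t *, svint32_t)",
   whereas for a truncating store such as svst1b the same suffix gives
   "void svfoo[_s32](int8_t *, svint32_t)".  */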
struct store_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_vnum);
    build_all (b, "_,as,t0", group, MODE_none);
    build_all (b, "_,as,ss64,t0", group, MODE_vnum);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    bool vnum_p = r.mode_suffix_id == MODE_vnum;
    gcc_assert (r.mode_suffix_id == MODE_none || vnum_p);

    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (vnum_p ? 3 : 2, i, nargs)
	|| !r.require_pointer_type (i)
	|| (vnum_p && !r.require_scalar_type (i + 1, "int64_t"))
	|| ((type = r.infer_tuple_type (nargs - 1)) == NUM_TYPE_SUFFIXES))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (store)

/* void svfoo_[s32]index[_t0](<X>_t *, svint32_t, sv<t0>_t)
   void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]index[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
struct store_scatter_index_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index (b, "_,as,d,t0", group);
    build_vs_index (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_index)

/* void svfoo_[s64]index[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u64]index[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_index[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_index[_t0](svuint64_t, int64_t, sv<t0>_t)

   i.e. a version of store_scatter_index that doesn't support 32-bit
   vector indices.  */
struct store_scatter_index_restricted_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_index);
    build_sv_index64 (b, "_,as,d,t0", group);
    build_vs_index (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_index_restricted)

/* void svfoo_[s32]offset[_t0](<X>_t *, svint32_t, sv<t0>_t)
   void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
   void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)

   where <X> might be tied to <t0> (for non-truncating stores) or might
   depend on the function base name (for truncating stores).  */
struct store_scatter_offset_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_offset (b, "_,as,d,t0", group);
    build_v_base (b, "_,b,t0", group);
    build_vs_offset (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_offset)

/* void svfoo_[s64]offset[_t0](<X>_t *, svint64_t, sv<t0>_t)
   void svfoo_[u32]offset[_t0](<X>_t *, svuint32_t, sv<t0>_t)
   void svfoo_[u64]offset[_t0](<X>_t *, svuint64_t, sv<t0>_t)

   void svfoo[_u32base_t0](svuint32_t, sv<t0>_t)
   void svfoo[_u64base_t0](svuint64_t, sv<t0>_t)

   void svfoo[_u32base]_offset[_t0](svuint32_t, int64_t, sv<t0>_t)
   void svfoo[_u64base]_offset[_t0](svuint64_t, int64_t, sv<t0>_t)

   i.e. a version of store_scatter_offset that doesn't support svint32_t
   offsets.  */
struct store_scatter_offset_restricted_def : public store_scatter_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    b.add_overloaded_functions (group, MODE_offset);
    build_sv_uint_offset (b, "_,as,d,t0", group);
    build_v_base (b, "_,b,t0", group);
    build_vs_offset (b, "_,b,ss64,t0", group);
  }
};
SHAPE (store_scatter_offset_restricted)

/* sv<t0>_t svfoo[_t0](sv<t0>xN_t, sv<t0:uint>_t).  */
struct tbl_tuple_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,t0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (2, i, nargs)
	|| (type = r.infer_tuple_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (tbl_tuple)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t).  */
struct ternary_bfloat_def
  : public ternary_resize2_base<16, TYPE_bfloat, TYPE_bfloat>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vB,vB", group, MODE_none);
  }
};
SHAPE (ternary_bfloat)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t, uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 7].  */
typedef ternary_bfloat_lane_base<1> ternary_bfloat_lane_def;
SHAPE (ternary_bfloat_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t, uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 3].  */
typedef ternary_bfloat_lane_base<2> ternary_bfloat_lanex2_def;
SHAPE (ternary_bfloat_lanex2)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, svbfloat16_t, svbfloat16_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, svbfloat16_t, bfloat16_t).  */
struct ternary_bfloat_opt_n_def
  : public ternary_resize2_opt_n_base<16, TYPE_bfloat, TYPE_bfloat>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vB,vB", group, MODE_none);
    build_all (b, "v0,v0,vB,sB", group, MODE_n);
  }
};
SHAPE (ternary_bfloat_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int:quarter>_t, sv<t0:uint:quarter>_t,
		       uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_intq_uintq_lane_def
  : public ternary_qq_lane_base<TYPE_signed, TYPE_unsigned>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vqs0,vqu0,su64", group, MODE_none);
  }
};
SHAPE (ternary_intq_uintq_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:int:quarter>_t, sv<t0:uint:quarter>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:int:quarter>_t,
			 <t0:uint:quarter>_t).  */
struct ternary_intq_uintq_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE,
				      TYPE_signed, TYPE_unsigned>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vqs0,vqu0", group, MODE_none);
    build_all (b, "v0,v0,vqs0,squ0", group, MODE_n);
  }
};
SHAPE (ternary_intq_uintq_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_lane_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3);
  }
};
SHAPE (ternary_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t, uint64_t)

   where the penultimate argument is an integer constant expression in
   the range [0, 8 / sizeof (<t0>_t) - 1] and where the final argument
   is an integer constant expression in {0, 90, 180, 270}.  */
struct ternary_lane_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 2);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return (c.require_immediate_lane_index (3, 2)
	    && c.require_immediate_one_of (4, 0, 90, 180, 270));
  }
};
SHAPE (ternary_lane_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t, uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 32 / sizeof (<t0>_t) - 1].  */
struct ternary_long_lane_def
  : public ternary_resize2_lane_base<function_resolver::HALF_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0,vh0,su64", group, MODE_none);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_lane_index (3);
  }
};
SHAPE (ternary_long_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:half>_t, sv<t0:half>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:half>_t, <t0:half>_t)

   i.e. a version of the standard ternary shape ternary_opt_n in which
   the element type of the last two arguments is the half-sized
   equivalent of <t0>.  */
struct ternary_long_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::HALF_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vh0,vh0", group, MODE_none);
    build_all (b, "v0,v0,vh0,sh0", group, MODE_n);
  }
};
SHAPE (ternary_long_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, <t0>_t)

   i.e. the standard shape for ternary operations that operate on
   uniform types.  */
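/* For example, for t0 == f32:

     svfloat32_t svfoo[_f32](svfloat32_t, svfloat32_t, svfloat32_t)
     svfloat32_t svfoo[_n_f32](svfloat32_t, svfloat32_t, float32_t)

   svmla and svmad are presumed clients.  */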
struct ternary_opt_n_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0", group, MODE_none);
    build_all (b, "v0,v0,v0,s0", group, MODE_n);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform_opt_n (3);
  }
};
SHAPE (ternary_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t, uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_qq_lane_def : public ternary_qq_lane_base<>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
  }
};
SHAPE (ternary_qq_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
		       uint64_t, uint64_t)

   where the penultimate argument is an integer constant expression in
   the range [0, 16 / sizeof (<t0>_t) - 1] and where the final argument
   is an integer constant expression in {0, 90, 180, 270}.  */
struct ternary_qq_lane_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (5, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
					   r.QUARTER_SIZE)
	|| !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
					   r.QUARTER_SIZE)
	|| !r.require_integer_immediate (i + 3)
	|| !r.require_integer_immediate (i + 4))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return (c.require_immediate_lane_index (3, 4)
	    && c.require_immediate_one_of (4, 0, 90, 180, 270));
  }
};
SHAPE (ternary_qq_lane_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:quarter>_t, <t0:quarter>_t)

   i.e. a version of the standard ternary shape ternary_opt_n in which
   the element type of the last two arguments is the quarter-sized
   equivalent of <t0>.  */
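/* For example, for t0 == s32 the quarter-sized type is s8, giving:

     svint32_t svfoo[_s32](svint32_t, svint8_t, svint8_t)
     svint32_t svfoo[_n_s32](svint32_t, svint8_t, int8_t)

   svdot is a presumed client.  */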
struct ternary_qq_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0", group, MODE_none);
    build_all (b, "v0,v0,vq0,sq0", group, MODE_n);
  }
};
SHAPE (ternary_qq_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:quarter>_t, sv<t0:quarter>_t,
		       uint64_t)

   where the final argument is an integer constant expression in
   {0, 90, 180, 270}.  */
struct ternary_qq_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vq0,vq0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (4, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_derived_vector_type (i + 1, i, type, r.SAME_TYPE_CLASS,
					   r.QUARTER_SIZE)
	|| !r.require_derived_vector_type (i + 2, i, type, r.SAME_TYPE_CLASS,
					   r.QUARTER_SIZE)
	|| !r.require_integer_immediate (i + 3))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_one_of (3, 0, 90, 180, 270);
  }
};
SHAPE (ternary_qq_rotate)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in
   {0, 90, 180, 270}.  */
struct ternary_rotate_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (3, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_one_of (3, 0, 90, 180, 270);
  }
};
SHAPE (ternary_rotate)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [0, sizeof (<t0>_t) * 8 - 1].  */
struct ternary_shift_left_imm_def : public ternary_shift_imm_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (2, 0, bits - 1);
  }
};
SHAPE (ternary_shift_left_imm)

/* sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument must be an integer constant expression in the
   range [1, sizeof (<t0>_t) * 8].  */
struct ternary_shift_right_imm_def : public ternary_shift_imm_base
{
  bool
  check (function_checker &c) const OVERRIDE
  {
    unsigned int bits = c.type_suffix (0).element_bits;
    return c.require_immediate_range (2, 1, bits);
  }
};
SHAPE (ternary_shift_right_imm)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, sv<t0:uint>_t).  */
struct ternary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (3, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
	|| !r.require_matching_vector_type (i + 1, type)
	|| !r.require_derived_vector_type (i + 2, i, type, TYPE_unsigned))
      return error_mark_node;

    return r.resolve_to (r.mode_suffix_id, type);
  }
};
SHAPE (ternary_uint)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t,
		       sv<t0:int:quarter>_t).  */
struct ternary_uintq_intq_def
  : public ternary_resize2_base<function_resolver::QUARTER_SIZE,
				TYPE_unsigned, TYPE_signed>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vqu0,vqs0", group, MODE_none);
  }
};
SHAPE (ternary_uintq_intq)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t, sv<t0:int:quarter>_t,
		       uint64_t)

   where the final argument is an integer constant expression in the range
   [0, 16 / sizeof (<t0>_t) - 1].  */
struct ternary_uintq_intq_lane_def
  : public ternary_qq_lane_base<TYPE_unsigned, TYPE_signed>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vqu0,vqs0,su64", group, MODE_none);
  }
};
SHAPE (ternary_uintq_intq_lane)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0:uint:quarter>_t, sv<t0:int:quarter>_t)
   sv<t0>_t svfoo[_n_t0](sv<t0>_t, sv<t0:uint:quarter>_t,
			 <t0:int:quarter>_t).  */
struct ternary_uintq_intq_opt_n_def
  : public ternary_resize2_opt_n_base<function_resolver::QUARTER_SIZE,
				      TYPE_unsigned, TYPE_signed>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,vqu0,vqs0", group, MODE_none);
    build_all (b, "v0,v0,vqu0,sqs0", group, MODE_n);
  }
};
SHAPE (ternary_uintq_intq_opt_n)

/* sv<t0>_t svfoo[_t0](sv<t0>_t, sv<t0>_t, uint64_t)

   where the final argument is an integer constant expression in the
   range [0, 7].  */
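/* For example, for t0 == f32 this gives
   "svfloat32_t svfoo[_f32](svfloat32_t, svfloat32_t, uint64_t)";
   svtmad is a presumed client.  */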
struct tmad_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0,v0,su64", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_uniform (2, 1);
  }

  bool
  check (function_checker &c) const OVERRIDE
  {
    return c.require_immediate_range (2, 0, 7);
  }
};
SHAPE (tmad)

/* sv<t0>_t svfoo[_t0](sv<t0>_t)

   i.e. the standard shape for unary operations that operate on
   uniform types.  */
struct unary_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary ();
  }
};
SHAPE (unary)

/* sv<t0>_t svfoo_t0[_t1](sv<t1>_t)

   where the target type <t0> must be specified explicitly but the source
   type <t1> can be inferred.  */
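/* For example, svcvt is a presumed client: svcvt_f32[_s32] takes an
   svint32_t and returns an svfloat32_t, with only the source suffix
   being inferable from the arguments.  */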
3216struct unary_convert_def : public overloaded_base<1>
3217{
3218  void
3219  build (function_builder &b, const function_group_info &group) const OVERRIDE
3220  {
3221    b.add_overloaded_functions (group, MODE_none);
3222    build_all (b, "v0,v1", group, MODE_none);
3223  }
3224
3225  tree
3226  resolve (function_resolver &r) const OVERRIDE
3227  {
3228    return r.resolve_unary (r.type_suffix (0).tclass,
3229			    r.type_suffix (0).element_bits);
3230  }
3231};
3232SHAPE (unary_convert)
3233
3234/* sv<t0>_t svfoo_t0[_t1](sv<t0>_t, sv<t1>_t)
3235
3236   This is a version of unary_convert in which the even-indexed
3237   elements are passed in as a first parameter, before any governing
3238   predicate.  */
3239struct unary_convert_narrowt_def : public overloaded_base<1>
3240{
3241  void
3242  build (function_builder &b, const function_group_info &group) const OVERRIDE
3243  {
3244    b.add_overloaded_functions (group, MODE_none);
3245    build_all (b, "v0,v1", group, MODE_none);
3246  }
3247
3248  tree
3249  resolve (function_resolver &r) const OVERRIDE
3250  {
3251    return r.resolve_unary (r.type_suffix (0).tclass,
3252			    r.type_suffix (0).element_bits, true);
3253  }
3254};
3255SHAPE (unary_convert_narrowt)
3256
3257/* sv<t0>_t svfoo[_t0](sv<t0:half>_t).  */
3258struct unary_long_def : public overloaded_base<0>
3259{
3260  void
3261  build (function_builder &b, const function_group_info &group) const OVERRIDE
3262  {
3263    b.add_overloaded_functions (group, MODE_none);
3264    build_all (b, "v0,vh0", group, MODE_none);
3265  }
3266
3267  tree
3268  resolve (function_resolver &r) const OVERRIDE
3269  {
3270    unsigned int i, nargs;
3271    type_suffix_index type, result_type;
3272    if (!r.check_gp_argument (1, i, nargs)
3273	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES
3274	|| (result_type = long_type_suffix (r, type)) == NUM_TYPE_SUFFIXES)
3275      return error_mark_node;
3276
3277    if (tree res = r.lookup_form (r.mode_suffix_id, result_type))
3278      return res;
3279
3280    return r.report_no_such_form (type);
3281  }
3282};
3283SHAPE (unary_long)
3284
3285/* sv<t0>_t svfoo[_n]_t0(<t0>_t).  */
3286struct unary_n_def : public overloaded_base<1>
3287{
3288  void
3289  build (function_builder &b, const function_group_info &group) const OVERRIDE
3290  {
3291    /* The "_n" suffix is optional; the full name has it, but the short
3292       name doesn't.  */
3293    build_all (b, "v0,s0", group, MODE_n, true);
3294  }
3295
3296  tree
3297  resolve (function_resolver &) const OVERRIDE
3298  {
3299    /* The short forms just make "_n" implicit, so no resolution is needed.  */
3300    gcc_unreachable ();
3301  }
3302};
3303SHAPE (unary_n)
3304
3305/* sv<t0:half>_t svfoo[_t0](sv<t0>_t).  */
3306typedef unary_narrowb_base<> unary_narrowb_def;
3307SHAPE (unary_narrowb)
3308
3309/* sv<t0:half>_t svfoo[_t0](sv<t0:half>_t, sv<t0>_t).  */
3310typedef unary_narrowt_base<> unary_narrowt_def;
3311SHAPE (unary_narrowt)
3312
3313/* sv<t0:uint:half>_t svfoo[_t0](sv<t0>_t).  */
3314typedef unary_narrowb_base<TYPE_unsigned> unary_narrowb_to_uint_def;
3315SHAPE (unary_narrowb_to_uint)

/* sv<t0:uint:half>_t svfoo[_t0](sv<t0:uint:half>_t, sv<t0>_t).  */
typedef unary_narrowt_base<TYPE_unsigned> unary_narrowt_to_uint_def;
SHAPE (unary_narrowt_to_uint)
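
/* Illustrative example: the SVE2 intrinsic svqxtunt plausibly fits
   this shape:

     svuint8_t svqxtunt[_s16] (svuint8_t, svint16_t)  */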

/* svbool_t svfoo(svbool_t).  */
struct unary_pred_def : public nonoverloaded_base
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    build_all (b, "v0,v0", group, MODE_none);
  }
};
SHAPE (unary_pred)
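
/* Illustrative example: the predicate form of svrev plausibly fits
   this shape; its _b<bits> suffix does not affect the prototype:

     svbool_t svrev_b8 (svbool_t)  */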

/* sv<t0:int>_t svfoo[_t0](sv<t0>_t)

   i.e. a version of "unary" in which the returned vector contains
   signed integers.  */
struct unary_to_int_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vs0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (TYPE_signed);
  }
};
SHAPE (unary_to_int)
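
/* Illustrative example: the SVE2 intrinsic svlogb is believed to fit
   this shape:

     svint32_t svlogb[_f32] (svfloat32_t)  */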

/* sv<t0:uint>_t svfoo[_t0](sv<t0>_t)

   i.e. a version of "unary" in which the returned vector contains
   unsigned integers.  */
struct unary_to_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "vu0,v0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    return r.resolve_unary (TYPE_unsigned);
  }
};
SHAPE (unary_to_uint)
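
/* Illustrative example: svcls plausibly fits this shape:

     svuint8_t svcls[_s8] (svint8_t)  */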

/* sv<t0>_t svfoo[_t0](sv<t0:uint>_t)

   where <t0> always belongs to a certain type class, and where <t0:uint>
   therefore uniquely determines <t0>.  */
struct unary_uint_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vu0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (1, i, nargs)
	|| (type = r.infer_unsigned_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* Search for a valid suffix with the same number of bits as TYPE.  */
    unsigned int element_bits = type_suffixes[type].element_bits;
    if (type_suffixes[type].unsigned_p)
      for (unsigned int j = 0; j < NUM_TYPE_SUFFIXES; ++j)
	if (type_suffixes[j].element_bits == element_bits)
	  if (tree res = r.lookup_form (r.mode_suffix_id,
					type_suffix_index (j)))
	    return res;

    return r.report_no_such_form (type);
  }
};
SHAPE (unary_uint)
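
/* Illustrative example: svexpa plausibly fits this shape; since it
   exists only for floating-point types, a svuint32_t argument uniquely
   determines the _f32 form:

     svfloat32_t svexpa[_f32] (svuint32_t)  */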

/* sv<t0>_t svfoo[_t0](sv<t0:half>_t)

   i.e. a version of "unary" in which the source elements are half the
   size of the destination elements, but have the same type class.  */
struct unary_widen_def : public overloaded_base<0>
{
  void
  build (function_builder &b, const function_group_info &group) const OVERRIDE
  {
    b.add_overloaded_functions (group, MODE_none);
    build_all (b, "v0,vh0", group, MODE_none);
  }

  tree
  resolve (function_resolver &r) const OVERRIDE
  {
    unsigned int i, nargs;
    type_suffix_index type;
    if (!r.check_gp_argument (1, i, nargs)
	|| (type = r.infer_vector_type (i)) == NUM_TYPE_SUFFIXES)
      return error_mark_node;

    /* There is only a single form for predicates.  */
    if (type == TYPE_SUFFIX_b)
      return r.resolve_to (r.mode_suffix_id, type);

    if (type_suffixes[type].integer_p
	&& type_suffixes[type].element_bits < 64)
      {
	type_suffix_index wide_suffix
	  = find_type_suffix (type_suffixes[type].tclass,
			      type_suffixes[type].element_bits * 2);
	if (tree res = r.lookup_form (r.mode_suffix_id, wide_suffix))
	  return res;
      }

    return r.report_no_such_form (type);
  }
};
SHAPE (unary_widen)
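
/* Illustrative example: svunpklo plausibly fits this shape, including
   the single-form predicate case handled by the resolver above:

     svint16_t svunpklo[_s16] (svint8_t)
     svbool_t svunpklo[_b] (svbool_t)  */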

}