/* Subroutines for the C front end on the PowerPC architecture.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   Contributed by Zack Weinberg <zack@codesourcery.com>
   and Paolo Bonzini <bonzini@gnu.org>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "c-family/c-common.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "c-family/c-pragma.h"
#include "langhooks.h"
#include "c/c-tree.h"



/* Handle the machine specific pragma longcall.  Its syntax is

   # pragma longcall ( TOGGLE )

   where TOGGLE is either 0 or 1.

   rs6000_default_long_calls is set to the value of TOGGLE, changing
   whether or not new function declarations receive a longcall
   attribute by default.  */

void
rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
{
#define SYNTAX_ERROR(gmsgid) do {					\
  warning (OPT_Wpragmas, gmsgid);					\
  warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>");	\
  return;								\
} while (0)

  tree x, n;

  /* If we get here, generic code has already scanned the directive
     leader and the word "longcall".  */

  if (pragma_lex (&x) != CPP_OPEN_PAREN)
    SYNTAX_ERROR ("missing open paren");
  if (pragma_lex (&n) != CPP_NUMBER)
    SYNTAX_ERROR ("missing number");
  if (pragma_lex (&x) != CPP_CLOSE_PAREN)
    SYNTAX_ERROR ("missing close paren");

  if (n != integer_zero_node && n != integer_one_node)
    SYNTAX_ERROR ("number must be 0 or 1");

  if (pragma_lex (&x) != CPP_EOF)
    warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");

  rs6000_default_long_calls = (n == integer_one_node);
}
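
/* Usage sketch (illustrative only, not part of the implementation):

       #pragma longcall (1)
       void far_away (void);   // gets the longcall attribute by default
       #pragma longcall (0)
       void nearby (void);     // back to ordinary calls
*/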

/* Handle defining many CPP flags based on TARGET_xxx.  As a general
   policy, rather than trying to guess what flags a user might want a
   #define for, it's better to define a flag for everything.  */

#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)

/* Keep the AltiVec keywords handy for fast comparisons.  */
static GTY(()) tree __vector_keyword;
static GTY(()) tree vector_keyword;
static GTY(()) tree __pixel_keyword;
static GTY(()) tree pixel_keyword;
static GTY(()) tree __bool_keyword;
static GTY(()) tree bool_keyword;
static GTY(()) tree _Bool_keyword;
static GTY(()) tree __int128_type;
static GTY(()) tree __uint128_type;

/* Preserved across calls.  */
static tree expand_bool_pixel;

static cpp_hashnode *
altivec_categorize_keyword (const cpp_token *tok)
{
  if (tok->type == CPP_NAME)
    {
      cpp_hashnode *ident = tok->val.node.node;

      if (ident == C_CPP_HASHNODE (vector_keyword))
	return C_CPP_HASHNODE (__vector_keyword);

      if (ident == C_CPP_HASHNODE (pixel_keyword))
	return C_CPP_HASHNODE (__pixel_keyword);

      if (ident == C_CPP_HASHNODE (bool_keyword))
	return C_CPP_HASHNODE (__bool_keyword);

      if (ident == C_CPP_HASHNODE (_Bool_keyword))
	return C_CPP_HASHNODE (__bool_keyword);

      return ident;
    }

  return 0;
}

static void
init_vector_keywords (void)
{
  /* Keywords without two leading underscores are context-sensitive, and hence
     implemented as conditional macros, controlled by the
     rs6000_macro_to_expand() function below.  If we have ISA 2.07 64-bit
     support, record the __int128_t and __uint128_t types.  */

  __vector_keyword = get_identifier ("__vector");
  C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;

  __pixel_keyword = get_identifier ("__pixel");
  C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;

  __bool_keyword = get_identifier ("__bool");
  C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;

  vector_keyword = get_identifier ("vector");
  C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;

  pixel_keyword = get_identifier ("pixel");
  C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;

  bool_keyword = get_identifier ("bool");
  C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;

  _Bool_keyword = get_identifier ("_Bool");
  C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;

  if (TARGET_VADDUQM)
    {
      __int128_type = get_identifier ("__int128_t");
      __uint128_type = get_identifier ("__uint128_t");
    }
}
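
/* Illustrative effect (not compiled here): because the short keywords are
   conditional macros, the surrounding tokens decide whether they expand:

       vector unsigned int v;   // 'vector' expands to '__vector'
       std::vector<int> w;      // 'vector' is left alone

   The token-by-token decision is made in rs6000_macro_to_expand below.  */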

/* Helper function to find out which RID_INT_N_* code is the one for
   __int128, if any.  Returns RID_MAX + 1 if none apply; for our
   purposes that is safe to compare against, since we always expect
   __int128 to be available.  */
static int
rid_int128 (void)
{
  int i;

  for (i = 0; i < NUM_INT_N_ENTS; i++)
    if (int_n_enabled_p[i]
	&& int_n_data[i].bitsize == 128)
      return RID_INT_N_0 + i;

  return RID_MAX + 1;
}

/* Called to decide whether a conditional macro should be expanded.
   Since we have exactly one such macro (i.e., 'vector'), we do not
   need to examine the 'tok' parameter.  */

static cpp_hashnode *
rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
{
  cpp_hashnode *expand_this = tok->val.node.node;
  cpp_hashnode *ident;

  /* If the current machine does not have altivec, don't look for the
     keywords.  */
  if (!TARGET_ALTIVEC)
    return NULL;

  ident = altivec_categorize_keyword (tok);

  if (ident != expand_this)
    expand_this = NULL;

  if (ident == C_CPP_HASHNODE (__vector_keyword))
    {
      int idx = 0;
      do
	tok = cpp_peek_token (pfile, idx++);
      while (tok->type == CPP_PADDING);
      ident = altivec_categorize_keyword (tok);

      if (ident == C_CPP_HASHNODE (__pixel_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __pixel_keyword;
	}
      else if (ident == C_CPP_HASHNODE (__bool_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __bool_keyword;
	}
      /* The boost libraries have code with Iterator::vector vector in it.  If
	 we allow the normal handling, this module will be called recursively,
	 and the vector will be skipped.  */
      else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
	{
	  enum rid rid_code = (enum rid)(ident->rid_code);
	  bool is_macro = cpp_macro_p (ident);

	  /* If there is a function-like macro, check if it is going to be
	     invoked with or without arguments.  Without a following '(',
	     treat it like a non-macro; otherwise the following
	     cpp_get_token eats what should be preserved.  */
	  if (is_macro && cpp_fun_like_macro_p (ident))
	    {
	      int idx2 = idx;
	      do
		tok = cpp_peek_token (pfile, idx2++);
	      while (tok->type == CPP_PADDING);
	      if (tok->type != CPP_OPEN_PAREN)
		is_macro = false;
	    }

	  if (is_macro)
	    {
	      do
		(void) cpp_get_token (pfile);
	      while (--idx > 0);
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);
	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __pixel_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __bool_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident)
		rid_code = (enum rid)(ident->rid_code);
	    }

	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
	      || rid_code == RID_INT || rid_code == RID_CHAR
	      || rid_code == RID_FLOAT
	      || (rid_code == RID_DOUBLE && TARGET_VSX)
	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
	    {
	      expand_this = C_CPP_HASHNODE (__vector_keyword);
	      /* If the next keyword is bool or pixel, it
		 will need to be expanded as well.  */
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);

	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		expand_bool_pixel = __pixel_keyword;
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		expand_bool_pixel = __bool_keyword;
	      else
		{
		  /* Try two tokens down, too.  */
		  do
		    tok = cpp_peek_token (pfile, idx++);
		  while (tok->type == CPP_PADDING);
		  ident = altivec_categorize_keyword (tok);
		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
		    expand_bool_pixel = __pixel_keyword;
		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
		    expand_bool_pixel = __bool_keyword;
		}
	    }

	  /* Support vector __int128_t, but we don't need to worry about bool
	     or pixel on this type.  */
	  else if (TARGET_VADDUQM
		   && (ident == C_CPP_HASHNODE (__int128_type)
		       || ident == C_CPP_HASHNODE (__uint128_type)))
	    expand_this = C_CPP_HASHNODE (__vector_keyword);
	}
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
    {
      expand_this = C_CPP_HASHNODE (__pixel_keyword);
      expand_bool_pixel = 0;
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
    {
      expand_this = C_CPP_HASHNODE (__bool_keyword);
      expand_bool_pixel = 0;
    }

  return expand_this;
}
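
/* Concrete lookahead example (illustrative only): given

       vector bool char vb;

   the callback fires on 'vector', peeks ahead, categorizes 'bool' as
   __bool, and records that in expand_bool_pixel; the next callback, on
   'bool' itself, then expands it as well.  */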


/* Define or undefine a single macro.  */

static void
rs6000_define_or_undefine_macro (bool define_p, const char *name)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);

  if (define_p)
    cpp_define (parse_in, name);
  else
    cpp_undef (parse_in, name);
}

/* Define or undefine macros based on the current target.  If the user does
   #pragma GCC target, we need to adjust the macros dynamically.  Note, some of
   the options needed for builtins have been moved to separate variables, so
   we take both the target flags and the builtin flags as arguments.  */

void
rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
			     HOST_WIDE_INT bu_mask)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr,
	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
	     (define_p) ? "define" : "undef",
	     flags, bu_mask);

  /* Each of the flags mentioned below controls whether certain
     preprocessor macros will be automatically defined when
     preprocessing source files for compilation by this compiler.
     While most of these flags can be enabled or disabled
     explicitly by specifying certain command-line options when
     invoking the compiler, there are also many ways in which these
     flags are enabled or disabled implicitly, based on compiler
     defaults, configuration choices, and on the presence of certain
     related command-line options.  Many, but not all, of these
     implicit behaviors can be found in file "rs6000.c", the
     rs6000_option_override_internal() function.

     In general, each of the flags may be automatically enabled in
     any of the following conditions:

     1. If no -mcpu target is specified on the command line and no
	--with-cpu target is specified to the configure command line
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     2. If the target specified with -mcpu=target on the command line, or
	in the absence of a -mcpu=target command-line option, if the
	target specified using --with-cpu=target on the configure
	command line, is disqualified because the associated binary
	tools (e.g. the assembler) lack support for the requested cpu,
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     3. If either of the above two conditions apply except that the
	TARGET_DEFAULT macro is defined to equal zero, and
	TARGET_POWERPC64 and
	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
	   target), or
	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_POWERPC64 or it is one of the flags included in
	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).

     4. If a cpu has been requested with a -mcpu=target command-line option
	and this cpu has not been disqualified due to shortcomings of the
	binary tools, and the set of flags associated with the requested cpu
	include the flag to be enabled.  See rs6000-cpus.def for macro
	definitions that represent various ABI standards
	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
	the specific flags that are associated with each of the cpu
	choices that can be specified as the target of a -mcpu=target
	compile option, or as the target of a --with-cpu=target
	configure option.  Target flags that are specified in either
	of these two ways are considered "implicit" since the flags
	are not mentioned specifically by name.

	Additional documentation describing behavior specific to
	particular flags is provided below, immediately preceding the
	use of each relevant flag.

     5. If there is no -mcpu=target command-line option, and the cpu
	requested by a --with-cpu=target command-line option has not
	been disqualified due to shortcomings of the binary tools, and
	the set of flags associated with the specified target include
	the flag to be enabled.  See the notes immediately above for a
	summary of the flags associated with particular cpu
	definitions.  */

  /* rs6000_isa_flags based options.  */
  rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
  if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
  if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
  if ((flags & OPTION_MASK_POWERPC64) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
  if ((flags & OPTION_MASK_MFCRF) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
  if ((flags & OPTION_MASK_POPCNTB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
  if ((flags & OPTION_MASK_FPRND) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
  if ((flags & OPTION_MASK_CMPB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
  if ((flags & OPTION_MASK_POPCNTD) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
  /* Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
     turned on in the following condition:
     1. TARGET_P8_VECTOR is enabled and OPTION_MASK_DIRECT_MOVE is not
        explicitly disabled.
        Hereafter, the OPTION_MASK_DIRECT_MOVE flag is considered to
        have been turned on explicitly.
     Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
     turned off in any of the following conditions:
     1. TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX is explicitly
	disabled and OPTION_MASK_DIRECT_MOVE was not explicitly
	enabled.
     2. TARGET_VSX is off.  */
  if ((flags & OPTION_MASK_DIRECT_MOVE) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
  if ((flags & OPTION_MASK_MODULO) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
  if ((flags & OPTION_MASK_POWER10) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
  if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
  if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
    rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
  /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
     in any of the following conditions:
     1. The operating system is Darwin and it is configured for 64
	bit.  (See darwin_rs6000_override_options.)
     2. The operating system is Darwin and the operating system
	version is 10.5 or higher and the user has not explicitly
	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
	the compiler is not producing code for integration within the
	kernel.  (See darwin_rs6000_override_options.)
     Note that the OPTION_MASK_ALTIVEC flag is automatically turned
     off in any of the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_ALTIVEC
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_ALTIVEC) != 0)
    {
      const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
      rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
      rs6000_define_or_undefine_macro (define_p, vec_str);

      /* Define this when supporting context-sensitive keywords.  */
      if (!flag_iso)
	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
      if (rs6000_aix_extabi)
	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
    }
  /* Note that the OPTION_MASK_VSX flag is automatically turned on in
     the following conditions:
     1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
        was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
        flag is considered to have been explicitly turned on.
     Note that the OPTION_MASK_VSX flag is automatically turned off in
     the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
	compilation context, or if it is turned on by any means in an
	inner compilation context.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been turned off explicitly.
     4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     5. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_VSX flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_VSX
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_VSX) != 0)
    rs6000_define_or_undefine_macro (define_p, "__VSX__");
  if ((flags & OPTION_MASK_HTM) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__HTM__");
      /* Tell the user that our HTM insn patterns act as memory barriers.  */
      rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
    }
  /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     on in the following conditions:
     1. TARGET_P9_VECTOR is explicitly turned on and
        OPTION_MASK_P8_VECTOR is not explicitly turned off.
        Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
        have been turned on explicitly.
     Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     off in the following conditions:
     1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
	not turned on explicitly.
     2. If TARGET_ALTIVEC is turned off.  Hereafter, the
	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
	explicitly.
     3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
        explicitly enabled.  If TARGET_VSX is explicitly enabled, the
        OPTION_MASK_P8_VECTOR flag is hereafter also considered to
	have been turned off explicitly.  */
  if ((flags & OPTION_MASK_P8_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
  /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
     off in the following conditions:
     1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
        not turned on explicitly.  Hereafter, if OPTION_MASK_P8_VECTOR
        was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
        also considered to have been turned off explicitly.
     Note that the OPTION_MASK_P9_VECTOR is automatically turned on
     in the following conditions:
     1. If TARGET_P9_MINMAX was turned on explicitly.
        Hereafter, the OPTION_MASK_P9_VECTOR flag is considered to
        have been turned on explicitly.  */
  if ((flags & OPTION_MASK_P9_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
  /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
	load/store are disabled on little endian).  */
  if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
  /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
     automatically turned on in the following conditions:
     1. If TARGET_QUAD_MEMORY and this flag was not explicitly
	disabled.  */
  if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
  /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
     in the following conditions:
     1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
	on explicitly.
     2. If TARGET_ALTIVEC is turned off.  */
  if ((flags & OPTION_MASK_CRYPTO) != 0)
    rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
  if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
      if (define_p)
	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
      else
	rs6000_define_or_undefine_macro (false, "__float128");
    }
  /* OPTION_MASK_FLOAT128_HW can be turned on if -mcpu=power9 is used or
     via the target attribute/pragma.  */
  if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
    rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");

  /* Options from the builtin masks.  */
  /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
     PROCESSOR_CELL) (e.g. -mcpu=cell).  */
  if ((bu_mask & RS6000_BTM_CELL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PPU__");

  /* Tell the user if we support the MMA instructions.  */
  if ((flags & OPTION_MASK_MMA) != 0)
    rs6000_define_or_undefine_macro (define_p, "__MMA__");
  /* Whether pc-relative code is being generated.  */
  if ((flags & OPTION_MASK_PCREL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PCREL__");
}
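
/* Illustrative behavior (a sketch, not asserted by the code above): when a
   function switches targets, e.g.

       #pragma GCC target ("cpu=power9")
       // _ARCH_PWR9 is defined for the code that follows ...
       #pragma GCC reset_options
       // ... and undefined again here.

   the target-pragma machinery calls rs6000_target_modify_macros with
   define_p false for the old flag set and true for the new one.  */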

void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
  /* Define all of the common macros.  */
  rs6000_target_modify_macros (true, rs6000_isa_flags,
			       rs6000_builtin_mask_calculate ());

  if (TARGET_FRE)
    builtin_define ("__RECIP__");
  if (TARGET_FRES)
    builtin_define ("__RECIPF__");
  if (TARGET_FRSQRTE)
    builtin_define ("__RSQRTE__");
  if (TARGET_FRSQRTES)
    builtin_define ("__RSQRTEF__");
  if (TARGET_FLOAT128_TYPE)
    builtin_define ("__FLOAT128_TYPE__");
#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  builtin_define ("__BUILTIN_CPU_SUPPORTS__");
#endif

  if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
    {
      /* Define the AltiVec syntactic elements.  */
      builtin_define ("__vector=__attribute__((altivec(vector__)))");
      builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
      builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");

      if (!flag_iso)
	{
	  builtin_define ("vector=vector");
	  builtin_define ("pixel=pixel");
	  builtin_define ("bool=bool");
	  builtin_define ("_Bool=_Bool");
	  init_vector_keywords ();

	  /* Enable context-sensitive macros.  */
	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
	}
    }
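
  /* Illustrative expansion (a sketch): with the definitions above,

	 __vector unsigned int v;

     reaches the C front end as

	 __attribute__((altivec(vector__))) unsigned int v;

     and the altivec attribute handler builds the actual vector type.  */
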
  if (!TARGET_HARD_FLOAT)
    builtin_define ("_SOFT_DOUBLE");
  /* Used by lwarx/stwcx. errata work-around.  */
  if (rs6000_cpu == PROCESSOR_PPC405)
    builtin_define ("__PPC405__");
  /* Used by libstdc++.  */
  if (TARGET_NO_LWSYNC)
    builtin_define ("__NO_LWSYNC__");

  if (TARGET_EXTRA_BUILTINS)
    {
      /* For the VSX builtin functions identical to Altivec functions, just map
	 the VSX builtin name onto the overloaded Altivec builtin (the Altivec
	 functions generate VSX code if -mvsx).  */
      builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
      builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
      builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
      builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
      builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
      builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
      builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");

      /* Also map the a and m versions of the multiply/add instructions to the
	 builtin for people blindly going off the instruction manual.  */
      builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
      builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
    }

  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
  if (TARGET_FLOAT128_TYPE)
    {
      builtin_define ("__builtin_fabsq=__builtin_fabsf128");
      builtin_define ("__builtin_copysignq=__builtin_copysignf128");
      builtin_define ("__builtin_nanq=__builtin_nanf128");
      builtin_define ("__builtin_nansq=__builtin_nansf128");
      builtin_define ("__builtin_infq=__builtin_inff128");
      builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
    }

  /* Tell users they can use __builtin_bswap{16,64}.  */
  builtin_define ("__HAVE_BSWAP__");

  /* May be overridden by target configuration.  */
  RS6000_CPU_CPP_ENDIAN_BUILTINS();

  if (TARGET_LONG_DOUBLE_128)
    {
      builtin_define ("__LONG_DOUBLE_128__");
      builtin_define ("__LONGDOUBLE128");

      if (TARGET_IEEEQUAD)
	{
	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
	     IEEE 128-bit floating point complex type for C++ (which does not
	     support _Float128 _Complex).  If the default for long double is
	     IEEE 128-bit mode, the library would need to use
	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
	     is a stop-gap to build with the older libraries, until we
	     get an updated library.  */
	  builtin_define ("__LONG_DOUBLE_IEEE128__");
	  builtin_define ("__KF__=__TF__");
	  builtin_define ("__KC__=__TC__");
	}
      else
	builtin_define ("__LONG_DOUBLE_IBM128__");
    }

  switch (TARGET_CMODEL)
    {
      /* Deliberately omit __CMODEL_SMALL__ since that was the default
	 before -mcmodel support was added.  */
    case CMODEL_MEDIUM:
      builtin_define ("__CMODEL_MEDIUM__");
      break;
    case CMODEL_LARGE:
      builtin_define ("__CMODEL_LARGE__");
      break;
    default:
      break;
    }

  switch (rs6000_current_abi)
    {
    case ABI_V4:
      builtin_define ("_CALL_SYSV");
      break;
    case ABI_AIX:
      builtin_define ("_CALL_AIXDESC");
      builtin_define ("_CALL_AIX");
      builtin_define ("_CALL_ELF=1");
      break;
    case ABI_ELFv2:
      builtin_define ("_CALL_ELF=2");
      break;
    case ABI_DARWIN:
      builtin_define ("_CALL_DARWIN");
      break;
    default:
      break;
    }

  /* Vector element order.  */
  if (BYTES_BIG_ENDIAN)
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
  else
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");

  /* Let the compiled code know if 'f' class registers will not be available.  */
  if (TARGET_SOFT_FLOAT)
    builtin_define ("__NO_FPRS__");

  /* Whether aggregates passed by value are aligned to a 16 byte boundary
     if their alignment is 16 bytes or larger.  */
  if ((TARGET_MACHO && rs6000_darwin64_abi)
      || DEFAULT_ABI == ABI_ELFv2
      || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
    builtin_define ("__STRUCT_PARM_ALIGN__=16");
}



/* Convert a type stored into a struct altivec_builtin_types as ID,
   into a tree.  The types are in rs6000_builtin_types: negative values
   create a pointer type for the type associated to ~ID.  Note it is
   a logical NOT, rather than a negation, otherwise you cannot represent
   a pointer type for ID 0.  */

static inline tree
rs6000_builtin_type (int id)
{
  tree t;
  t = rs6000_builtin_types[id < 0 ? ~id : id];
  return id < 0 ? build_pointer_type (t) : t;
}
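
/* For example (illustrative, assuming RS6000_BTI_INTSI is the table entry
   for 'int'):

       rs6000_builtin_type (RS6000_BTI_INTSI)    // yields 'int'
       rs6000_builtin_type (~RS6000_BTI_INTSI)   // yields 'int *'
*/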

/* Check whether the type of an argument, T, is compatible with a type ID
   stored into a struct altivec_builtin_types.  Integer types are considered
   compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
   the decision.  Also allow long double and _Float128 to be compatible if
   -mabi=ieeelongdouble.  */

static inline bool
is_float128_p (tree t)
{
  return (t == float128_type_node
	  || (TARGET_IEEEQUAD
	      && TARGET_LONG_DOUBLE_128
	      && t == long_double_type_node));
}

static inline bool
rs6000_builtin_type_compatible (tree t, int id)
{
  tree builtin_type;
  builtin_type = rs6000_builtin_type (id);
  if (t == error_mark_node)
    return false;
  if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
    return true;
  else if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
	   && is_float128_p (t) && is_float128_p (builtin_type))
    return true;
  else
    return lang_hooks.types_compatible_p (t, builtin_type);
}


/* In addition to calling fold_convert for EXPR of type TYPE, also
   call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
   hiding there (PR47197).  */

static tree
fully_fold_convert (tree type, tree expr)
{
  tree result = fold_convert (type, expr);
  bool maybe_const = true;

  if (!c_dialect_cxx ())
    result = c_fully_fold (result, false, &maybe_const);

  return result;
}

/* Build a tree for a function call to an Altivec non-overloaded builtin.
   The overloaded builtin that matched the types and args is described
   by DESC.  The N arguments are given in ARGS.

   Actually the only thing it does is call fold_convert on ARGS, with
   a small exception for vec_{all,any}_{ge,le} predicates.  */

static tree
altivec_build_resolved_builtin (tree *args, int n,
				const struct altivec_builtin_types *desc)
{
  tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
  tree ret_type = rs6000_builtin_type (desc->ret_type);
  tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
  tree arg_type[3];
  tree call;

  int i;
  for (i = 0; i < n; i++)
    arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);

  /* The AltiVec overloading implementation is overall gross, but this
     is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
     are completely different for floating-point vs. integer vector
     types, because the former has vcmpgefp, but the latter should use
     vcmpgtXX.

     In practice, the second and third arguments are swapped, and the
     condition (LT vs. EQ, which is recognizable by bit 1 of the first
     argument) is reversed.  Patch the arguments here before building
     the resolved CALL_EXPR.  */
  if (n == 3
      && desc->code == ALTIVEC_BUILTIN_VEC_VCMPGE_P
      && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P
      && desc->overloaded_code != VSX_BUILTIN_XVCMPGEDP_P)
    {
      std::swap (args[1], args[2]);
      std::swap (arg_type[1], arg_type[2]);

      args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
			     build_int_cst (NULL_TREE, 2));
    }
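
  /* Illustratively: for integer elements, all (a >= b) is equivalent to
     !any (b > a), so a vcmpgtXX predicate with swapped operands and a
     flipped condition bit stands in for the missing vcmpgeXX.  */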

  switch (n)
    {
    case 0:
      call = build_call_expr (impl_fndecl, 0);
      break;
    case 1:
      call = build_call_expr (impl_fndecl, 1,
			      fully_fold_convert (arg_type[0], args[0]));
      break;
    case 2:
      call = build_call_expr (impl_fndecl, 2,
			      fully_fold_convert (arg_type[0], args[0]),
			      fully_fold_convert (arg_type[1], args[1]));
      break;
    case 3:
      call = build_call_expr (impl_fndecl, 3,
			      fully_fold_convert (arg_type[0], args[0]),
			      fully_fold_convert (arg_type[1], args[1]),
			      fully_fold_convert (arg_type[2], args[2]));
      break;
    default:
      gcc_unreachable ();
    }
  return fold_convert (ret_type, call);
}

/* Implementation of the resolve_overloaded_builtin target hook, to
   support Altivec's overloaded builtins.  */

tree
altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
				    void *passed_arglist)
{
  vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
  unsigned int nargs = vec_safe_length (arglist);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  tree types[3], args[3];
  const struct altivec_builtin_types *desc;
  unsigned int n;

  if (!rs6000_overloaded_builtin_p (fcode))
    return NULL_TREE;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
	     (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));

  /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsl%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");
  else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsr%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");

  if (fcode == ALTIVEC_BUILTIN_VEC_MUL)
    {
      /* vec_mul needs to be special cased because there are no instructions
	 for it for the {un}signed char, {un}signed short, and {un}signed int
	 types.  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_mul");
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);

      /* Both arguments must be vectors and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	  case E_QImode:
	  case E_HImode:
	  case E_SImode:
	  case E_DImode:
	  case E_TImode:
	    {
	      /* For scalar types just use a multiply expression.  */
	      return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
				      fold_convert (TREE_TYPE (arg0), arg1));
	    }
	  case E_SFmode:
	    {
	      /* For floats use the xvmulsp instruction directly.  */
	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULSP];
	      return build_call_expr (call, 2, arg0, arg1);
	    }
	  case E_DFmode:
	    {
	      /* For doubles use the xvmuldp instruction directly.  */
	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULDP];
	      return build_call_expr (call, 2, arg0, arg1);
	    }
	  /* Other types are errors.  */
	  default:
	    goto bad;
	}
    }
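
  /* Illustrative resolutions (a sketch):

	 vector int a, b;     // vec_mul (a, b) folds to a * b
	 vector float f, g;   // vec_mul (f, g) calls the xvmulsp builtin
  */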

  if (fcode == ALTIVEC_BUILTIN_VEC_CMPNE)
    {
      /* vec_cmpne needs to be special cased because there are no instructions
	 for it (prior to power 9).  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);

      /* Both arguments must be vectors and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
	goto bad;

      /* Power9 instructions provide the most efficient implementation of
	 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
	 or SFmode or DFmode.  */
      if (!TARGET_P9_VECTOR
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DImode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == TImode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == SFmode)
	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DFmode))
	{
	  switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	    {
	      /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
		 vec_cmpeq (va, vb)).  */
	      /* Note: vec_nand also works, but the optimizer changes
		 vec_nand into vec_nor anyway.  */
	    case E_QImode:
	    case E_HImode:
	    case E_SImode:
	    case E_DImode:
	    case E_TImode:
	    case E_SFmode:
	    case E_DFmode:
	      {
		/* call = vec_cmpeq (va, vb)
		   result = vec_nor (call, call).  */
		vec<tree, va_gc> *params = make_tree_vector ();
		vec_safe_push (params, arg0);
		vec_safe_push (params, arg1);
		tree call = altivec_resolve_overloaded_builtin
		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_CMPEQ],
		   params);
		/* Use save_expr to ensure that operands used more than once
		   that may have side effects (like calls) are only evaluated
		   once.  */
		call = save_expr (call);
		params = make_tree_vector ();
		vec_safe_push (params, call);
		vec_safe_push (params, call);
		return altivec_resolve_overloaded_builtin
		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_NOR], params);
	      }
	      /* Other types are errors.  */
	    default:
	      goto bad;
	    }
	}
      /* else, fall through and process the Power9 alternative below.  */
    }

  if (fcode == ALTIVEC_BUILTIN_VEC_ADDE
      || fcode == ALTIVEC_BUILTIN_VEC_SUBE)
    {
      /* vec_adde and vec_sube need to be special cased because there is
	 no instruction for the {un}signed int version.  */
      if (nargs != 3)
	{
	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDE
	    ? "vec_adde" : "vec_sube";
	  error ("builtin %qs only accepts 3 arguments", name);
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);
      tree arg2 = (*arglist)[2];
      tree arg2_type = TREE_TYPE (arg2);

      /* All 3 arguments must be vectors of (signed or unsigned) (int or
	 __int128) and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	  /* For {un}signed ints,
	     vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
						   vec_and (carryv, 1)).
	     vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
						   vec_and (carryv, 1)).  */
	  case E_SImode:
	    {
	      tree add_sub_builtin;

	      vec<tree, va_gc> *params = make_tree_vector ();
	      vec_safe_push (params, arg0);
	      vec_safe_push (params, arg1);

	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
	      else
		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];

	      tree call = altivec_resolve_overloaded_builtin (loc,
							      add_sub_builtin,
							      params);
	      tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
	      tree ones_vector = build_vector_from_val (arg0_type, const1);
	      tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
					       arg2, ones_vector);
	      params = make_tree_vector ();
	      vec_safe_push (params, call);
	      vec_safe_push (params, and_expr);
	      return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
							 params);
	    }
	  /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instructions
	     directly.  */
	  case E_TImode:
	    {
	      tree bii;

	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
		bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
	      else
		bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBEUQM];

	      return altivec_resolve_overloaded_builtin (loc, bii, arglist);
	    }

	  /* Types other than {un}signed int and {un}signed __int128
	     are errors.  */
	  default:
	    goto bad;
	}
    }

  if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC
      || fcode == ALTIVEC_BUILTIN_VEC_SUBEC)
    {
      /* vec_addec and vec_subec need to be special cased because there is
	 no instruction for the {un}signed int version.  */
      if (nargs != 3)
	{
	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDEC
	    ? "vec_addec" : "vec_subec";
	  error ("builtin %qs only accepts 3 arguments", name);
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);
      tree arg2 = (*arglist)[2];
      tree arg2_type = TREE_TYPE (arg2);

      /* All 3 arguments must be vectors of (signed or unsigned) (int or
	 __int128) and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	  /* For {un}signed ints,
	     vec_addec (va, vb, carryv) ==
				vec_or (vec_addc (va, vb),
					vec_addc (vec_add (va, vb),
						  vec_and (carryv, 0x1))).  */
	  case E_SImode:
	    {
	      /* Use save_expr to ensure that operands used more than once
		 that may have side effects (like calls) are only evaluated
		 once.  */
	      tree as_builtin;
	      tree as_c_builtin;

	      arg0 = save_expr (arg0);
	      arg1 = save_expr (arg1);
	      vec<tree, va_gc> *params = make_tree_vector ();
	      vec_safe_push (params, arg0);
	      vec_safe_push (params, arg1);

	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
		as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
	      else
		as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUBC];

	      tree call1 = altivec_resolve_overloaded_builtin (loc,
							       as_c_builtin,
							       params);
	      params = make_tree_vector ();
	      vec_safe_push (params, arg0);
	      vec_safe_push (params, arg1);

	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
		as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
	      else
		as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];

	      tree call2 = altivec_resolve_overloaded_builtin (loc,
							       as_builtin,
							       params);
	      tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
	      tree ones_vector = build_vector_from_val (arg0_type, const1);
	      tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
					       arg2, ones_vector);
	      params = make_tree_vector ();
	      vec_safe_push (params, call2);
	      vec_safe_push (params, and_expr);
	      call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
							  params);
	      params = make_tree_vector ();
	      vec_safe_push (params, call1);
	      vec_safe_push (params, call2);
	      tree or_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_OR];
	      return altivec_resolve_overloaded_builtin (loc, or_builtin,
							 params);
	    }
	  /* For {un}signed __int128s use the vaddecuq/vsubecuq
	     instructions.  */
	  case E_TImode:
	    {
	      tree bii;

	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
		bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
	      else
		bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBECUQ];

	      return altivec_resolve_overloaded_builtin (loc, bii, arglist);
	    }
	  /* Types other than {un}signed int and {un}signed __int128
	     are errors.  */
	  default:
	    goto bad;
	}
    }

  /* For now treat vec_splats and vec_promote as the same.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
      || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
    {
      tree type, arg;
      int size;
      int i;
      bool unsigned_p;
      vec<constructor_elt, va_gc> *vec;
      const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS
	? "vec_splats" : "vec_promote";

      if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && nargs != 1)
	{
	  error ("builtin %qs only accepts 1 argument", name);
	  return error_mark_node;
	}
      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", name);
	  return error_mark_node;
	}
      /* Ignore promote's element argument.  */
      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
	  && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
	goto bad;

      arg = (*arglist)[0];
      type = TREE_TYPE (arg);
      if (!SCALAR_FLOAT_TYPE_P (type)
	  && !INTEGRAL_TYPE_P (type))
	goto bad;
      unsigned_p = TYPE_UNSIGNED (type);
      switch (TYPE_MODE (type))
	{
	  case E_TImode:
	    type = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
	    size = 1;
	    break;
	  case E_DImode:
	    type = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
	    size = 2;
	    break;
	  case E_SImode:
	    type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
	    size = 4;
	    break;
	  case E_HImode:
	    type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
	    size = 8;
	    break;
	  case E_QImode:
	    type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
	    size = 16;
	    break;
	  case E_SFmode: type = V4SF_type_node; size = 4; break;
	  case E_DFmode: type = V2DF_type_node; size = 2; break;
	  default:
	    goto bad;
	}
      arg = save_expr (fold_convert (TREE_TYPE (type), arg));
      vec_alloc (vec, size);
      for (i = 0; i < size; i++)
	{
	  constructor_elt elt = {NULL_TREE, arg};
	  vec->quick_push (elt);
	}
      return build_constructor (type, vec);
    }
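
  /* Illustrative resolutions (a sketch):

	 vec_splats ((int) 5)   // (vector int){5, 5, 5, 5}
	 vec_splats (1.0)       // (vector double){1.0, 1.0}

     i.e. a CONSTRUCTOR with the converted scalar in every element.  */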

  /* For now use pointer tricks to do the extraction, unless we are on VSX
     extracting a double from a constant offset.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT)
    {
      tree arg1;
      tree arg1_type;
      tree arg2;
      tree arg1_inner_type;
      tree decl, stmt;
      tree innerptrtype;
      machine_mode mode;

      /* Exactly two arguments are required.  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_extract");
	  return error_mark_node;
	}

      arg2 = (*arglist)[1];
      arg1 = (*arglist)[0];
      arg1_type = TREE_TYPE (arg1);

      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
	goto bad;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
	goto bad;

      /* See if we can optimize vec_extracts with the current VSX instruction
	 set.  */
      mode = TYPE_MODE (arg1_type);
      if (VECTOR_MEM_VSX_P (mode))
	{
	  tree call = NULL_TREE;
	  int nunits = GET_MODE_NUNITS (mode);

	  arg2 = fold_for_warn (arg2);

	  /* If the second argument is an integer constant, generate
	     the built-in code if we can.  We need 64-bit and direct
	     move to extract the small integer vectors.  */
	  if (TREE_CODE (arg2) == INTEGER_CST)
	    {
	      wide_int selector = wi::to_wide (arg2);
	      selector = wi::umod_trunc (selector, nunits);
	      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	      switch (mode)
		{
		default:
		  break;

		case E_V1TImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V1TI];
		  break;

		case E_V2DFmode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
		  break;

		case E_V2DImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
		  break;

		case E_V4SFmode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
		  break;

		case E_V4SImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
		  break;

		case E_V8HImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
		  break;

		case E_V16QImode:
		  if (TARGET_DIRECT_MOVE_64BIT)
		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
		  break;
		}
	    }

	  /* If the second argument is variable, we can optimize it if we are
	     generating 64-bit code on a machine with direct move.  */
	  else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
	    {
	      switch (mode)
		{
		default:
		  break;

		case E_V2DFmode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
		  break;

		case E_V2DImode:
		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
		  break;

		case E_V4SFmode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
		  break;

		case E_V4SImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
		  break;

		case E_V8HImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
		  break;

		case E_V16QImode:
		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
		  break;
		}
	    }

	  if (call)
	    {
	      tree result = build_call_expr (call, 2, arg1, arg2);
	      /* Coerce the result to vector element type.  May be no-op.  */
	      arg1_inner_type = TREE_TYPE (arg1_type);
	      result = fold_convert (arg1_inner_type, result);
	      return result;
	    }
	}
1454
1455      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). */
      arg1_inner_type = TREE_TYPE (arg1_type);
      arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
			      build_int_cst (TREE_TYPE (arg2),
					     TYPE_VECTOR_SUBPARTS (arg1_type)
					     - 1), 0);
      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
      DECL_EXTERNAL (decl) = 0;
      TREE_PUBLIC (decl) = 0;
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_USED (decl) = 1;
      TREE_TYPE (decl) = arg1_type;
      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
      if (c_dialect_cxx ())
	{
	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
			 NULL_TREE, NULL_TREE);
	  SET_EXPR_LOCATION (stmt, loc);
	}
      else
	{
	  DECL_INITIAL (decl) = arg1;
	  stmt = build1 (DECL_EXPR, arg1_type, decl);
	  TREE_ADDRESSABLE (decl) = 1;
	  SET_EXPR_LOCATION (stmt, loc);
	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
	}

      innerptrtype = build_pointer_type (arg1_inner_type);

      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
      stmt = convert (innerptrtype, stmt);
      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
      stmt = build_indirect_ref (loc, stmt, RO_NULL);

      /* PR83660: We mark this as having side effects so that downstream
	 in fold_build_cleanup_point_expr () it will get a
	 CLEANUP_POINT_EXPR.  If it does not, we can run into an ICE later
	 in gimplify_cleanup_point_expr ().  Potentially this causes missed
	 optimizations because there actually is no side effect.  */
      if (c_dialect_cxx ())
	TREE_SIDE_EFFECTS (stmt) = 1;

      return stmt;
    }

  /* For now use pointer tricks to do the insertion, unless we are on VSX
     inserting a doubleword or quadword element at a constant offset.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_INSERT)
    {
      tree arg0;
      tree arg1;
      tree arg2;
      tree arg1_type;
      tree arg1_inner_type;
      tree decl, stmt;
      tree innerptrtype;
      machine_mode mode;

      /* Require exactly three arguments.  */
      if (nargs != 3)
	{
	  error ("builtin %qs only accepts 3 arguments", "vec_insert");
	  return error_mark_node;
	}

      arg0 = (*arglist)[0];
      arg1 = (*arglist)[1];
      arg1_type = TREE_TYPE (arg1);
      arg2 = fold_for_warn ((*arglist)[2]);

      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
	goto bad;
      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
	goto bad;

      /* If we can use the VSX xxpermdi instruction, use that for insert.  */
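      /* For illustration only: vec_insert (x, (vector double) v, 3)
	 reduces the index modulo the two elements (3 % 2 == 1) and
	 resolves to the VSX_BUILTIN_VEC_SET_V2DF built-in, which can
	 be implemented with a single xxpermdi.  */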
      mode = TYPE_MODE (arg1_type);
      if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
	  && TREE_CODE (arg2) == INTEGER_CST)
	{
	  wide_int selector = wi::to_wide (arg2);
	  selector = wi::umod_trunc (selector, 2);
	  tree call = NULL_TREE;

	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	  if (mode == V2DFmode)
	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DF];
	  else if (mode == V2DImode)
	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DI];

	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	     reversed.  */
	  if (call)
	    return build_call_expr (call, 3, arg1, arg0, arg2);
	}
      else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
	       && TREE_CODE (arg2) == INTEGER_CST)
	{
	  tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
	  wide_int selector = wi::zero (32);

	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
	     reversed.  */
	  return build_call_expr (call, 3, arg1, arg0, arg2);
	}

      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0.  */
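      /* Illustrative sketch only, not compiler code: for a V4SI argument
	 the trees built below correspond roughly to

	     ({ vector int tmp = arg1;
		((int *) &tmp)[arg2 & 3] = arg0;
		tmp; })

	 i.e. a masked element store into a copy of the vector, which is
	 then the value of the whole expression.  */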
      arg1_inner_type = TREE_TYPE (arg1_type);
      if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
	arg2 = build_int_cst (TREE_TYPE (arg2), 0);
      else
	arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
				build_int_cst (TREE_TYPE (arg2),
					       TYPE_VECTOR_SUBPARTS (arg1_type)
					       - 1), 0);
      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
      DECL_EXTERNAL (decl) = 0;
      TREE_PUBLIC (decl) = 0;
      DECL_CONTEXT (decl) = current_function_decl;
      TREE_USED (decl) = 1;
      TREE_TYPE (decl) = arg1_type;
      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
      if (c_dialect_cxx ())
	{
	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
			 NULL_TREE, NULL_TREE);
	  SET_EXPR_LOCATION (stmt, loc);
	}
      else
	{
	  DECL_INITIAL (decl) = arg1;
	  stmt = build1 (DECL_EXPR, arg1_type, decl);
	  TREE_ADDRESSABLE (decl) = 1;
	  SET_EXPR_LOCATION (stmt, loc);
	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
	}

      innerptrtype = build_pointer_type (arg1_inner_type);

      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
      stmt = convert (innerptrtype, stmt);
      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
      stmt = build_indirect_ref (loc, stmt, RO_NULL);
      stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
		     convert (TREE_TYPE (stmt), arg0));
      stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
      return stmt;
    }

  for (n = 0;
       !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
       fnargs = TREE_CHAIN (fnargs), n++)
    {
      tree decl_type = TREE_VALUE (fnargs);
      tree arg = (*arglist)[n];
      tree type;

      if (arg == error_mark_node)
	return error_mark_node;

      if (n >= 3)
	abort ();

      arg = default_conversion (arg);

      /* The C++ front-end converts float * to const void * using
	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
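      /* So, illustratively, a float * argument x arriving at a
	 const void * parameter shows up here as
	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)); the check below
	 strips the outer conversion so that overload matching works
	 with the unqualified void * view instead.  */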
      type = TREE_TYPE (arg);
      if (POINTER_TYPE_P (type)
	  && TREE_CODE (arg) == NOP_EXPR
	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
					    const_ptr_type_node)
	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
					    ptr_type_node))
	{
	  arg = TREE_OPERAND (arg, 0);
	  type = TREE_TYPE (arg);
	}

      /* Remove the const from the pointers to simplify the overload
	 matching further down.  */
      if (POINTER_TYPE_P (decl_type)
	  && POINTER_TYPE_P (type)
	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
	{
	  if (TYPE_READONLY (TREE_TYPE (type))
	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
	    warning (0, "passing argument %d of %qE discards qualifiers from "
			"pointer target type", n + 1, fndecl);
	  type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
							   0));
	  arg = fold_convert (type, arg);
	}

      /* For P9V_BUILTIN_VEC_LXVL, convert any const * to its non-constant
	 equivalent to simplify the overload matching below.  */
      if (fcode == P9V_BUILTIN_VEC_LXVL)
	{
	  if (POINTER_TYPE_P (type)
	      && TYPE_READONLY (TREE_TYPE (type)))
	    {
	      type = build_pointer_type
		(build_qualified_type (TREE_TYPE (type), 0));
	      arg = fold_convert (type, arg);
	    }
	}

      args[n] = arg;
      types[n] = type;
    }

  /* If the number of arguments did not match the prototype, return NULL
     and the generic code will issue the appropriate error message.  */
  if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
    return NULL;

  if (n == 0)
    abort ();

  if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
    {
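      /* Illustrative note: vec_step applied to a vector signed int
	 value folds to the compile-time constant 4, the number of
	 elements in a V4SI.  */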
      if (TREE_CODE (types[0]) != VECTOR_TYPE)
	goto bad;

      return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
    }

  {
    bool unsupported_builtin = false;
    enum rs6000_builtins overloaded_code;
    tree result = NULL;
    for (desc = altivec_overloaded_builtins;
	 desc->code && desc->code != fcode; desc++)
      continue;

    /* Need to special case __builtin_cmpb because the overloaded forms
       of this function take (unsigned int, unsigned int) or (unsigned
       long long int, unsigned long long int).  Since C conventions
       allow the respective argument types to be implicitly coerced into
       each other, the default handling does not provide adequate
       discrimination between the desired forms of the function.  */
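    /* For illustration: __builtin_cmpb called with two unsigned int
       arguments resolves to P6_BUILTIN_CMPB_32 below, while an
       unsigned long long argument on either side selects the 64-bit
       P6_BUILTIN_CMPB variant.  */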
    if (fcode == P6_OV_BUILTIN_CMPB)
      {
	machine_mode arg1_mode = TYPE_MODE (types[0]);
	machine_mode arg2_mode = TYPE_MODE (types[1]);

	if (nargs != 2)
	  {
	    error ("builtin %qs only accepts 2 arguments", "__builtin_cmpb");
	    return error_mark_node;
	  }

	/* If any supplied arguments are wider than 32 bits, resolve to
	   the 64-bit variant of the built-in function.  */
	if ((GET_MODE_PRECISION (arg1_mode) > 32)
	    || (GET_MODE_PRECISION (arg2_mode) > 32))
	  {
	    /* Assure all argument and result types are compatible with
	       the built-in function represented by P6_BUILTIN_CMPB.  */
	    overloaded_code = P6_BUILTIN_CMPB;
	  }
	else
	  {
	    /* Assure all argument and result types are compatible with
	       the built-in function represented by P6_BUILTIN_CMPB_32.  */
	    overloaded_code = P6_BUILTIN_CMPB_32;
	  }

	while (desc->code && desc->code == fcode
	       && desc->overloaded_code != overloaded_code)
	  desc++;

	if (desc->code && (desc->code == fcode)
	    && rs6000_builtin_type_compatible (types[0], desc->op1)
	    && rs6000_builtin_type_compatible (types[1], desc->op2))
	  {
	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
	      {
		result = altivec_build_resolved_builtin (args, n, desc);
		/* overloaded_code is set above.  */
		if (!rs6000_builtin_is_supported_p (overloaded_code))
		  unsupported_builtin = true;
		else
		  return result;
	      }
	    else
	      unsupported_builtin = true;
	  }
      }
    else if (fcode == P9V_BUILTIN_VEC_VSIEDP)
      {
	machine_mode arg1_mode = TYPE_MODE (types[0]);

	if (nargs != 2)
	  {
	    error ("builtin %qs only accepts 2 arguments",
		   "scalar_insert_exp");
	    return error_mark_node;
	  }

	/* If the supplied first argument is wider than 64 bits, resolve
	   to the 128-bit variant of the built-in function.  */
	if (GET_MODE_PRECISION (arg1_mode) > 64)
	  {
	    /* If the first argument is of float variety, choose the
	       variant that expects an __ieee128 argument.  Otherwise,
	       expect an __int128 argument.  */
	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
	      overloaded_code = P9V_BUILTIN_VSIEQPF;
	    else
	      overloaded_code = P9V_BUILTIN_VSIEQP;
	  }
	else
	  {
	    /* If the first argument is of float variety, choose the
	       variant that expects a double argument.  Otherwise, expect
	       a long long int argument.  */
	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
	      overloaded_code = P9V_BUILTIN_VSIEDPF;
	    else
	      overloaded_code = P9V_BUILTIN_VSIEDP;
	  }
	while (desc->code && desc->code == fcode
	       && desc->overloaded_code != overloaded_code)
	  desc++;

	if (desc->code && (desc->code == fcode)
	    && rs6000_builtin_type_compatible (types[0], desc->op1)
	    && rs6000_builtin_type_compatible (types[1], desc->op2))
	  {
	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
	      {
		result = altivec_build_resolved_builtin (args, n, desc);
		/* overloaded_code is set above.  */
		if (!rs6000_builtin_is_supported_p (overloaded_code))
		  unsupported_builtin = true;
		else
		  return result;
	      }
	    else
	      unsupported_builtin = true;
	  }
      }
    else
      {
	/* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
	   the opX fields.  */
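	/* Illustrative only (the exact values here are made up): an
	   entry for a binary built-in might look like
	     { ALTIVEC_BUILTIN_VEC_ADD, ALTIVEC_BUILTIN_VADDUWM,
	       RS6000_BTI_V4SI, RS6000_BTI_V4SI, RS6000_BTI_V4SI,
	       RS6000_BTI_NOT_OPAQUE }
	   and the loop below accepts the first row whose op1/op2/op3
	   are each either RS6000_BTI_NOT_OPAQUE or compatible with the
	   corresponding actual argument type.  */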
	for (; desc->code == fcode; desc++)
	  {
	    if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
		 || rs6000_builtin_type_compatible (types[0], desc->op1))
		&& (desc->op2 == RS6000_BTI_NOT_OPAQUE
		    || rs6000_builtin_type_compatible (types[1], desc->op2))
		&& (desc->op3 == RS6000_BTI_NOT_OPAQUE
		    || rs6000_builtin_type_compatible (types[2], desc->op3)))
	      {
		if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
		  {
		    result = altivec_build_resolved_builtin (args, n, desc);
		    if (!rs6000_builtin_is_supported_p (desc->overloaded_code))
		      {
			/* Allow the loop to continue in case a different
			   definition is supported.  */
			overloaded_code = desc->overloaded_code;
			unsupported_builtin = true;
		      }
		    else
		      return result;
		  }
		else
		  unsupported_builtin = true;
	      }
	  }
      }

    if (unsupported_builtin)
      {
	const char *name = rs6000_overloaded_builtin_name (fcode);
	if (result != NULL)
	  {
	    const char *internal_name
	      = rs6000_overloaded_builtin_name (overloaded_code);
	    /* An error message making reference to the name of the
	       non-overloaded function has already been issued.  Add
	       clarification of the previous message.  */
	    rich_location richloc (line_table, input_location);
	    inform (&richloc, "builtin %qs requires builtin %qs",
		    name, internal_name);
	  }
	else
	  error ("%qs is not supported in this compiler configuration", name);
	/* If an error-representing result tree was returned from
	   altivec_build_resolved_builtin above, use it.  */
	return (result != NULL) ? result : error_mark_node;
      }
  }
 bad:
  {
    const char *name = rs6000_overloaded_builtin_name (fcode);
    error ("invalid parameter combination for AltiVec intrinsic %qs", name);
    return error_mark_node;
  }
}