/* Subroutines for the C front end on the PowerPC architecture.
   Copyright (C) 2002-2020 Free Software Foundation, Inc.

   Contributed by Zack Weinberg <zack@codesourcery.com>
   and Paolo Bonzini <bonzini@gnu.org>

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify it
   under the terms of the GNU General Public License as published
   by the Free Software Foundation; either version 3, or (at your
   option) any later version.

   GCC is distributed in the hope that it will be useful, but WITHOUT
   ANY WARRANTY; without even the implied warranty of MERCHANTABILITY
   or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public
   License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#define IN_TARGET_CODE 1

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "c-family/c-common.h"
#include "memmodel.h"
#include "tm_p.h"
#include "stringpool.h"
#include "stor-layout.h"
#include "c-family/c-pragma.h"
#include "langhooks.h"
#include "c/c-tree.h"



/* Handle the machine specific pragma longcall.  Its syntax is

   # pragma longcall ( TOGGLE )

   where TOGGLE is either 0 or 1.

   rs6000_default_long_calls is set to the value of TOGGLE, changing
   whether or not new function declarations receive a longcall
   attribute by default.  */
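
/* For illustration, a user translation unit might toggle the default
   like so (a usage sketch; far_away_fn and nearby_fn are hypothetical
   declarations, not part of this file):

     #pragma longcall (1)
     extern void far_away_fn (void);    <- receives the longcall attribute
     #pragma longcall (0)
     extern void nearby_fn (void);      <- back to ordinary calls  */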

void
rs6000_pragma_longcall (cpp_reader *pfile ATTRIBUTE_UNUSED)
{
#define SYNTAX_ERROR(gmsgid) do {					\
  warning (OPT_Wpragmas, gmsgid);					\
  warning (OPT_Wpragmas, "ignoring malformed %<#pragma longcall%>");	\
  return;								\
} while (0)



  tree x, n;

  /* If we get here, generic code has already scanned the directive
     leader and the word "longcall".  */

  if (pragma_lex (&x) != CPP_OPEN_PAREN)
    SYNTAX_ERROR ("missing open paren");
  if (pragma_lex (&n) != CPP_NUMBER)
    SYNTAX_ERROR ("missing number");
  if (pragma_lex (&x) != CPP_CLOSE_PAREN)
    SYNTAX_ERROR ("missing close paren");

  if (n != integer_zero_node && n != integer_one_node)
    SYNTAX_ERROR ("number must be 0 or 1");

  if (pragma_lex (&x) != CPP_EOF)
    warning (OPT_Wpragmas, "junk at end of %<#pragma longcall%>");

  rs6000_default_long_calls = (n == integer_one_node);
}

/* Handle defining many CPP flags based on TARGET_xxx.  As a general
   policy, rather than trying to guess what flags a user might want a
   #define for, it's better to define a flag for everything.  */

#define builtin_define(TXT) cpp_define (pfile, TXT)
#define builtin_assert(TXT) cpp_assert (pfile, TXT)

/* Keep the AltiVec keywords handy for fast comparisons.  */
static GTY(()) tree __vector_keyword;
static GTY(()) tree vector_keyword;
static GTY(()) tree __pixel_keyword;
static GTY(()) tree pixel_keyword;
static GTY(()) tree __bool_keyword;
static GTY(()) tree bool_keyword;
static GTY(()) tree _Bool_keyword;
static GTY(()) tree __int128_type;
static GTY(()) tree __uint128_type;

/* Preserved across calls.  */
static tree expand_bool_pixel;

static cpp_hashnode *
altivec_categorize_keyword (const cpp_token *tok)
{
  if (tok->type == CPP_NAME)
    {
      cpp_hashnode *ident = tok->val.node.node;

      if (ident == C_CPP_HASHNODE (vector_keyword))
	return C_CPP_HASHNODE (__vector_keyword);

      if (ident == C_CPP_HASHNODE (pixel_keyword))
	return C_CPP_HASHNODE (__pixel_keyword);

      if (ident == C_CPP_HASHNODE (bool_keyword))
	return C_CPP_HASHNODE (__bool_keyword);

      if (ident == C_CPP_HASHNODE (_Bool_keyword))
	return C_CPP_HASHNODE (__bool_keyword);

      return ident;
    }

  return 0;
}

static void
init_vector_keywords (void)
{
  /* Keywords without two leading underscores are context-sensitive, and hence
     implemented as conditional macros, controlled by the
     rs6000_macro_to_expand() function below.  If we have ISA 2.07 64-bit
     support, record the __int128_t and __uint128_t types.  */
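  /* As an illustration (not code from this function): with the conditional
     macros below in place,
	 vector unsigned int v;     expands to    __vector unsigned int v;
     while an ordinary use of the identifier, such as
	 int vector = 0;
     is left untouched, because rs6000_macro_to_expand() only expands the
     keyword when the following tokens look like an AltiVec type.  */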

  __vector_keyword = get_identifier ("__vector");
  C_CPP_HASHNODE (__vector_keyword)->flags |= NODE_CONDITIONAL;

  __pixel_keyword = get_identifier ("__pixel");
  C_CPP_HASHNODE (__pixel_keyword)->flags |= NODE_CONDITIONAL;

  __bool_keyword = get_identifier ("__bool");
  C_CPP_HASHNODE (__bool_keyword)->flags |= NODE_CONDITIONAL;

  vector_keyword = get_identifier ("vector");
  C_CPP_HASHNODE (vector_keyword)->flags |= NODE_CONDITIONAL;

  pixel_keyword = get_identifier ("pixel");
  C_CPP_HASHNODE (pixel_keyword)->flags |= NODE_CONDITIONAL;

  bool_keyword = get_identifier ("bool");
  C_CPP_HASHNODE (bool_keyword)->flags |= NODE_CONDITIONAL;

  _Bool_keyword = get_identifier ("_Bool");
  C_CPP_HASHNODE (_Bool_keyword)->flags |= NODE_CONDITIONAL;

  if (TARGET_VADDUQM)
    {
      __int128_type = get_identifier ("__int128_t");
      __uint128_type = get_identifier ("__uint128_t");
    }
}

/* Helper function to find out which RID_INT_N_* code is the one for
   __int128, if any.  Returns RID_MAX+1 if none apply, which is safe
   (for our purposes, since we always expect to have __int128) to
   compare against.  */
static int
rid_int128(void)
{
  int i;

  for (i = 0; i < NUM_INT_N_ENTS; i ++)
    if (int_n_enabled_p[i]
	&& int_n_data[i].bitsize == 128)
      return RID_INT_N_0 + i;

  return RID_MAX + 1;
}

/* Called to decide whether a conditional macro should be expanded.
   Since we have exactly one such macro (i.e., 'vector'), we do not
   need to examine the 'tok' parameter.  */
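
/* For example (illustrative): on seeing the token "vector", this function
   peeks ahead; "vector unsigned int" or "vector bool char" cause the
   conditional macro to expand to "__vector", whereas "Iterator::vector" or
   "vector" used as a plain identifier leave the token alone.  */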

static cpp_hashnode *
rs6000_macro_to_expand (cpp_reader *pfile, const cpp_token *tok)
{
  cpp_hashnode *expand_this = tok->val.node.node;
  cpp_hashnode *ident;

  /* If the current machine does not have altivec, don't look for the
     keywords.  */
  if (!TARGET_ALTIVEC)
    return NULL;

  ident = altivec_categorize_keyword (tok);

  if (ident != expand_this)
    expand_this = NULL;

  if (ident == C_CPP_HASHNODE (__vector_keyword))
    {
      int idx = 0;
      do
	tok = cpp_peek_token (pfile, idx++);
      while (tok->type == CPP_PADDING);
      ident = altivec_categorize_keyword (tok);

      if (ident == C_CPP_HASHNODE (__pixel_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __pixel_keyword;
	}
      else if (ident == C_CPP_HASHNODE (__bool_keyword))
	{
	  expand_this = C_CPP_HASHNODE (__vector_keyword);
	  expand_bool_pixel = __bool_keyword;
	}
      /* The boost libraries have code with Iterator::vector vector in it.  If
	 we allow the normal handling, this module will be called recursively,
	 and the vector will be skipped.  */
      else if (ident && (ident != C_CPP_HASHNODE (__vector_keyword)))
	{
	  enum rid rid_code = (enum rid)(ident->rid_code);
	  bool is_macro = cpp_macro_p (ident);

	  /* If there is a function-like macro, check if it is going to be
	     invoked with or without arguments.  Without following ( treat
	     it like non-macro, otherwise the following cpp_get_token eats
	     what should be preserved.  */
	  if (is_macro && cpp_fun_like_macro_p (ident))
	    {
	      int idx2 = idx;
	      do
		tok = cpp_peek_token (pfile, idx2++);
	      while (tok->type == CPP_PADDING);
	      if (tok->type != CPP_OPEN_PAREN)
		is_macro = false;
	    }

	  if (is_macro)
	    {
	      do
		(void) cpp_get_token (pfile);
	      while (--idx > 0);
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);
	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __pixel_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		{
		  expand_this = C_CPP_HASHNODE (__vector_keyword);
		  expand_bool_pixel = __bool_keyword;
		  rid_code = RID_MAX;
		}
	      else if (ident)
		rid_code = (enum rid)(ident->rid_code);
	    }

	  if (rid_code == RID_UNSIGNED || rid_code == RID_LONG
	      || rid_code == RID_SHORT || rid_code == RID_SIGNED
	      || rid_code == RID_INT || rid_code == RID_CHAR
	      || rid_code == RID_FLOAT
	      || (rid_code == RID_DOUBLE && TARGET_VSX)
	      || (rid_code == rid_int128 () && TARGET_VADDUQM))
	    {
	      expand_this = C_CPP_HASHNODE (__vector_keyword);
	      /* If the next keyword is bool or pixel, it
		 will need to be expanded as well.  */
	      do
		tok = cpp_peek_token (pfile, idx++);
	      while (tok->type == CPP_PADDING);
	      ident = altivec_categorize_keyword (tok);

	      if (ident == C_CPP_HASHNODE (__pixel_keyword))
		expand_bool_pixel = __pixel_keyword;
	      else if (ident == C_CPP_HASHNODE (__bool_keyword))
		expand_bool_pixel = __bool_keyword;
	      else
		{
		  /* Try two tokens down, too.  */
		  do
		    tok = cpp_peek_token (pfile, idx++);
		  while (tok->type == CPP_PADDING);
		  ident = altivec_categorize_keyword (tok);
		  if (ident == C_CPP_HASHNODE (__pixel_keyword))
		    expand_bool_pixel = __pixel_keyword;
		  else if (ident == C_CPP_HASHNODE (__bool_keyword))
		    expand_bool_pixel = __bool_keyword;
		}
	    }

	  /* Support vector __int128_t, but we don't need to worry about bool
	     or pixel on this type.  */
	  else if (TARGET_VADDUQM
		   && (ident == C_CPP_HASHNODE (__int128_type)
		       || ident == C_CPP_HASHNODE (__uint128_type)))
	    expand_this = C_CPP_HASHNODE (__vector_keyword);
	}
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__pixel_keyword))
    {
      expand_this = C_CPP_HASHNODE (__pixel_keyword);
      expand_bool_pixel = 0;
    }
  else if (expand_bool_pixel && ident == C_CPP_HASHNODE (__bool_keyword))
    {
      expand_this = C_CPP_HASHNODE (__bool_keyword);
      expand_bool_pixel = 0;
    }

  return expand_this;
}


/* Define or undefine a single macro.  */

static void
rs6000_define_or_undefine_macro (bool define_p, const char *name)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr, "#%s %s\n", (define_p) ? "define" : "undef", name);

  if (define_p)
    cpp_define (parse_in, name);
  else
    cpp_undef (parse_in, name);
}

/* Define or undefine macros based on the current target.  If the user does
   #pragma GCC target, we need to adjust the macros dynamically.  Note, some of
   the options needed for builtins have been moved to separate variables, so
   have both the target flags and the builtin flags as arguments.  */
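
/* For example (illustrative), a region such as

     #pragma GCC target ("cpu=power9")

   causes this function to be called once to undefine the macros of the
   previous target and again to define the new set, so that code guarded
   by e.g. #ifdef _ARCH_PWR9 or #ifdef __VSX__ sees the pragma's effect.  */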

void
rs6000_target_modify_macros (bool define_p, HOST_WIDE_INT flags,
			     HOST_WIDE_INT bu_mask)
{
  if (TARGET_DEBUG_BUILTIN || TARGET_DEBUG_TARGET)
    fprintf (stderr,
	     "rs6000_target_modify_macros (%s, " HOST_WIDE_INT_PRINT_HEX
	     ", " HOST_WIDE_INT_PRINT_HEX ")\n",
	     (define_p) ? "define" : "undef",
	     flags, bu_mask);

  /* Each of the flags mentioned below controls whether certain
     preprocessor macros will be automatically defined when
     preprocessing source files for compilation by this compiler.
     While most of these flags can be enabled or disabled
     explicitly by specifying certain command-line options when
     invoking the compiler, there are also many ways in which these
     flags are enabled or disabled implicitly, based on compiler
     defaults, configuration choices, and on the presence of certain
     related command-line options.  Many, but not all, of these
     implicit behaviors can be found in file "rs6000.c", the
     rs6000_option_override_internal() function.

     In general, each of the flags may be automatically enabled in
     any of the following conditions:

     1. If no -mcpu target is specified on the command line and no
	--with-cpu target is specified to the configure command line
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     2. If the target specified with -mcpu=target on the command line, or
	in the absence of a -mcpu=target command-line option, if the
	target specified using --with-cpu=target on the configure
	command line, is disqualified because the associated binary
	tools (e.g. the assembler) lack support for the requested cpu,
	and the TARGET_DEFAULT macro for this default cpu host
	includes the flag, and the flag has not been explicitly disabled
	by command-line options.

     3. If either of the above two conditions apply except that the
	TARGET_DEFAULT macro is defined to equal zero, and
	TARGET_POWERPC64 and
	a) BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_PPC_GFXOPT or MASK_POWERPC64 (flags for "powerpc64"
	   target), or
	b) !BYTES_BIG_ENDIAN and the flag to be enabled is either
	   MASK_POWERPC64 or it is one of the flags included in
	   ISA_2_7_MASKS_SERVER (flags for "powerpc64le" target).

     4. If a cpu has been requested with a -mcpu=target command-line option
	and this cpu has not been disqualified due to shortcomings of the
	binary tools, and the set of flags associated with the requested cpu
	include the flag to be enabled.  See rs6000-cpus.def for macro
	definitions that represent various ABI standards
	(e.g. ISA_2_1_MASKS, ISA_3_0_MASKS_SERVER) and for a list of
	the specific flags that are associated with each of the cpu
	choices that can be specified as the target of a -mcpu=target
	compile option, or as the target of a --with-cpu=target
	configure option.  Target flags that are specified in either
	of these two ways are considered "implicit" since the flags
	are not mentioned specifically by name.

	Additional documentation describing behavior specific to
	particular flags is provided below, immediately preceding the
	use of each relevant flag.

     5. If there is no -mcpu=target command-line option, and the cpu
	requested by a --with-cpu=target command-line option has not
	been disqualified due to shortcomings of the binary tools, and
	the set of flags associated with the specified target include
	the flag to be enabled.  See the notes immediately above for a
	summary of the flags associated with particular cpu
	definitions.  */

  /* rs6000_isa_flags based options.  */
  rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC");
  if ((flags & OPTION_MASK_PPC_GPOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCSQ");
  if ((flags & OPTION_MASK_PPC_GFXOPT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPCGR");
  if ((flags & OPTION_MASK_POWERPC64) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PPC64");
  if ((flags & OPTION_MASK_MFCRF) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR4");
  if ((flags & OPTION_MASK_POPCNTB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5");
  if ((flags & OPTION_MASK_FPRND) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR5X");
  if ((flags & OPTION_MASK_CMPB) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR6");
  if ((flags & OPTION_MASK_POPCNTD) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR7");
  /* Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
     turned on in the following condition:
     1. TARGET_P8_VECTOR is enabled and OPTION_MASK_DIRECT_MOVE is not
        explicitly disabled.
        Hereafter, the OPTION_MASK_DIRECT_MOVE flag is considered to
        have been turned on explicitly.
     Note that the OPTION_MASK_DIRECT_MOVE flag is automatically
     turned off in any of the following conditions:
     1. TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX is explicitly
	disabled and OPTION_MASK_DIRECT_MOVE was not explicitly
	enabled.
     2. TARGET_VSX is off.  */
  if ((flags & OPTION_MASK_DIRECT_MOVE) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR8");
  if ((flags & OPTION_MASK_MODULO) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR9");
  if ((flags & OPTION_MASK_POWER10) != 0)
    rs6000_define_or_undefine_macro (define_p, "_ARCH_PWR10");
  if ((flags & OPTION_MASK_SOFT_FLOAT) != 0)
    rs6000_define_or_undefine_macro (define_p, "_SOFT_FLOAT");
  if ((flags & OPTION_MASK_RECIP_PRECISION) != 0)
    rs6000_define_or_undefine_macro (define_p, "__RECIP_PRECISION__");
  /* Note that the OPTION_MASK_ALTIVEC flag is automatically turned on
     in any of the following conditions:
     1. The operating system is Darwin and it is configured for 64
	bit.  (See darwin_rs6000_override_options.)
     2. The operating system is Darwin and the operating system
	version is 10.5 or higher and the user has not explicitly
	disabled ALTIVEC by specifying -mcpu=G3 or -mno-altivec and
	the compiler is not producing code for integration within the
	kernel.  (See darwin_rs6000_override_options.)
     Note that the OPTION_MASK_ALTIVEC flag is automatically turned
     off in any of the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_ALTIVEC flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_ALTIVEC
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_ALTIVEC) != 0)
    {
      const char *vec_str = (define_p) ? "__VEC__=10206" : "__VEC__";
      rs6000_define_or_undefine_macro (define_p, "__ALTIVEC__");
      rs6000_define_or_undefine_macro (define_p, vec_str);

      /* Define this when supporting context-sensitive keywords.  */
      if (!flag_iso)
	rs6000_define_or_undefine_macro (define_p, "__APPLE_ALTIVEC__");
      if (rs6000_aix_extabi)
	rs6000_define_or_undefine_macro (define_p, "__EXTABI__");
    }
  /* Note that the OPTION_MASK_VSX flag is automatically turned on in
     the following conditions:
     1. TARGET_P8_VECTOR is explicitly turned on and the OPTION_MASK_VSX
        was not explicitly turned off.  Hereafter, the OPTION_MASK_VSX
        flag is considered to have been explicitly turned on.
     Note that the OPTION_MASK_VSX flag is automatically turned off in
     the following conditions:
     1. The operating system does not support saving of AltiVec
	registers (OS_MISSING_ALTIVEC).
     2. If the option TARGET_HARD_FLOAT is turned off.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     3. If TARGET_AVOID_XFORM is turned on explicitly at the outermost
	compilation context, or if it is turned on by any means in an
	inner compilation context.  Hereafter, the OPTION_MASK_VSX
	flag is considered to have been turned off explicitly.
     4. If TARGET_ALTIVEC was explicitly disabled.  Hereafter, the
	OPTION_MASK_VSX flag is considered to have been turned off
	explicitly.
     5. If an inner context (as introduced by
	__attribute__((__target__())) or #pragma GCC target()
	requests a target that normally enables the
	OPTION_MASK_VSX flag but the outer-most "main target"
	does not support the rs6000_altivec_abi, this flag is
	turned off for the inner context unless OPTION_MASK_VSX
	was explicitly enabled for the inner context.  */
  if ((flags & OPTION_MASK_VSX) != 0)
    rs6000_define_or_undefine_macro (define_p, "__VSX__");
  if ((flags & OPTION_MASK_HTM) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__HTM__");
      /* Tell the user that our HTM insn patterns act as memory barriers.  */
      rs6000_define_or_undefine_macro (define_p, "__TM_FENCE__");
    }
  /* Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     on in the following conditions:
     1. TARGET_P9_VECTOR is explicitly turned on and
        OPTION_MASK_P8_VECTOR is not explicitly turned off.
        Hereafter, the OPTION_MASK_P8_VECTOR flag is considered to
        have been turned off explicitly.
     Note that the OPTION_MASK_P8_VECTOR flag is automatically turned
     off in the following conditions:
     1. If any of TARGET_HARD_FLOAT, TARGET_ALTIVEC, or TARGET_VSX
	were turned off explicitly and OPTION_MASK_P8_VECTOR flag was
	not turned on explicitly.
     2. If TARGET_ALTIVEC is turned off.  Hereafter, the
	OPTION_MASK_P8_VECTOR flag is considered to have been turned off
	explicitly.
     3. If TARGET_VSX is turned off and OPTION_MASK_P8_VECTOR was not
        explicitly enabled.  If TARGET_VSX is explicitly enabled, the
        OPTION_MASK_P8_VECTOR flag is hereafter also considered to
	have been turned off explicitly.  */
  if ((flags & OPTION_MASK_P8_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER8_VECTOR__");
  /* Note that the OPTION_MASK_P9_VECTOR flag is automatically turned
     off in the following conditions:
     1. If TARGET_P8_VECTOR is turned off and OPTION_MASK_P9_VECTOR is
        not turned on explicitly. Hereafter, if OPTION_MASK_P8_VECTOR
        was turned on explicitly, the OPTION_MASK_P9_VECTOR flag is
        also considered to have been turned off explicitly.
     Note that the OPTION_MASK_P9_VECTOR is automatically turned on
     in the following conditions:
     1. If TARGET_P9_MINMAX was turned on explicitly.
        Hereafter, the OPTION_MASK_P9_VECTOR flag is considered to
        have been turned on explicitly.  */
  if ((flags & OPTION_MASK_P9_VECTOR) != 0)
    rs6000_define_or_undefine_macro (define_p, "__POWER9_VECTOR__");
  /* Note that the OPTION_MASK_QUAD_MEMORY flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     2. If WORDS_BIG_ENDIAN is false (non-atomic quad memory
	load/store are disabled on little endian).  */
  if ((flags & OPTION_MASK_QUAD_MEMORY) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY__");
  /* Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is automatically
     turned off in the following conditions:
     1. If TARGET_POWERPC64 is turned off.
     Note that the OPTION_MASK_QUAD_MEMORY_ATOMIC flag is
     automatically turned on in the following conditions:
     1. If TARGET_QUAD_MEMORY and this flag was not explicitly
	disabled.  */
  if ((flags & OPTION_MASK_QUAD_MEMORY_ATOMIC) != 0)
    rs6000_define_or_undefine_macro (define_p, "__QUAD_MEMORY_ATOMIC__");
  /* Note that the OPTION_MASK_CRYPTO flag is automatically turned off
     in the following conditions:
     1. If any of TARGET_HARD_FLOAT or TARGET_ALTIVEC or TARGET_VSX
	are turned off explicitly and OPTION_MASK_CRYPTO is not turned
	on explicitly.
     2. If TARGET_ALTIVEC is turned off.  */
  if ((flags & OPTION_MASK_CRYPTO) != 0)
    rs6000_define_or_undefine_macro (define_p, "__CRYPTO__");
  if ((flags & OPTION_MASK_FLOAT128_KEYWORD) != 0)
    {
      rs6000_define_or_undefine_macro (define_p, "__FLOAT128__");
      if (define_p)
	rs6000_define_or_undefine_macro (true, "__float128=__ieee128");
      else
	rs6000_define_or_undefine_macro (false, "__float128");
    }
  /* OPTION_MASK_FLOAT128_HARDWARE can be turned on if -mcpu=power9 is used or
     via the target attribute/pragma.  */
  if ((flags & OPTION_MASK_FLOAT128_HW) != 0)
    rs6000_define_or_undefine_macro (define_p, "__FLOAT128_HARDWARE__");

  /* options from the builtin masks.  */
  /* Note that RS6000_BTM_CELL is enabled only if (rs6000_cpu ==
     PROCESSOR_CELL) (e.g. -mcpu=cell).  */
  if ((bu_mask & RS6000_BTM_CELL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PPU__");

  /* Tell the user if we support the MMA instructions.  */
  if ((flags & OPTION_MASK_MMA) != 0)
    rs6000_define_or_undefine_macro (define_p, "__MMA__");
  /* Whether pc-relative code is being generated.  */
  if ((flags & OPTION_MASK_PCREL) != 0)
    rs6000_define_or_undefine_macro (define_p, "__PCREL__");
}

void
rs6000_cpu_cpp_builtins (cpp_reader *pfile)
{
  /* Define all of the common macros.  */
  rs6000_target_modify_macros (true, rs6000_isa_flags,
			       rs6000_builtin_mask_calculate ());

  if (TARGET_FRE)
    builtin_define ("__RECIP__");
  if (TARGET_FRES)
    builtin_define ("__RECIPF__");
  if (TARGET_FRSQRTE)
    builtin_define ("__RSQRTE__");
  if (TARGET_FRSQRTES)
    builtin_define ("__RSQRTEF__");
  if (TARGET_FLOAT128_TYPE)
    builtin_define ("__FLOAT128_TYPE__");
#ifdef TARGET_LIBC_PROVIDES_HWCAP_IN_TCB
  builtin_define ("__BUILTIN_CPU_SUPPORTS__");
#endif

  if (TARGET_EXTRA_BUILTINS && cpp_get_options (pfile)->lang != CLK_ASM)
    {
      /* Define the AltiVec syntactic elements.  */
      builtin_define ("__vector=__attribute__((altivec(vector__)))");
      builtin_define ("__pixel=__attribute__((altivec(pixel__))) unsigned short");
      builtin_define ("__bool=__attribute__((altivec(bool__))) unsigned");

      if (!flag_iso)
	{
	  builtin_define ("vector=vector");
	  builtin_define ("pixel=pixel");
	  builtin_define ("bool=bool");
	  builtin_define ("_Bool=_Bool");
	  init_vector_keywords ();

	  /* Enable context-sensitive macros.  */
	  cpp_get_callbacks (pfile)->macro_to_expand = rs6000_macro_to_expand;
	}
    }
  if (!TARGET_HARD_FLOAT)
    builtin_define ("_SOFT_DOUBLE");
  /* Used by lwarx/stwcx. errata work-around.  */
  if (rs6000_cpu == PROCESSOR_PPC405)
    builtin_define ("__PPC405__");
  /* Used by libstdc++.  */
  if (TARGET_NO_LWSYNC)
    builtin_define ("__NO_LWSYNC__");

  if (TARGET_EXTRA_BUILTINS)
    {
      /* For the VSX builtin functions identical to Altivec functions, just map
	 the altivec builtin into the vsx version (the altivec functions
	 generate VSX code if -mvsx).  */
      builtin_define ("__builtin_vsx_xxland=__builtin_vec_and");
      builtin_define ("__builtin_vsx_xxlandc=__builtin_vec_andc");
      builtin_define ("__builtin_vsx_xxlnor=__builtin_vec_nor");
      builtin_define ("__builtin_vsx_xxlor=__builtin_vec_or");
      builtin_define ("__builtin_vsx_xxlxor=__builtin_vec_xor");
      builtin_define ("__builtin_vsx_xxsel=__builtin_vec_sel");
      builtin_define ("__builtin_vsx_vperm=__builtin_vec_perm");

      /* Also map the a and m versions of the multiply/add instructions to the
	 builtin for people blindly going off the instruction manual.  */
      builtin_define ("__builtin_vsx_xvmaddadp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddmdp=__builtin_vsx_xvmadddp");
      builtin_define ("__builtin_vsx_xvmaddasp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmaddmsp=__builtin_vsx_xvmaddsp");
      builtin_define ("__builtin_vsx_xvmsubadp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubmdp=__builtin_vsx_xvmsubdp");
      builtin_define ("__builtin_vsx_xvmsubasp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvmsubmsp=__builtin_vsx_xvmsubsp");
      builtin_define ("__builtin_vsx_xvnmaddadp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddmdp=__builtin_vsx_xvnmadddp");
      builtin_define ("__builtin_vsx_xvnmaddasp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmaddmsp=__builtin_vsx_xvnmaddsp");
      builtin_define ("__builtin_vsx_xvnmsubadp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubmdp=__builtin_vsx_xvnmsubdp");
      builtin_define ("__builtin_vsx_xvnmsubasp=__builtin_vsx_xvnmsubsp");
      builtin_define ("__builtin_vsx_xvnmsubmsp=__builtin_vsx_xvnmsubsp");
    }

  /* Map the old _Float128 'q' builtins into the new 'f128' builtins.  */
  if (TARGET_FLOAT128_TYPE)
    {
      builtin_define ("__builtin_fabsq=__builtin_fabsf128");
      builtin_define ("__builtin_copysignq=__builtin_copysignf128");
      builtin_define ("__builtin_nanq=__builtin_nanf128");
      builtin_define ("__builtin_nansq=__builtin_nansf128");
      builtin_define ("__builtin_infq=__builtin_inff128");
      builtin_define ("__builtin_huge_valq=__builtin_huge_valf128");
    }

  /* Tell users they can use __builtin_bswap{16,64}.  */
  builtin_define ("__HAVE_BSWAP__");

  /* May be overridden by target configuration.  */
  RS6000_CPU_CPP_ENDIAN_BUILTINS();

  if (TARGET_LONG_DOUBLE_128)
    {
      builtin_define ("__LONG_DOUBLE_128__");
      builtin_define ("__LONGDOUBLE128");

      if (TARGET_IEEEQUAD)
	{
	  /* Older versions of GLIBC used __attribute__((__KC__)) to create the
	     IEEE 128-bit floating point complex type for C++ (which does not
	     support _Float128 _Complex).  If the default for long double is
	     IEEE 128-bit mode, the library would need to use
	     __attribute__((__TC__)) instead.  Defining __KF__ and __KC__
	     is a stop-gap to build with the older libraries, until we
	     get an updated library.  */
	  builtin_define ("__LONG_DOUBLE_IEEE128__");
	  builtin_define ("__KF__=__TF__");
	  builtin_define ("__KC__=__TC__");
	}
      else
	builtin_define ("__LONG_DOUBLE_IBM128__");
    }

  switch (TARGET_CMODEL)
    {
      /* Deliberately omit __CMODEL_SMALL__ since that was the default
	 before -mcmodel support was added.  */
    case CMODEL_MEDIUM:
      builtin_define ("__CMODEL_MEDIUM__");
      break;
    case CMODEL_LARGE:
      builtin_define ("__CMODEL_LARGE__");
      break;
    default:
      break;
    }

  switch (rs6000_current_abi)
    {
    case ABI_V4:
      builtin_define ("_CALL_SYSV");
      break;
    case ABI_AIX:
      builtin_define ("_CALL_AIXDESC");
      builtin_define ("_CALL_AIX");
      builtin_define ("_CALL_ELF=1");
      break;
    case ABI_ELFv2:
      builtin_define ("_CALL_ELF=2");
      break;
    case ABI_DARWIN:
      builtin_define ("_CALL_DARWIN");
      break;
    default:
      break;
    }

  /* Vector element order.  */
  if (BYTES_BIG_ENDIAN)
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_BIG_ENDIAN__");
  else
    builtin_define ("__VEC_ELEMENT_REG_ORDER__=__ORDER_LITTLE_ENDIAN__");

  /* Let the compiled code know if 'f' class registers will not be available.  */
  if (TARGET_SOFT_FLOAT)
    builtin_define ("__NO_FPRS__");

  /* Whether aggregates passed by value are aligned to a 16 byte boundary
     if their alignment is 16 bytes or larger.  */
  if ((TARGET_MACHO && rs6000_darwin64_abi)
      || DEFAULT_ABI == ABI_ELFv2
      || (DEFAULT_ABI == ABI_AIX && !rs6000_compat_align_parm))
    builtin_define ("__STRUCT_PARM_ALIGN__=16");
}



/* Convert a type stored into a struct altivec_builtin_types as ID,
   into a tree.  The types are in rs6000_builtin_types: negative values
   create a pointer type for the type associated to ~ID.  Note it is
   a logical NOT, rather than a negation, otherwise you cannot represent
   a pointer type for ID 0.  */
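
/* Illustrative example: if RS6000_BTI_V4SI identifies "vector signed int",
   then an entry of ~RS6000_BTI_V4SI stands for "pointer to vector signed
   int".  Using ~ID rather than -ID matters because ~0 is still a distinct
   value, so a pointer to the type with ID 0 remains representable.  */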

static inline tree
rs6000_builtin_type (int id)
{
  tree t;
  t = rs6000_builtin_types[id < 0 ? ~id : id];
  return id < 0 ? build_pointer_type (t) : t;
}

/* Check whether the type of an argument, T, is compatible with a type ID
   stored into a struct altivec_builtin_types.  Integer types are considered
   compatible; otherwise, the language hook lang_hooks.types_compatible_p makes
   the decision.  Also allow long double and _Float128 to be compatible if
   -mabi=ieeelongdouble.  */

static inline bool
is_float128_p (tree t)
{
  return (t == float128_type_node
	  || (TARGET_IEEEQUAD
	      && TARGET_LONG_DOUBLE_128
	      && t == long_double_type_node));
}

static inline bool
rs6000_builtin_type_compatible (tree t, int id)
{
  tree builtin_type;
  builtin_type = rs6000_builtin_type (id);
  if (t == error_mark_node)
    return false;
  if (INTEGRAL_TYPE_P (t) && INTEGRAL_TYPE_P (builtin_type))
    return true;
  else if (TARGET_IEEEQUAD && TARGET_LONG_DOUBLE_128
	   && is_float128_p (t) && is_float128_p (builtin_type))
    return true;
  else
    return lang_hooks.types_compatible_p (t, builtin_type);
}


/* In addition to calling fold_convert for EXPR of type TYPE, also
   call c_fully_fold to remove any C_MAYBE_CONST_EXPRs that could be
   hiding there (PR47197).  */

static tree
fully_fold_convert (tree type, tree expr)
{
  tree result = fold_convert (type, expr);
  bool maybe_const = true;

  if (!c_dialect_cxx ())
    result = c_fully_fold (result, false, &maybe_const);

  return result;
}

/* Build a tree for a function call to an Altivec non-overloaded builtin.
   The overloaded builtin that matched the types and args is described
   by DESC.  The N arguments are given in ARGS, respectively.

   Actually the only thing it does is calling fold_convert on ARGS, with
   a small exception for vec_{all,any}_{ge,le} predicates. */

static tree
altivec_build_resolved_builtin (tree *args, int n,
				const struct altivec_builtin_types *desc)
{
  tree impl_fndecl = rs6000_builtin_decls[desc->overloaded_code];
  tree ret_type = rs6000_builtin_type (desc->ret_type);
  tree argtypes = TYPE_ARG_TYPES (TREE_TYPE (impl_fndecl));
  tree arg_type[3];
  tree call;

  int i;
  for (i = 0; i < n; i++)
    arg_type[i] = TREE_VALUE (argtypes), argtypes = TREE_CHAIN (argtypes);

  /* The AltiVec overloading implementation is overall gross, but this
     is particularly disgusting.  The vec_{all,any}_{ge,le} builtins
     are completely different for floating-point vs. integer vector
     types, because the former has vcmpgefp, but the latter should use
     vcmpgtXX.

     In practice, the second and third arguments are swapped, and the
     condition (LT vs. EQ, which is recognizable by bit 1 of the first
     argument) is reversed.  Patch the arguments here before building
     the resolved CALL_EXPR.  */
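  /* Illustrative sketch of the rewrite (integer case; __CR6_* selector
     values come from altivec.h):
	 vec_all_ge (a, b)   -->   vcmpgtsw_p (__CR6_LT ^ 2, b, a)
     i.e. "all (a >= b)" is checked as "no element of (b > a)".  */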
  if (n == 3
      && desc->code == ALTIVEC_BUILTIN_VEC_VCMPGE_P
      && desc->overloaded_code != ALTIVEC_BUILTIN_VCMPGEFP_P
      && desc->overloaded_code != VSX_BUILTIN_XVCMPGEDP_P)
    {
      std::swap (args[1], args[2]);
      std::swap (arg_type[1], arg_type[2]);

      args[0] = fold_build2 (BIT_XOR_EXPR, TREE_TYPE (args[0]), args[0],
			     build_int_cst (NULL_TREE, 2));
    }

  switch (n)
    {
    case 0:
      call = build_call_expr (impl_fndecl, 0);
      break;
    case 1:
      call = build_call_expr (impl_fndecl, 1,
			      fully_fold_convert (arg_type[0], args[0]));
      break;
    case 2:
      call = build_call_expr (impl_fndecl, 2,
			      fully_fold_convert (arg_type[0], args[0]),
			      fully_fold_convert (arg_type[1], args[1]));
      break;
    case 3:
      call = build_call_expr (impl_fndecl, 3,
			      fully_fold_convert (arg_type[0], args[0]),
			      fully_fold_convert (arg_type[1], args[1]),
			      fully_fold_convert (arg_type[2], args[2]));
      break;
    default:
      gcc_unreachable ();
    }
  return fold_convert (ret_type, call);
}

/* Implementation of the resolve_overloaded_builtin target hook, to
   support Altivec's overloaded builtins.  */

tree
altivec_resolve_overloaded_builtin (location_t loc, tree fndecl,
				    void *passed_arglist)
{
  vec<tree, va_gc> *arglist = static_cast<vec<tree, va_gc> *> (passed_arglist);
  unsigned int nargs = vec_safe_length (arglist);
  enum rs6000_builtins fcode
    = (enum rs6000_builtins) DECL_MD_FUNCTION_CODE (fndecl);
  tree fnargs = TYPE_ARG_TYPES (TREE_TYPE (fndecl));
  tree types[3], args[3];
  const struct altivec_builtin_types *desc;
  unsigned int n;

  if (!rs6000_overloaded_builtin_p (fcode))
    return NULL_TREE;

  if (TARGET_DEBUG_BUILTIN)
    fprintf (stderr, "altivec_resolve_overloaded_builtin, code = %4d, %s\n",
	     (int)fcode, IDENTIFIER_POINTER (DECL_NAME (fndecl)));

  /* vec_lvsl and vec_lvsr are deprecated for use with LE element order.  */
  if (fcode == ALTIVEC_BUILTIN_VEC_LVSL && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsl%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");
  else if (fcode == ALTIVEC_BUILTIN_VEC_LVSR && !BYTES_BIG_ENDIAN)
    warning (OPT_Wdeprecated,
	     "%<vec_lvsr%> is deprecated for little endian; use "
	     "assignment for unaligned loads and stores");

  if (fcode == ALTIVEC_BUILTIN_VEC_MUL)
    {
      /* vec_mul needs to be special cased because there are no instructions
	 for it for the {un}signed char, {un}signed short, and {un}signed int
	 types.  */
      if (nargs != 2)
	{
	  error ("builtin %qs only accepts 2 arguments", "vec_mul");
	  return error_mark_node;
	}

      tree arg0 = (*arglist)[0];
      tree arg0_type = TREE_TYPE (arg0);
      tree arg1 = (*arglist)[1];
      tree arg1_type = TREE_TYPE (arg1);

      /* Both arguments must be vectors and the types must be compatible.  */
      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
	goto bad;
      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
	goto bad;

      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
	{
	  case E_QImode:
	  case E_HImode:
	  case E_SImode:
	  case E_DImode:
	  case E_TImode:
	    {
	      /* For scalar types just use a multiply expression.  */
	      return fold_build2_loc (loc, MULT_EXPR, TREE_TYPE (arg0), arg0,
				      fold_convert (TREE_TYPE (arg0), arg1));
	    }
	  case E_SFmode:
	    {
	      /* For floats use the xvmulsp instruction directly.  */
	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULSP];
	      return build_call_expr (call, 2, arg0, arg1);
	    }
	  case E_DFmode:
	    {
	      /* For doubles use the xvmuldp instruction directly.  */
989	      tree call = rs6000_builtin_decls[VSX_BUILTIN_XVMULDP];
990	      return build_call_expr (call, 2, arg0, arg1);
991	    }
992	  /* Other types are errors.  */
993	  default:
994	    goto bad;
995	}
996    }
997
998  if (fcode == ALTIVEC_BUILTIN_VEC_CMPNE)
999    {
1000      /* vec_cmpne needs to be special cased because there are no instructions
1001	 for it (prior to power 9).  */
1002      if (nargs != 2)
1003	{
1004	  error ("builtin %qs only accepts 2 arguments", "vec_cmpne");
1005	  return error_mark_node;
1006	}
1007
1008      tree arg0 = (*arglist)[0];
1009      tree arg0_type = TREE_TYPE (arg0);
1010      tree arg1 = (*arglist)[1];
1011      tree arg1_type = TREE_TYPE (arg1);
1012
1013      /* Both arguments must be vectors and the types must be compatible.  */
1014      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1015	goto bad;
1016      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type))
1017	goto bad;
1018
1019      /* Power9 instructions provide the most efficient implementation of
1020	 ALTIVEC_BUILTIN_VEC_CMPNE if the mode is not DImode or TImode
1021	 or SFmode or DFmode.  */
1022      if (!TARGET_P9_VECTOR
1023	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DImode)
1024	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == TImode)
1025	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == SFmode)
1026	  || (TYPE_MODE (TREE_TYPE (arg0_type)) == DFmode))
1027	{
1028	  switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1029	    {
1030	      /* vec_cmpneq (va, vb) == vec_nor (vec_cmpeq (va, vb),
1031		 vec_cmpeq (va, vb)).  */
1032	      /* Note:  vec_nand also works but opt changes vec_nand's
1033		 to vec_nor's anyway.  */
1034	    case E_QImode:
1035	    case E_HImode:
1036	    case E_SImode:
1037	    case E_DImode:
1038	    case E_TImode:
1039	    case E_SFmode:
1040	    case E_DFmode:
1041	      {
1042		/* call = vec_cmpeq (va, vb)
1043		   result = vec_nor (call, call).  */
1044		vec<tree, va_gc> *params = make_tree_vector ();
1045		vec_safe_push (params, arg0);
1046		vec_safe_push (params, arg1);
1047		tree call = altivec_resolve_overloaded_builtin
1048		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_CMPEQ],
1049		   params);
1050		/* Use save_expr to ensure that operands used more than once
1051		   that may have side effects (like calls) are only evaluated
1052		   once.  */
1053		call = save_expr (call);
1054		params = make_tree_vector ();
1055		vec_safe_push (params, call);
1056		vec_safe_push (params, call);
1057		return altivec_resolve_overloaded_builtin
1058		  (loc, rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_NOR], params);
1059	      }
1060	      /* Other types are errors.  */
1061	    default:
1062	      goto bad;
1063	    }
1064	}
1065      /* else, fall through and process the Power9 alternative below */
1066    }
1067
1068  if (fcode == ALTIVEC_BUILTIN_VEC_ADDE
1069      || fcode == ALTIVEC_BUILTIN_VEC_SUBE)
1070    {
1071      /* vec_adde needs to be special cased because there is no instruction
1072	  for the {un}signed int version.  */
1073      if (nargs != 3)
1074	{
1075	  const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDE ?
1076	    "vec_adde": "vec_sube";
1077	  error ("builtin %qs only accepts 3 arguments", name);
1078	  return error_mark_node;
1079	}
1080
1081      tree arg0 = (*arglist)[0];
1082      tree arg0_type = TREE_TYPE (arg0);
1083      tree arg1 = (*arglist)[1];
1084      tree arg1_type = TREE_TYPE (arg1);
1085      tree arg2 = (*arglist)[2];
1086      tree arg2_type = TREE_TYPE (arg2);
1087
1088      /* All 3 arguments must be vectors of (signed or unsigned) (int or
1089	 __int128) and the types must be compatible.  */
1090      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1091	goto bad;
1092      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
1093	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1094	goto bad;
1095
1096      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1097	{
1098	  /* For {un}signed ints,
1099	     vec_adde (va, vb, carryv) == vec_add (vec_add (va, vb),
1100						   vec_and (carryv, 1)).
1101	     vec_sube (va, vb, carryv) == vec_sub (vec_sub (va, vb),
1102						   vec_and (carryv, 1)).  */
1103	  case E_SImode:
1104	    {
1105	      tree add_sub_builtin;
1106
1107	      vec<tree, va_gc> *params = make_tree_vector ();
1108	      vec_safe_push (params, arg0);
1109	      vec_safe_push (params, arg1);
1110
1111	      if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
1112		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
1113	      else
1114		add_sub_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
1115
1116	      tree call = altivec_resolve_overloaded_builtin (loc,
1117							      add_sub_builtin,
1118							      params);
1119	      tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1120	      tree ones_vector = build_vector_from_val (arg0_type, const1);
1121	      tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1122					       arg2, ones_vector);
1123	      params = make_tree_vector ();
1124	      vec_safe_push (params, call);
1125	      vec_safe_push (params, and_expr);
1126	      return altivec_resolve_overloaded_builtin (loc, add_sub_builtin,
1127							 params);
1128	    }
          /* For {un}signed __int128s use the vaddeuqm/vsubeuqm instructions
             directly.  */
1131	  case E_TImode:
1132	    {
1133	       tree bii;
1134
               if (fcode == ALTIVEC_BUILTIN_VEC_ADDE)
                 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDEUQM];
               else
                 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBEUQM];
1140
1141	       return altivec_resolve_overloaded_builtin (loc, bii, arglist);
1142	    }
1143
1144	  /* Types other than {un}signed int and {un}signed __int128
1145		are errors.  */
1146	  default:
1147	    goto bad;
1148	}
1149    }
1150
1151  if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC
1152      || fcode == ALTIVEC_BUILTIN_VEC_SUBEC)
1153    {
      /* vec_addec and vec_subec need to be special cased because there is
         no instruction for the {un}signed int versions.  */
1156      if (nargs != 3)
1157	{
          const char *name = fcode == ALTIVEC_BUILTIN_VEC_ADDEC
            ? "vec_addec" : "vec_subec";
1160	  error ("builtin %qs only accepts 3 arguments", name);
1161	  return error_mark_node;
1162	}
1163
1164      tree arg0 = (*arglist)[0];
1165      tree arg0_type = TREE_TYPE (arg0);
1166      tree arg1 = (*arglist)[1];
1167      tree arg1_type = TREE_TYPE (arg1);
1168      tree arg2 = (*arglist)[2];
1169      tree arg2_type = TREE_TYPE (arg2);
1170
1171      /* All 3 arguments must be vectors of (signed or unsigned) (int or
1172	 __int128) and the types must be compatible.  */
1173      if (TREE_CODE (arg0_type) != VECTOR_TYPE)
1174	goto bad;
1175      if (!lang_hooks.types_compatible_p (arg0_type, arg1_type)
1176	  || !lang_hooks.types_compatible_p (arg1_type, arg2_type))
1177	goto bad;
1178
1179      switch (TYPE_MODE (TREE_TYPE (arg0_type)))
1180	{
1181	  /* For {un}signed ints,
1182	      vec_addec (va, vb, carryv) ==
1183				vec_or (vec_addc (va, vb),
1184					vec_addc (vec_add (va, vb),
1185						  vec_and (carryv, 0x1))).  */
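          /* Similarly (as implemented below),
             vec_subec (va, vb, carryv) ==
                               vec_or (vec_subc (va, vb),
                                       vec_subc (vec_sub (va, vb),
                                                 vec_and (carryv, 0x1))).  */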
1186	  case E_SImode:
1187	    {
1188	    /* Use save_expr to ensure that operands used more than once
1189		that may have side effects (like calls) are only evaluated
1190		once.  */
1191	    tree as_builtin;
1192	    tree as_c_builtin;
1193
1194	    arg0 = save_expr (arg0);
1195	    arg1 = save_expr (arg1);
1196	    vec<tree, va_gc> *params = make_tree_vector ();
1197	    vec_safe_push (params, arg0);
1198	    vec_safe_push (params, arg1);
1199
1200	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
1201	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADDC];
1202	    else
1203	      as_c_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUBC];
1204
1205	    tree call1 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1206							     params);
1207	    params = make_tree_vector ();
1208	    vec_safe_push (params, arg0);
1209	    vec_safe_push (params, arg1);
1210
1211
1212	    if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
1213	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_ADD];
1214	    else
1215	      as_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_SUB];
1216
1217	    tree call2 = altivec_resolve_overloaded_builtin (loc, as_builtin,
1218							     params);
1219	    tree const1 = build_int_cstu (TREE_TYPE (arg0_type), 1);
1220	    tree ones_vector = build_vector_from_val (arg0_type, const1);
1221	    tree and_expr = fold_build2_loc (loc, BIT_AND_EXPR, arg0_type,
1222					     arg2, ones_vector);
1223	    params = make_tree_vector ();
1224	    vec_safe_push (params, call2);
1225	    vec_safe_push (params, and_expr);
1226	    call2 = altivec_resolve_overloaded_builtin (loc, as_c_builtin,
1227							params);
1228	    params = make_tree_vector ();
1229	    vec_safe_push (params, call1);
1230	    vec_safe_push (params, call2);
1231	    tree or_builtin = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_OR];
1232	    return altivec_resolve_overloaded_builtin (loc, or_builtin,
1233						       params);
1234	    }
          /* For {un}signed __int128s use the vaddecuq/vsubecuq
             instructions.  */
1237	  case E_TImode:
1238	    {
1239	       tree bii;
1240
               if (fcode == ALTIVEC_BUILTIN_VEC_ADDEC)
                 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VADDECUQ];
               else
                 bii = rs6000_builtin_decls[P8V_BUILTIN_VEC_VSUBECUQ];
1246
1247	       return altivec_resolve_overloaded_builtin (loc, bii, arglist);
1248	    }
1249	  /* Types other than {un}signed int and {un}signed __int128
1250		are errors.  */
1251	  default:
1252	    goto bad;
1253	}
1254    }
1255
1256  /* For now treat vec_splats and vec_promote as the same.  */
1257  if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS
1258      || fcode == ALTIVEC_BUILTIN_VEC_PROMOTE)
1259    {
1260      tree type, arg;
1261      int size;
1262      int i;
1263      bool unsigned_p;
1264      vec<constructor_elt, va_gc> *vec;
      const char *name = fcode == ALTIVEC_BUILTIN_VEC_SPLATS
        ? "vec_splats" : "vec_promote";
1266
1267      if (fcode == ALTIVEC_BUILTIN_VEC_SPLATS && nargs != 1)
1268	{
1269	  error ("builtin %qs only accepts 1 argument", name);
1270	  return error_mark_node;
1271	}
1272      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE && nargs != 2)
1273	{
1274	  error ("builtin %qs only accepts 2 arguments", name);
1275	  return error_mark_node;
1276	}
      /* vec_promote's element-position argument must be integral, but is
         otherwise ignored.  */
1278      if (fcode == ALTIVEC_BUILTIN_VEC_PROMOTE
1279	  && !INTEGRAL_TYPE_P (TREE_TYPE ((*arglist)[1])))
1280	goto bad;
1281
1282      arg = (*arglist)[0];
1283      type = TREE_TYPE (arg);
1284      if (!SCALAR_FLOAT_TYPE_P (type)
1285	  && !INTEGRAL_TYPE_P (type))
1286	goto bad;
1287      unsigned_p = TYPE_UNSIGNED (type);
1288      switch (TYPE_MODE (type))
1289	{
1290	  case E_TImode:
1291	    type = (unsigned_p ? unsigned_V1TI_type_node : V1TI_type_node);
1292	    size = 1;
1293	    break;
1294	  case E_DImode:
1295	    type = (unsigned_p ? unsigned_V2DI_type_node : V2DI_type_node);
1296	    size = 2;
1297	    break;
1298	  case E_SImode:
1299	    type = (unsigned_p ? unsigned_V4SI_type_node : V4SI_type_node);
1300	    size = 4;
1301	    break;
1302	  case E_HImode:
1303	    type = (unsigned_p ? unsigned_V8HI_type_node : V8HI_type_node);
1304	    size = 8;
1305	    break;
1306	  case E_QImode:
1307	    type = (unsigned_p ? unsigned_V16QI_type_node : V16QI_type_node);
1308	    size = 16;
1309	    break;
1310	  case E_SFmode: type = V4SF_type_node; size = 4; break;
1311	  case E_DFmode: type = V2DF_type_node; size = 2; break;
1312	  default:
1313	    goto bad;
1314	}
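      /* Illustrative example (hypothetical): vec_splats (1.0f) reaches this
         point with type == V4SF_type_node and size == 4; the loop below then
         builds the constructor (vector float){1.0f, 1.0f, 1.0f, 1.0f}.  */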
1315      arg = save_expr (fold_convert (TREE_TYPE (type), arg));
1316      vec_alloc (vec, size);
      for (i = 0; i < size; i++)
        {
          constructor_elt elt = {NULL_TREE, arg};
          vec->quick_push (elt);
        }
      return build_constructor (type, vec);
1323    }
1324
1325  /* For now use pointer tricks to do the extraction, unless we are on VSX
1326     extracting a double from a constant offset.  */
1327  if (fcode == ALTIVEC_BUILTIN_VEC_EXTRACT)
1328    {
1329      tree arg1;
1330      tree arg1_type;
1331      tree arg2;
1332      tree arg1_inner_type;
1333      tree decl, stmt;
1334      tree innerptrtype;
1335      machine_mode mode;
1336
      /* vec_extract requires exactly two arguments.  */
1338      if (nargs != 2)
1339	{
1340	  error ("builtin %qs only accepts 2 arguments", "vec_extract");
1341	  return error_mark_node;
1342	}
1343
1344      arg2 = (*arglist)[1];
1345      arg1 = (*arglist)[0];
1346      arg1_type = TREE_TYPE (arg1);
1347
1348      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
1349	goto bad;
1350      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1351	goto bad;
1352
1353      /* See if we can optimize vec_extracts with the current VSX instruction
1354	 set.  */
1355      mode = TYPE_MODE (arg1_type);
      if (VECTOR_MEM_VSX_P (mode))
        {
1359	  tree call = NULL_TREE;
1360	  int nunits = GET_MODE_NUNITS (mode);
1361
1362	  arg2 = fold_for_warn (arg2);
1363
1364	  /* If the second argument is an integer constant, generate
1365	     the built-in code if we can.  We need 64-bit and direct
1366	     move to extract the small integer vectors.  */
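          /* Illustrative example (hypothetical): for vector signed int v,
             vec_extract (v, 5) reaches here with nunits == 4; the selector
             is reduced modulo 4 below, so element 1 is extracted.  */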
1367	  if (TREE_CODE (arg2) == INTEGER_CST)
1368	    {
1369	      wide_int selector = wi::to_wide (arg2);
1370	      selector = wi::umod_trunc (selector, nunits);
1371	      arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1372	      switch (mode)
1373		{
1374		default:
1375		  break;
1376
1377		case E_V1TImode:
1378		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V1TI];
1379		  break;
1380
1381		case E_V2DFmode:
1382		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
1383		  break;
1384
1385		case E_V2DImode:
1386		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
1387		  break;
1388
1389		case E_V4SFmode:
1390		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
1391		  break;
1392
1393		case E_V4SImode:
1394		  if (TARGET_DIRECT_MOVE_64BIT)
1395		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
1396		  break;
1397
1398		case E_V8HImode:
1399		  if (TARGET_DIRECT_MOVE_64BIT)
1400		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
1401		  break;
1402
1403		case E_V16QImode:
1404		  if (TARGET_DIRECT_MOVE_64BIT)
1405		    call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
1406		  break;
1407		}
1408	    }
1409
1410	  /* If the second argument is variable, we can optimize it if we are
1411	     generating 64-bit code on a machine with direct move.  */
1412	  else if (TREE_CODE (arg2) != INTEGER_CST && TARGET_DIRECT_MOVE_64BIT)
1413	    {
1414	      switch (mode)
1415		{
1416		default:
1417		  break;
1418
1419		case E_V2DFmode:
1420		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DF];
1421		  break;
1422
1423		case E_V2DImode:
1424		  call = rs6000_builtin_decls[VSX_BUILTIN_VEC_EXT_V2DI];
1425		  break;
1426
1427		case E_V4SFmode:
1428		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SF];
1429		  break;
1430
1431		case E_V4SImode:
1432		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V4SI];
1433		  break;
1434
1435		case E_V8HImode:
1436		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V8HI];
1437		  break;
1438
1439		case E_V16QImode:
1440		  call = rs6000_builtin_decls[ALTIVEC_BUILTIN_VEC_EXT_V16QI];
1441		  break;
1442		}
1443	    }
1444
1445	  if (call)
1446	    {
1447	      tree result = build_call_expr (call, 2, arg1, arg2);
              /* Coerce the result to the vector element type; this may be
                 a no-op.  */
1449	      arg1_inner_type = TREE_TYPE (arg1_type);
1450	      result = fold_convert (arg1_inner_type, result);
1451	      return result;
1452	    }
1453	}
1454
1455      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2). */
1456      arg1_inner_type = TREE_TYPE (arg1_type);
1457      arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
1458			      build_int_cst (TREE_TYPE (arg2),
1459					     TYPE_VECTOR_SUBPARTS (arg1_type)
1460					     - 1), 0);
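      /* The mask above wraps out-of-range indices; e.g. (hypothetically) for
         a four-element vector, an index of 5 becomes 5 & 3 == 1.  */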
1461      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1462      DECL_EXTERNAL (decl) = 0;
1463      TREE_PUBLIC (decl) = 0;
1464      DECL_CONTEXT (decl) = current_function_decl;
1465      TREE_USED (decl) = 1;
1466      TREE_TYPE (decl) = arg1_type;
1467      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1468      if (c_dialect_cxx ())
1469	{
1470	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
1471			 NULL_TREE, NULL_TREE);
1472	  SET_EXPR_LOCATION (stmt, loc);
1473	}
1474      else
1475	{
1476	  DECL_INITIAL (decl) = arg1;
1477	  stmt = build1 (DECL_EXPR, arg1_type, decl);
1478	  TREE_ADDRESSABLE (decl) = 1;
1479	  SET_EXPR_LOCATION (stmt, loc);
1480	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1481	}
1482
1483      innerptrtype = build_pointer_type (arg1_inner_type);
1484
1485      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1486      stmt = convert (innerptrtype, stmt);
1487      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1488      stmt = build_indirect_ref (loc, stmt, RO_NULL);
1489
      /* PR83660: We mark this as having side effects so that downstream in
         fold_build_cleanup_point_expr () it will get a CLEANUP_POINT_EXPR.
         If it does not, we can run into an ICE later in
         gimplify_cleanup_point_expr ().  Potentially this causes a missed
         optimization, because there actually is no side effect.  */
1496      if (c_dialect_cxx ())
1497	TREE_SIDE_EFFECTS (stmt) = 1;
1498
1499      return stmt;
1500    }
1501
  /* For now use pointer tricks to do the insertion, unless we are on VSX
     inserting a double at a constant offset.  */
1504  if (fcode == ALTIVEC_BUILTIN_VEC_INSERT)
1505    {
1506      tree arg0;
1507      tree arg1;
1508      tree arg2;
1509      tree arg1_type;
1510      tree arg1_inner_type;
1511      tree decl, stmt;
1512      tree innerptrtype;
1513      machine_mode mode;
1514
      /* vec_insert requires exactly three arguments.  */
1516      if (nargs != 3)
1517	{
1518	  error ("builtin %qs only accepts 3 arguments", "vec_insert");
1519	  return error_mark_node;
1520	}
1521
1522      arg0 = (*arglist)[0];
1523      arg1 = (*arglist)[1];
1524      arg1_type = TREE_TYPE (arg1);
1525      arg2 = fold_for_warn ((*arglist)[2]);
1526
1527      if (TREE_CODE (arg1_type) != VECTOR_TYPE)
1528	goto bad;
1529      if (!INTEGRAL_TYPE_P (TREE_TYPE (arg2)))
1530	goto bad;
1531
1532      /* If we can use the VSX xxpermdi instruction, use that for insert.  */
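      /* Illustrative example (hypothetical): for vector double v and double
         d, vec_insert (d, v, 3) can take the path below; the constant
         selector is reduced modulo 2, so the scalar lands in element 1.  */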
1533      mode = TYPE_MODE (arg1_type);
1534      if ((mode == V2DFmode || mode == V2DImode) && VECTOR_UNIT_VSX_P (mode)
1535	  && TREE_CODE (arg2) == INTEGER_CST)
1536	{
1537	  wide_int selector = wi::to_wide (arg2);
1538	  selector = wi::umod_trunc (selector, 2);
1539	  tree call = NULL_TREE;
1540
1541	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1542	  if (mode == V2DFmode)
1543	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DF];
1544	  else if (mode == V2DImode)
1545	    call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V2DI];
1546
1547	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1548	     reversed.  */
1549	  if (call)
1550	    return build_call_expr (call, 3, arg1, arg0, arg2);
1551	}
1552      else if (mode == V1TImode && VECTOR_UNIT_VSX_P (mode)
1553	       && TREE_CODE (arg2) == INTEGER_CST)
1554	{
1555	  tree call = rs6000_builtin_decls[VSX_BUILTIN_VEC_SET_V1TI];
          wide_int selector = wi::zero (32);
1557
1558	  arg2 = wide_int_to_tree (TREE_TYPE (arg2), selector);
1559	  /* Note, __builtin_vec_insert_<xxx> has vector and scalar types
1560	     reversed.  */
1561	  return build_call_expr (call, 3, arg1, arg0, arg2);
1562	}
1563
1564      /* Build *(((arg1_inner_type*)&(vector type){arg1})+arg2) = arg0. */
1565      arg1_inner_type = TREE_TYPE (arg1_type);
1566      if (TYPE_VECTOR_SUBPARTS (arg1_type) == 1)
1567	arg2 = build_int_cst (TREE_TYPE (arg2), 0);
1568      else
1569	arg2 = build_binary_op (loc, BIT_AND_EXPR, arg2,
1570				build_int_cst (TREE_TYPE (arg2),
1571					       TYPE_VECTOR_SUBPARTS (arg1_type)
1572					       - 1), 0);
1573      decl = build_decl (loc, VAR_DECL, NULL_TREE, arg1_type);
1574      DECL_EXTERNAL (decl) = 0;
1575      TREE_PUBLIC (decl) = 0;
1576      DECL_CONTEXT (decl) = current_function_decl;
1577      TREE_USED (decl) = 1;
1578      TREE_TYPE (decl) = arg1_type;
1579      TREE_READONLY (decl) = TYPE_READONLY (arg1_type);
1580      if (c_dialect_cxx ())
1581	{
1582	  stmt = build4 (TARGET_EXPR, arg1_type, decl, arg1,
1583			 NULL_TREE, NULL_TREE);
1584	  SET_EXPR_LOCATION (stmt, loc);
1585	}
1586      else
1587	{
1588	  DECL_INITIAL (decl) = arg1;
1589	  stmt = build1 (DECL_EXPR, arg1_type, decl);
1590	  TREE_ADDRESSABLE (decl) = 1;
1591	  SET_EXPR_LOCATION (stmt, loc);
1592	  stmt = build1 (COMPOUND_LITERAL_EXPR, arg1_type, stmt);
1593	}
1594
1595      innerptrtype = build_pointer_type (arg1_inner_type);
1596
1597      stmt = build_unary_op (loc, ADDR_EXPR, stmt, 0);
1598      stmt = convert (innerptrtype, stmt);
1599      stmt = build_binary_op (loc, PLUS_EXPR, stmt, arg2, 1);
1600      stmt = build_indirect_ref (loc, stmt, RO_NULL);
1601      stmt = build2 (MODIFY_EXPR, TREE_TYPE (stmt), stmt,
1602		     convert (TREE_TYPE (stmt), arg0));
1603      stmt = build2 (COMPOUND_EXPR, arg1_type, stmt, decl);
1604      return stmt;
1605    }
1606
1607  for (n = 0;
1608       !VOID_TYPE_P (TREE_VALUE (fnargs)) && n < nargs;
1609       fnargs = TREE_CHAIN (fnargs), n++)
1610    {
1611      tree decl_type = TREE_VALUE (fnargs);
1612      tree arg = (*arglist)[n];
1613      tree type;
1614
1615      if (arg == error_mark_node)
1616	return error_mark_node;
1617
1618      if (n >= 3)
1619        abort ();
1620
1621      arg = default_conversion (arg);
1622
1623      /* The C++ front-end converts float * to const void * using
1624	 NOP_EXPR<const void *> (NOP_EXPR<void *> (x)).  */
1625      type = TREE_TYPE (arg);
1626      if (POINTER_TYPE_P (type)
1627	  && TREE_CODE (arg) == NOP_EXPR
1628	  && lang_hooks.types_compatible_p (TREE_TYPE (arg),
1629					    const_ptr_type_node)
1630	  && lang_hooks.types_compatible_p (TREE_TYPE (TREE_OPERAND (arg, 0)),
1631					    ptr_type_node))
1632	{
1633	  arg = TREE_OPERAND (arg, 0);
1634          type = TREE_TYPE (arg);
1635	}
1636
1637      /* Remove the const from the pointers to simplify the overload
1638	 matching further down.  */
1639      if (POINTER_TYPE_P (decl_type)
1640	  && POINTER_TYPE_P (type)
1641	  && TYPE_QUALS (TREE_TYPE (type)) != 0)
1642	{
1643          if (TYPE_READONLY (TREE_TYPE (type))
1644	      && !TYPE_READONLY (TREE_TYPE (decl_type)))
1645	    warning (0, "passing argument %d of %qE discards qualifiers from "
1646		        "pointer target type", n + 1, fndecl);
1647	  type = build_pointer_type (build_qualified_type (TREE_TYPE (type),
1648							   0));
1649	  arg = fold_convert (type, arg);
1650	}
1651
1652      /* For P9V_BUILTIN_VEC_LXVL, convert any const * to its non constant
1653	 equivalent to simplify the overload matching below.  */
1654      if (fcode == P9V_BUILTIN_VEC_LXVL)
1655	{
1656	  if (POINTER_TYPE_P (type)
1657	      && TYPE_READONLY (TREE_TYPE (type)))
1658	    {
1659	      type = build_pointer_type (build_qualified_type (
1660						TREE_TYPE (type),0));
1661	      arg = fold_convert (type, arg);
1662	    }
1663	}
1664
1665      args[n] = arg;
1666      types[n] = type;
1667    }
1668
1669  /* If the number of arguments did not match the prototype, return NULL
1670     and the generic code will issue the appropriate error message.  */
1671  if (!VOID_TYPE_P (TREE_VALUE (fnargs)) || n < nargs)
1672    return NULL;
1673
1674  if (n == 0)
1675    abort ();
1676
1677  if (fcode == ALTIVEC_BUILTIN_VEC_STEP)
1678    {
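      /* Hypothetical example: vec_step applied to a vector float operand
         resolves to the integer constant 4, the number of elements in the
         vector type.  */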
1679      if (TREE_CODE (types[0]) != VECTOR_TYPE)
1680	goto bad;
1681
1682      return build_int_cst (NULL_TREE, TYPE_VECTOR_SUBPARTS (types[0]));
1683    }
1684
1685  {
1686    bool unsupported_builtin = false;
1687    enum rs6000_builtins overloaded_code;
1688    tree result = NULL;
1689    for (desc = altivec_overloaded_builtins;
1690	 desc->code && desc->code != fcode; desc++)
1691      continue;
1692
    /* Need to special case __builtin_cmpb because the overloaded forms
1694       of this function take (unsigned int, unsigned int) or (unsigned
1695       long long int, unsigned long long int).  Since C conventions
1696       allow the respective argument types to be implicitly coerced into
1697       each other, the default handling does not provide adequate
1698       discrimination between the desired forms of the function.  */
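    /* For example (illustrative only): __builtin_cmpb with two unsigned int
       arguments resolves to the 32-bit P6_BUILTIN_CMPB_32 form below, while
       unsigned long long arguments resolve to P6_BUILTIN_CMPB.  */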
1699    if (fcode == P6_OV_BUILTIN_CMPB)
1700      {
1701	machine_mode arg1_mode = TYPE_MODE (types[0]);
1702	machine_mode arg2_mode = TYPE_MODE (types[1]);
1703
1704	if (nargs != 2)
1705	  {
1706	    error ("builtin %qs only accepts 2 arguments", "__builtin_cmpb");
1707	    return error_mark_node;
1708	  }
1709
1710	/* If any supplied arguments are wider than 32 bits, resolve to
1711	   64-bit variant of built-in function.  */
1712	if ((GET_MODE_PRECISION (arg1_mode) > 32)
1713	    || (GET_MODE_PRECISION (arg2_mode) > 32))
1714	  {
            /* Ensure all argument and result types are compatible with
               the built-in function represented by P6_BUILTIN_CMPB.  */
1717	    overloaded_code = P6_BUILTIN_CMPB;
1718	  }
1719	else
1720	  {
            /* Ensure all argument and result types are compatible with
               the built-in function represented by P6_BUILTIN_CMPB_32.  */
1723	    overloaded_code = P6_BUILTIN_CMPB_32;
1724	  }
1725
1726	while (desc->code && desc->code == fcode
1727	       && desc->overloaded_code != overloaded_code)
1728	  desc++;
1729
1730	if (desc->code && (desc->code == fcode)
1731	    && rs6000_builtin_type_compatible (types[0], desc->op1)
1732	    && rs6000_builtin_type_compatible (types[1], desc->op2))
1733	  {
1734	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1735	      {
1736		result = altivec_build_resolved_builtin (args, n, desc);
                /* overloaded_code is set above.  */
1738		if (!rs6000_builtin_is_supported_p (overloaded_code))
1739		  unsupported_builtin = true;
1740		else
1741		  return result;
1742	      }
1743	    else
1744	      unsupported_builtin = true;
1745	  }
1746      }
1747    else if (fcode == P9V_BUILTIN_VEC_VSIEDP)
1748      {
1749	machine_mode arg1_mode = TYPE_MODE (types[0]);
1750
1751	if (nargs != 2)
1752	  {
1753	    error ("builtin %qs only accepts 2 arguments",
1754		   "scalar_insert_exp");
1755	    return error_mark_node;
1756	  }
1757
1758	/* If supplied first argument is wider than 64 bits, resolve to
1759	   128-bit variant of built-in function.  */
1760	if (GET_MODE_PRECISION (arg1_mode) > 64)
1761	  {
1762	    /* If first argument is of float variety, choose variant
1763	       that expects __ieee128 argument.  Otherwise, expect
1764	       __int128 argument.  */
1765	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1766	      overloaded_code = P9V_BUILTIN_VSIEQPF;
1767	    else
1768	      overloaded_code = P9V_BUILTIN_VSIEQP;
1769	  }
1770	else
1771	  {
1772	    /* If first argument is of float variety, choose variant
1773	       that expects double argument.  Otherwise, expect
1774	       long long int argument.  */
1775	    if (GET_MODE_CLASS (arg1_mode) == MODE_FLOAT)
1776	      overloaded_code = P9V_BUILTIN_VSIEDPF;
1777	    else
1778	      overloaded_code = P9V_BUILTIN_VSIEDP;
1779	  }
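        /* For example (illustrative only): scalar_insert_exp with a double
           first argument resolves to P9V_BUILTIN_VSIEDPF, while an unsigned
           long long first argument resolves to P9V_BUILTIN_VSIEDP.  */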
1780	while (desc->code && desc->code == fcode
1781	       && desc->overloaded_code != overloaded_code)
1782	  desc++;
1783
1784	if (desc->code && (desc->code == fcode)
1785	    && rs6000_builtin_type_compatible (types[0], desc->op1)
1786	    && rs6000_builtin_type_compatible (types[1], desc->op2))
1787	  {
1788	    if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1789	      {
1790		result = altivec_build_resolved_builtin (args, n, desc);
1791		/* overloaded_code is set above.  */
1792		if (!rs6000_builtin_is_supported_p (overloaded_code))
1793		  unsupported_builtin = true;
1794		else
1795		  return result;
1796	      }
1797	    else
1798	      unsupported_builtin = true;
1799	  }
1800      }
1801    else
1802      {
1803	/* For arguments after the last, we have RS6000_BTI_NOT_OPAQUE in
1804	   the opX fields.  */
1805	for (; desc->code == fcode; desc++)
1806	  {
1807	    if ((desc->op1 == RS6000_BTI_NOT_OPAQUE
1808		 || rs6000_builtin_type_compatible (types[0], desc->op1))
1809		&& (desc->op2 == RS6000_BTI_NOT_OPAQUE
1810		    || rs6000_builtin_type_compatible (types[1], desc->op2))
1811		&& (desc->op3 == RS6000_BTI_NOT_OPAQUE
1812		    || rs6000_builtin_type_compatible (types[2], desc->op3)))
1813	      {
1814		if (rs6000_builtin_decls[desc->overloaded_code] != NULL_TREE)
1815		  {
1816		    result = altivec_build_resolved_builtin (args, n, desc);
1817		    if (!rs6000_builtin_is_supported_p (desc->overloaded_code))
1818		      {
1819			/* Allow loop to continue in case a different
1820			   definition is supported.  */
1821			overloaded_code = desc->overloaded_code;
1822			unsupported_builtin = true;
1823		      }
1824		    else
1825		      return result;
1826		  }
1827		else
1828		  unsupported_builtin = true;
1829	      }
1830	  }
1831      }
1832
1833    if (unsupported_builtin)
1834      {
1835	const char *name = rs6000_overloaded_builtin_name (fcode);
1836	if (result != NULL)
1837	  {
1838	    const char *internal_name
1839	      = rs6000_overloaded_builtin_name (overloaded_code);
1840	    /* An error message making reference to the name of the
1841	       non-overloaded function has already been issued.  Add
1842	       clarification of the previous message.  */
1843	    rich_location richloc (line_table, input_location);
1844	    inform (&richloc, "builtin %qs requires builtin %qs",
1845		    name, internal_name);
1846	  }
1847	else
1848	  error ("%qs is not supported in this compiler configuration", name);
        /* If an error-representing result tree was returned from
           altivec_build_resolved_builtin above, use it.  */
1851	return (result != NULL) ? result : error_mark_node;
1852      }
1853  }
1854 bad:
1855  {
1856    const char *name = rs6000_overloaded_builtin_name (fcode);
1857    error ("invalid parameter combination for AltiVec intrinsic %qs", name);
1858    return error_mark_node;
1859  }
1860}
1861