/* Default target hook functions.
   Copyright (C) 2003-2022 Free Software Foundation, Inc.

This file is part of GCC.

GCC is free software; you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free
Software Foundation; either version 3, or (at your option) any later
version.

GCC is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or
FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License
for more details.

You should have received a copy of the GNU General Public License
along with GCC; see the file COPYING3.  If not see
<http://www.gnu.org/licenses/>.  */

/* The migration of target macros to target hooks works as follows:

   1. Create a target hook that uses the existing target macros to
      implement the same functionality.

   2. Convert all the MI files to use the hook instead of the macro.

   3. Repeat for a majority of the remaining target macros.  This will
      take some time.

   4. Tell target maintainers to start migrating.

   5. Eventually convert the backends to override the hook instead of
      defining the macros.  This will take some time too.
   6. At a point yet to be determined, poison the macros.  Unmigrated
      targets will break at this point.

   Note that we expect steps 1-3 to be done by the people that
   understand what the MI does with each macro, and step 5 to be done
   by the target maintainers for their respective targets.

   Note that steps 1 and 2 don't have to be done together, but no
   target can override the new hook until step 2 is complete for it.

   Once the macros are poisoned, we will revert to the old migration
   rules - migrate the macro, callers, and targets all at once.  This
   comment can thus be removed at that point.  */
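
/* As an illustrative sketch of step 1 (the hook and macro names below
   are hypothetical, not part of this file), the default hook typically
   just wraps the old macro:

       bool
       default_frobnicate_p (machine_mode mode)
       {
       #ifdef FROBNICATE_P
         return FROBNICATE_P (mode);
       #else
         return false;
       #endif
       }

   MI code then calls targetm.frobnicate_p instead of testing the
   macro directly.  */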

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "target.h"
#include "function.h"
#include "rtl.h"
#include "tree.h"
#include "tree-ssa-alias.h"
#include "gimple-expr.h"
#include "memmodel.h"
#include "backend.h"
#include "emit-rtl.h"
#include "df.h"
#include "tm_p.h"
#include "stringpool.h"
#include "tree-vrp.h"
#include "tree-ssanames.h"
#include "profile-count.h"
#include "optabs.h"
#include "regs.h"
#include "recog.h"
#include "diagnostic-core.h"
#include "fold-const.h"
#include "stor-layout.h"
#include "varasm.h"
#include "flags.h"
#include "explow.h"
#include "expmed.h"
#include "calls.h"
#include "expr.h"
#include "output.h"
#include "common/common-target.h"
#include "reload.h"
#include "intl.h"
#include "opts.h"
#include "gimplify.h"
#include "predict.h"
#include "real.h"
#include "langhooks.h"
#include "sbitmap.h"
#include "function-abi.h"
#include "attribs.h"
#include "asan.h"
#include "gimple.h"
#include "cfgloop.h"
#include "tree-vectorizer.h"

bool
default_legitimate_address_p (machine_mode mode ATTRIBUTE_UNUSED,
			      rtx addr ATTRIBUTE_UNUSED,
			      bool strict ATTRIBUTE_UNUSED)
{
#ifdef GO_IF_LEGITIMATE_ADDRESS
  /* Defer to the old implementation using a goto.  */
  if (strict)
    return strict_memory_address_p (mode, addr);
  else
    return memory_address_p (mode, addr);
#else
  gcc_unreachable ();
#endif
}
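
/* A target that has migrated away from GO_IF_LEGITIMATE_ADDRESS would
   instead override the hook in its target-definition file, roughly
   (hypothetical function name):

       #undef TARGET_LEGITIMATE_ADDRESS_P
       #define TARGET_LEGITIMATE_ADDRESS_P my_legitimate_address_p

   where my_legitimate_address_p tests MODE, ADDR and STRICT directly
   instead of relying on a goto-style macro.  */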

void
default_external_libcall (rtx fun ATTRIBUTE_UNUSED)
{
#ifdef ASM_OUTPUT_EXTERNAL_LIBCALL
  ASM_OUTPUT_EXTERNAL_LIBCALL (asm_out_file, fun);
#endif
}

int
default_unspec_may_trap_p (const_rtx x, unsigned flags)
{
  int i;

  /* Any floating arithmetic may trap.  */
  if (SCALAR_FLOAT_MODE_P (GET_MODE (x)) && flag_trapping_math)
    return 1;

  for (i = 0; i < XVECLEN (x, 0); ++i)
    {
      if (may_trap_p_1 (XVECEXP (x, 0, i), flags))
	return 1;
    }

  return 0;
}

machine_mode
default_promote_function_mode (const_tree type ATTRIBUTE_UNUSED,
			       machine_mode mode,
			       int *punsignedp ATTRIBUTE_UNUSED,
			       const_tree funtype ATTRIBUTE_UNUSED,
			       int for_return ATTRIBUTE_UNUSED)
{
  if (type != NULL_TREE && for_return == 2)
    return promote_mode (type, mode, punsignedp);
  return mode;
}

machine_mode
default_promote_function_mode_always_promote (const_tree type,
					      machine_mode mode,
					      int *punsignedp,
					      const_tree funtype ATTRIBUTE_UNUSED,
					      int for_return ATTRIBUTE_UNUSED)
{
  return promote_mode (type, mode, punsignedp);
}

machine_mode
default_cc_modes_compatible (machine_mode m1, machine_mode m2)
{
  if (m1 == m2)
    return m1;
  return VOIDmode;
}

bool
default_return_in_memory (const_tree type,
			  const_tree fntype ATTRIBUTE_UNUSED)
{
  return (TYPE_MODE (type) == BLKmode);
}

rtx
default_legitimize_address (rtx x, rtx orig_x ATTRIBUTE_UNUSED,
			    machine_mode mode ATTRIBUTE_UNUSED)
{
  return x;
}

bool
default_legitimize_address_displacement (rtx *, rtx *, poly_int64,
					 machine_mode)
{
  return false;
}

bool
default_const_not_ok_for_debug_p (rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    return true;
  return false;
}

rtx
default_expand_builtin_saveregs (void)
{
  error ("%<__builtin_saveregs%> not supported by this target");
  return const0_rtx;
}

void
default_setup_incoming_varargs (cumulative_args_t,
				const function_arg_info &, int *, int)
{
}

/* The default implementation of TARGET_BUILTIN_SETJMP_FRAME_VALUE.  */

rtx
default_builtin_setjmp_frame_value (void)
{
  return virtual_stack_vars_rtx;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns false.  */

bool
hook_bool_CUMULATIVE_ARGS_false (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return false;
}

bool
default_pretend_outgoing_varargs_named (cumulative_args_t ca ATTRIBUTE_UNUSED)
{
  return (targetm.calls.setup_incoming_varargs
	  != default_setup_incoming_varargs);
}

scalar_int_mode
default_eh_return_filter_mode (void)
{
  return targetm.unwind_word_mode ();
}

scalar_int_mode
default_libgcc_cmp_return_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_libgcc_shift_count_mode (void)
{
  return word_mode;
}

scalar_int_mode
default_unwind_word_mode (void)
{
  return word_mode;
}

/* The default implementation of TARGET_SHIFT_TRUNCATION_MASK.  */

unsigned HOST_WIDE_INT
default_shift_truncation_mask (machine_mode mode)
{
  return SHIFT_COUNT_TRUNCATED ? GET_MODE_UNIT_BITSIZE (mode) - 1 : 0;
}
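
/* For example, on a SHIFT_COUNT_TRUNCATED target this yields a mask
   of 31 for SImode shifts and 63 for DImode shifts, i.e. shift counts
   are taken modulo the unit bitsize; a mask of 0 means nothing may be
   assumed about out-of-range shift counts.  */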

/* The default implementation of TARGET_MIN_DIVISIONS_FOR_RECIP_MUL.  */

unsigned int
default_min_divisions_for_recip_mul (machine_mode mode ATTRIBUTE_UNUSED)
{
  return have_insn_for (DIV, mode) ? 3 : 2;
}

/* The default implementation of TARGET_MODE_REP_EXTENDED.  */

int
default_mode_rep_extended (scalar_int_mode, scalar_int_mode)
{
  return UNKNOWN;
}

/* Generic hook that takes a CUMULATIVE_ARGS pointer and returns true.  */

bool
hook_bool_CUMULATIVE_ARGS_true (cumulative_args_t a ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return machine mode for non-standard suffix
   or VOIDmode if non-standard suffixes are unsupported.  */
machine_mode
default_mode_for_suffix (char suffix ATTRIBUTE_UNUSED)
{
  return VOIDmode;
}

/* The generic C++ ABI specifies this is a 64-bit value.  */
tree
default_cxx_guard_type (void)
{
  return long_long_integer_type_node;
}

/* Returns the size of the cookie to use when allocating an array
   whose elements have the indicated TYPE.  Assumes that it is already
   known that a cookie is needed.  */

tree
default_cxx_get_cookie_size (tree type)
{
  tree cookie_size;

  /* We need to allocate an additional max (sizeof (size_t), alignof
     (true_type)) bytes.  */
  tree sizetype_size;
  tree type_align;

  sizetype_size = size_in_bytes (sizetype);
  type_align = size_int (TYPE_ALIGN_UNIT (type));
  if (tree_int_cst_lt (type_align, sizetype_size))
    cookie_size = sizetype_size;
  else
    cookie_size = type_align;

  return cookie_size;
}
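
/* For instance, on a typical LP64 target where sizeof (size_t) == 8,
   an array of a type with 16-byte alignment gets a 16-byte cookie,
   while an array of plain ints gets the minimum 8-byte cookie.  */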

/* Return true if a parameter must be passed by reference.  This version
   of the TARGET_PASS_BY_REFERENCE hook uses just MUST_PASS_IN_STACK.  */

bool
hook_pass_by_reference_must_pass_in_stack (cumulative_args_t,
					   const function_arg_info &arg)
{
  return targetm.calls.must_pass_in_stack (arg);
}

/* Return true if a parameter follows callee copies conventions.  This
   version of the hook is true for all named arguments.  */

bool
hook_callee_copies_named (cumulative_args_t, const function_arg_info &arg)
{
  return arg.named;
}

/* Emit to STREAM the assembler syntax for insn operand X.  */

void
default_print_operand (FILE *stream ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
		       int code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND
  PRINT_OPERAND (stream, x, code);
#else
  gcc_unreachable ();
#endif
}

/* Emit to STREAM the assembler syntax for an insn operand whose memory
   address is X.  */

void
default_print_operand_address (FILE *stream ATTRIBUTE_UNUSED,
			       machine_mode /*mode*/,
			       rtx x ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_ADDRESS
  PRINT_OPERAND_ADDRESS (stream, x);
#else
  gcc_unreachable ();
#endif
}

/* Return true if CODE is a valid punctuation character for the
   `print_operand' hook.  */

bool
default_print_operand_punct_valid_p (unsigned char code ATTRIBUTE_UNUSED)
{
#ifdef PRINT_OPERAND_PUNCT_VALID_P
  return PRINT_OPERAND_PUNCT_VALID_P (code);
#else
  return false;
#endif
}

/* The default implementation of TARGET_MANGLE_ASSEMBLER_NAME.  */
tree
default_mangle_assembler_name (const char *name ATTRIBUTE_UNUSED)
{
  const char *skipped = name + (*name == '*' ? 1 : 0);
  const char *stripped = targetm.strip_name_encoding (skipped);
  if (*name != '*' && user_label_prefix[0])
    stripped = ACONCAT ((user_label_prefix, stripped, NULL));
  return get_identifier (stripped);
}
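
/* For example, with user_label_prefix "_", "foo" yields the
   identifier "_foo", while "*foo" (already encoded) is stripped to
   "foo" and receives no prefix.  */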

/* The default implementation of TARGET_TRANSLATE_MODE_ATTRIBUTE.  */

machine_mode
default_translate_mode_attribute (machine_mode mode)
{
  return mode;
}

/* True if MODE is valid for the target.  By "valid", we mean able to
   be manipulated in non-trivial ways.  In particular, this means all
   the arithmetic is supported.

   By default we guess this means that any C type is supported.  If
   we can't map the mode back to a type that would be available in C,
   then reject it.  A special case here is the double-word arithmetic
   supported by optabs.cc.  */

bool
default_scalar_mode_supported_p (scalar_mode mode)
{
  int precision = GET_MODE_PRECISION (mode);

  switch (GET_MODE_CLASS (mode))
    {
    case MODE_PARTIAL_INT:
    case MODE_INT:
      if (precision == CHAR_TYPE_SIZE)
	return true;
      if (precision == SHORT_TYPE_SIZE)
	return true;
      if (precision == INT_TYPE_SIZE)
	return true;
      if (precision == LONG_TYPE_SIZE)
	return true;
      if (precision == LONG_LONG_TYPE_SIZE)
	return true;
      if (precision == 2 * BITS_PER_WORD)
	return true;
      return false;

    case MODE_FLOAT:
      if (precision == FLOAT_TYPE_SIZE)
	return true;
      if (precision == DOUBLE_TYPE_SIZE)
	return true;
      if (precision == LONG_DOUBLE_TYPE_SIZE)
	return true;
      return false;

    case MODE_DECIMAL_FLOAT:
    case MODE_FRACT:
    case MODE_UFRACT:
    case MODE_ACCUM:
    case MODE_UACCUM:
      return false;

    default:
      gcc_unreachable ();
    }
}

/* Return true if libgcc supports floating-point mode MODE (known to
   be supported as a scalar mode).  */

bool
default_libgcc_floating_mode_supported_p (scalar_float_mode mode)
{
  switch (mode)
    {
#ifdef HAVE_SFmode
    case E_SFmode:
#endif
#ifdef HAVE_DFmode
    case E_DFmode:
#endif
#ifdef HAVE_XFmode
    case E_XFmode:
#endif
#ifdef HAVE_TFmode
    case E_TFmode:
#endif
      return true;

    default:
      return false;
    }
}

/* Return the machine mode to use for the type _FloatN, if EXTENDED is
   false, or _FloatNx, if EXTENDED is true, or VOIDmode if not
   supported.  */
opt_scalar_float_mode
default_floatn_mode (int n, bool extended)
{
  if (extended)
    {
      opt_scalar_float_mode cand1, cand2;
      scalar_float_mode mode;
      switch (n)
	{
	case 32:
#ifdef HAVE_DFmode
	  cand1 = DFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_XFmode
	  cand1 = XFmode;
#endif
#ifdef HAVE_TFmode
	  cand2 = TFmode;
#endif
	  break;

	case 128:
	  break;

	default:
	  /* Those are the only valid _FloatNx types.  */
	  gcc_unreachable ();
	}
      if (cand1.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand1;
      if (cand2.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits > n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand2;
    }
  else
    {
      opt_scalar_float_mode cand;
      scalar_float_mode mode;
      switch (n)
	{
	case 16:
	  /* Always enable _Float16 if we have basic support for the mode.
	     Targets can control the range and precision of operations on
	     the _Float16 type using TARGET_C_EXCESS_PRECISION.  */
#ifdef HAVE_HFmode
	  cand = HFmode;
#endif
	  break;

	case 32:
#ifdef HAVE_SFmode
	  cand = SFmode;
#endif
	  break;

	case 64:
#ifdef HAVE_DFmode
	  cand = DFmode;
#endif
	  break;

	case 128:
#ifdef HAVE_TFmode
	  cand = TFmode;
#endif
	  break;

	default:
	  break;
	}
      if (cand.exists (&mode)
	  && REAL_MODE_FORMAT (mode)->ieee_bits == n
	  && targetm.scalar_mode_supported_p (mode)
	  && targetm.libgcc_floating_mode_supported_p (mode))
	return cand;
    }
  return opt_scalar_float_mode ();
}
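
/* For example, on a target whose SFmode and DFmode use the IEEE
   binary32 and binary64 formats, this maps _Float32 to SFmode and
   _Float64 to DFmode, and maps _Float32x to DFmode, since DFmode
   provides more than 32 bits of IEEE precision.  */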

/* Define this to return true if the _FloatN and _FloatNx built-in functions
   should implicitly enable the built-in function without the __builtin_
   prefix in addition to the normal built-in function with the __builtin_
   prefix.  The default is to only enable built-in functions without the
   __builtin_ prefix for the GNU C language.  The argument FUNC is the
   enum built_in_function id of the function to be enabled.  */

bool
default_floatn_builtin_p (int func ATTRIBUTE_UNUSED)
{
  static bool first_time_p = true;
  static bool c_or_objective_c;

  if (first_time_p)
    {
      first_time_p = false;
      c_or_objective_c = lang_GNU_C () || lang_GNU_OBJC ();
    }

  return c_or_objective_c;
}

/* Make some target macros usable by target-independent code.  */
bool
targhook_words_big_endian (void)
{
  return !!WORDS_BIG_ENDIAN;
}

bool
targhook_float_words_big_endian (void)
{
  return !!FLOAT_WORDS_BIG_ENDIAN;
}

/* True if the target supports floating-point exceptions and rounding
   modes.  */

bool
default_float_exceptions_rounding_supported_p (void)
{
#ifdef HAVE_adddf3
  return HAVE_adddf3;
#else
  return false;
#endif
}

/* True if the target supports decimal floating point.  */

bool
default_decimal_float_supported_p (void)
{
  return ENABLE_DECIMAL_FLOAT;
}

/* True if the target supports fixed-point arithmetic.  */

bool
default_fixed_point_supported_p (void)
{
  return ENABLE_FIXED_POINT;
}

/* True if the target supports GNU indirect functions.  */

bool
default_has_ifunc_p (void)
{
  return HAVE_GNU_INDIRECT_FUNCTION;
}

/* Return true if we predict the loop LOOP will be transformed to a
   low-overhead loop, otherwise return false.

   By default, false is returned, as this hook's applicability should be
   verified for each target.  Target maintainers should re-define the hook
   if the target can take advantage of it.  */

bool
default_predict_doloop_p (class loop *loop ATTRIBUTE_UNUSED)
{
  return false;
}

/* By default, just use the input MODE itself.  */

machine_mode
default_preferred_doloop_mode (machine_mode mode)
{
  return mode;
}

/* NULL if INSN is valid within a low-overhead loop, otherwise returns
   an error message.

   This function checks whether a given INSN is valid within a low-overhead
   loop.  If INSN is invalid it returns the reason for that, otherwise it
   returns NULL.  A called function may clobber any special registers required
   for low-overhead looping.  Additionally, some targets (e.g., PPC) use the
   count register for branch on table instructions.  We reject the doloop
   pattern in these cases.  */

const char *
default_invalid_within_doloop (const rtx_insn *insn)
{
  if (CALL_P (insn))
    return "Function call in loop.";

  if (tablejump_p (insn, NULL, NULL) || computed_jump_p (insn))
    return "Computed branch in the loop.";

  return NULL;
}

/* Mapping of builtin functions to vectorized variants.  */

tree
default_builtin_vectorized_function (unsigned int, tree, tree)
{
  return NULL_TREE;
}

/* Mapping of target builtin functions to vectorized variants.  */

tree
default_builtin_md_vectorized_function (tree, tree, tree)
{
  return NULL_TREE;
}

/* Default vectorizer cost model values.  */

int
default_builtin_vectorization_cost (enum vect_cost_for_stmt type_of_cost,
				    tree vectype,
				    int misalign ATTRIBUTE_UNUSED)
{
  switch (type_of_cost)
    {
    case scalar_stmt:
    case scalar_load:
    case scalar_store:
    case vector_stmt:
    case vector_load:
    case vector_store:
    case vec_to_scalar:
    case scalar_to_vec:
    case cond_branch_not_taken:
    case vec_perm:
    case vec_promote_demote:
      return 1;

    case unaligned_load:
    case unaligned_store:
      return 2;

    case cond_branch_taken:
      return 3;

    case vec_construct:
      return estimated_poly_value (TYPE_VECTOR_SUBPARTS (vectype)) - 1;

    default:
      gcc_unreachable ();
    }
}
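
/* With these defaults, a V4SF vec_construct costs 3 (one less than
   the number of elements), an unaligned access costs twice an aligned
   one, and a taken conditional branch costs three times a plain
   statement.  */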

/* Reciprocal.  */

tree
default_builtin_reciprocal (tree)
{
  return NULL_TREE;
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_false (cumulative_args_t,
					  const function_arg_info &)
{
  return false;
}

bool
hook_bool_CUMULATIVE_ARGS_arg_info_true (cumulative_args_t,
					 const function_arg_info &)
{
  return true;
}

int
hook_int_CUMULATIVE_ARGS_arg_info_0 (cumulative_args_t,
				     const function_arg_info &)
{
  return 0;
}

void
hook_void_CUMULATIVE_ARGS_tree (cumulative_args_t ca ATTRIBUTE_UNUSED,
				tree ATTRIBUTE_UNUSED)
{
}

/* Default implementation of TARGET_PUSH_ARGUMENT.  */

bool
default_push_argument (unsigned int)
{
#ifdef PUSH_ROUNDING
  return !ACCUMULATE_OUTGOING_ARGS;
#else
  return false;
#endif
}

void
default_function_arg_advance (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

/* Default implementation of TARGET_FUNCTION_ARG_OFFSET.  */

HOST_WIDE_INT
default_function_arg_offset (machine_mode, const_tree)
{
  return 0;
}

/* Default implementation of TARGET_FUNCTION_ARG_PADDING: usually pad
   upward, but pad short args downward on big-endian machines.  */

pad_direction
default_function_arg_padding (machine_mode mode, const_tree type)
{
  if (!BYTES_BIG_ENDIAN)
    return PAD_UPWARD;

  unsigned HOST_WIDE_INT size;
  if (mode == BLKmode)
    {
      if (!type || TREE_CODE (TYPE_SIZE (type)) != INTEGER_CST)
	return PAD_UPWARD;
      size = int_size_in_bytes (type);
    }
  else
    /* Targets with variable-sized modes must override this hook
       and handle variable-sized modes explicitly.  */
    size = GET_MODE_SIZE (mode).to_constant ();

  if (size < (PARM_BOUNDARY / BITS_PER_UNIT))
    return PAD_DOWNWARD;

  return PAD_UPWARD;
}
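
/* For example, on a big-endian target with 32-bit PARM_BOUNDARY, a
   2-byte argument occupies only part of its 4-byte slot and is
   therefore padded downward, whereas arguments of at least slot size
   are padded upward.  */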

rtx
default_function_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

rtx
default_function_incoming_arg (cumulative_args_t, const function_arg_info &)
{
  gcc_unreachable ();
}

unsigned int
default_function_arg_boundary (machine_mode mode ATTRIBUTE_UNUSED,
			       const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

unsigned int
default_function_arg_round_boundary (machine_mode mode ATTRIBUTE_UNUSED,
				     const_tree type ATTRIBUTE_UNUSED)
{
  return PARM_BOUNDARY;
}

void
hook_void_bitmap (bitmap regs ATTRIBUTE_UNUSED)
{
}

const char *
hook_invalid_arg_for_unprototyped_fn (
	const_tree typelist ATTRIBUTE_UNUSED,
	const_tree funcdecl ATTRIBUTE_UNUSED,
	const_tree val ATTRIBUTE_UNUSED)
{
  return NULL;
}

/* Initialize the stack protection decls.  */

/* Stack protection related decls living in libgcc.  */
static GTY(()) tree stack_chk_guard_decl;

tree
default_stack_protect_guard (void)
{
  tree t = stack_chk_guard_decl;

  if (t == NULL)
    {
      rtx x;

      t = build_decl (UNKNOWN_LOCATION,
		      VAR_DECL, get_identifier ("__stack_chk_guard"),
		      ptr_type_node);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;

      /* Do not share RTL as the declaration is visible outside of
	 current function.  */
      x = DECL_RTL (t);
      RTX_FLAG (x, used) = 1;

      stack_chk_guard_decl = t;
    }

  return t;
}

static GTY(()) tree stack_chk_fail_decl;

tree
default_external_stack_protect_fail (void)
{
  tree t = stack_chk_fail_decl;

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION,
		      FUNCTION_DECL, get_identifier ("__stack_chk_fail"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
      DECL_VISIBILITY_SPECIFIED (t) = 1;

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
}

tree
default_hidden_stack_protect_fail (void)
{
#ifndef HAVE_GAS_HIDDEN
  return default_external_stack_protect_fail ();
#else
  tree t = stack_chk_fail_decl;

  if (!flag_pic)
    return default_external_stack_protect_fail ();

  if (t == NULL_TREE)
    {
      t = build_function_type_list (void_type_node, NULL_TREE);
      t = build_decl (UNKNOWN_LOCATION, FUNCTION_DECL,
		      get_identifier ("__stack_chk_fail_local"), t);
      TREE_STATIC (t) = 1;
      TREE_PUBLIC (t) = 1;
      DECL_EXTERNAL (t) = 1;
      TREE_USED (t) = 1;
      TREE_THIS_VOLATILE (t) = 1;
      TREE_NOTHROW (t) = 1;
      DECL_ARTIFICIAL (t) = 1;
      DECL_IGNORED_P (t) = 1;
      DECL_VISIBILITY_SPECIFIED (t) = 1;
#if 1
      /* This is a hack: it appears that our gas does not generate
	 @PLT for hidden symbols.  It could be that we need a newer
	 version, or that this local function is handled differently
	 on linux.  */
      DECL_VISIBILITY (t) = VISIBILITY_DEFAULT;
#else
      DECL_VISIBILITY (t) = VISIBILITY_HIDDEN;
#endif

      stack_chk_fail_decl = t;
    }

  return build_call_expr (t, 0);
#endif
}

bool
hook_bool_const_rtx_commutative_p (const_rtx x,
				   int outer_code ATTRIBUTE_UNUSED)
{
  return COMMUTATIVE_P (x);
}

rtx
default_function_value (const_tree ret_type ATTRIBUTE_UNUSED,
			const_tree fn_decl_or_type,
			bool outgoing ATTRIBUTE_UNUSED)
{
  /* The old interface doesn't handle receiving the function type.  */
  if (fn_decl_or_type
      && !DECL_P (fn_decl_or_type))
    fn_decl_or_type = NULL;

#ifdef FUNCTION_VALUE
  return FUNCTION_VALUE (ret_type, fn_decl_or_type);
#else
  gcc_unreachable ();
#endif
}

rtx
default_libcall_value (machine_mode mode ATTRIBUTE_UNUSED,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
#ifdef LIBCALL_VALUE
  return LIBCALL_VALUE (MACRO_MODE (mode));
#else
  gcc_unreachable ();
#endif
}

/* The default hook for TARGET_FUNCTION_VALUE_REGNO_P.  */

bool
default_function_value_regno_p (const unsigned int regno ATTRIBUTE_UNUSED)
{
#ifdef FUNCTION_VALUE_REGNO_P
  return FUNCTION_VALUE_REGNO_P (regno);
#else
  gcc_unreachable ();
#endif
}
/* Choose the mode and rtx to use to zero REGNO, storing them in *PMODE
   and *PREGNO_RTX and returning TRUE if successful, otherwise returning
   FALSE.  If the natural mode for REGNO doesn't work, attempt to group
   it with subsequent adjacent registers set in TOZERO.  */

static inline bool
zcur_select_mode_rtx (unsigned int regno, machine_mode *pmode,
		      rtx *pregno_rtx, HARD_REG_SET tozero)
{
  rtx regno_rtx = regno_reg_rtx[regno];
  machine_mode mode = GET_MODE (regno_rtx);

  /* If the natural mode doesn't work, try some wider mode.  */
  if (!targetm.hard_regno_mode_ok (regno, mode))
    {
      bool found = false;
      for (int nregs = 2;
	   !found && nregs <= hard_regno_max_nregs
	     && regno + nregs <= FIRST_PSEUDO_REGISTER
	     && TEST_HARD_REG_BIT (tozero,
				   regno + nregs - 1);
	   nregs++)
	{
	  mode = choose_hard_reg_mode (regno, nregs, 0);
	  if (mode == E_VOIDmode)
	    continue;
	  gcc_checking_assert (targetm.hard_regno_mode_ok (regno, mode));
	  regno_rtx = gen_rtx_REG (mode, regno);
	  found = true;
	}
      if (!found)
	return false;
    }

  *pmode = mode;
  *pregno_rtx = regno_rtx;
  return true;
}

/* The default hook for TARGET_ZERO_CALL_USED_REGS.  */

HARD_REG_SET
default_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
{
  gcc_assert (!hard_reg_set_empty_p (need_zeroed_hardregs));

  HARD_REG_SET failed;
  CLEAR_HARD_REG_SET (failed);
  bool progress = false;

  /* First, try to zero each register in need_zeroed_hardregs by
     loading a zero into it, taking note of any failures in
     FAILED.  */
  for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
    if (TEST_HARD_REG_BIT (need_zeroed_hardregs, regno))
      {
	rtx_insn *last_insn = get_last_insn ();
	rtx regno_rtx;
	machine_mode mode;

	if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				   need_zeroed_hardregs))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    continue;
	  }

	rtx zero = CONST0_RTX (mode);
	rtx_insn *insn = emit_move_insn (regno_rtx, zero);
	if (!valid_insn_p (insn))
	  {
	    SET_HARD_REG_BIT (failed, regno);
	    delete_insns_since (last_insn);
	  }
	else
	  {
	    progress = true;
	    regno += hard_regno_nregs (regno, mode) - 1;
	  }
      }

  /* Now retry with copies from zeroed registers, as long as we've
     made some PROGRESS, and registers remain to be zeroed in
     FAILED.  */
  while (progress && !hard_reg_set_empty_p (failed))
    {
      HARD_REG_SET retrying = failed;

      CLEAR_HARD_REG_SET (failed);
      progress = false;

      for (unsigned int regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
	if (TEST_HARD_REG_BIT (retrying, regno))
	  {
	    rtx regno_rtx;
	    machine_mode mode;

	    /* This might select registers we've already zeroed.  If grouping
	       with them is what it takes to get regno zeroed, so be it.  */
	    if (!zcur_select_mode_rtx (regno, &mode, &regno_rtx,
				       need_zeroed_hardregs))
	      {
		SET_HARD_REG_BIT (failed, regno);
		continue;
	      }

	    bool success = false;
	    /* Look for a source.  */
	    for (unsigned int src = 0; src < FIRST_PSEUDO_REGISTER; src++)
	      {
		/* If SRC hasn't been zeroed (yet?), skip it.  */
		if (! TEST_HARD_REG_BIT (need_zeroed_hardregs, src))
		  continue;
		if (TEST_HARD_REG_BIT (retrying, src))
		  continue;

		/* Check that SRC can hold MODE, and that any other
		   registers needed to hold MODE in SRC have also been
		   zeroed.  */
		if (!targetm.hard_regno_mode_ok (src, mode))
		  continue;
		unsigned n = targetm.hard_regno_nregs (src, mode);
		bool ok = true;
		for (unsigned i = 1; ok && i < n; i++)
		  ok = (TEST_HARD_REG_BIT (need_zeroed_hardregs, src + i)
			&& !TEST_HARD_REG_BIT (retrying, src + i));
		if (!ok)
		  continue;

		/* SRC is usable, try to copy from it.  */
		rtx_insn *last_insn = get_last_insn ();
		rtx src_rtx = gen_rtx_REG (mode, src);
		rtx_insn *insn = emit_move_insn (regno_rtx, src_rtx);
		if (!valid_insn_p (insn))
		  /* It didn't work, remove any inserts.  We'll look
		     for another SRC.  */
		  delete_insns_since (last_insn);
		else
		  {
		    /* We're done for REGNO.  */
		    success = true;
		    break;
		  }
	      }

	    /* If nothing worked for REGNO this round, mark it to be
	       retried if we get another round.  */
	    if (!success)
	      SET_HARD_REG_BIT (failed, regno);
	    else
	      {
		/* Take note so as to enable another round if needed.  */
		progress = true;
		regno += hard_regno_nregs (regno, mode) - 1;
	      }
	  }
    }

  /* If any register remained, report it.  */
  if (!progress)
    {
      static bool issued_error;
      if (!issued_error)
	{
	  issued_error = true;
	  sorry ("%qs not supported on this target",
		 "-fzero-call-used-regs");
	}
    }

  return need_zeroed_hardregs;
}
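
/* Targets often wrap rather than replace this hook: an override can
   first emit any target-specific zeroing sequences and then delegate
   the remaining registers to the default.  A hypothetical sketch:

       static HARD_REG_SET
       my_zero_call_used_regs (HARD_REG_SET need_zeroed_hardregs)
       {
	 ... emit special-case insns, clear those bits ...
	 return default_zero_call_used_regs (need_zeroed_hardregs);
       }  */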

rtx
default_internal_arg_pointer (void)
{
  /* If the reg that the virtual arg pointer will be translated into is
     not a fixed reg or is the stack pointer, make a copy of the virtual
     arg pointer, and address parms via the copy.  The frame pointer is
     considered fixed even though it is not marked as such.  */
  if ((ARG_POINTER_REGNUM == STACK_POINTER_REGNUM
       || ! (fixed_regs[ARG_POINTER_REGNUM]
	     || ARG_POINTER_REGNUM == FRAME_POINTER_REGNUM)))
    return copy_to_reg (virtual_incoming_args_rtx);
  else
    return virtual_incoming_args_rtx;
}

rtx
default_static_chain (const_tree ARG_UNUSED (fndecl_or_type), bool incoming_p)
{
  if (incoming_p)
    {
#ifdef STATIC_CHAIN_INCOMING_REGNUM
      return gen_rtx_REG (Pmode, STATIC_CHAIN_INCOMING_REGNUM);
#endif
    }

#ifdef STATIC_CHAIN_REGNUM
  return gen_rtx_REG (Pmode, STATIC_CHAIN_REGNUM);
#endif

  {
    static bool issued_error;
    if (!issued_error)
      {
	issued_error = true;
	sorry ("nested functions not supported on this target");
      }

    /* It really doesn't matter what we return here, so long as it
       doesn't cause the rest of the compiler to crash.  */
    return gen_rtx_MEM (Pmode, stack_pointer_rtx);
  }
}

void
default_trampoline_init (rtx ARG_UNUSED (m_tramp), tree ARG_UNUSED (t_func),
			 rtx ARG_UNUSED (r_chain))
{
  sorry ("nested function trampolines not supported on this target");
}

poly_int64
default_return_pops_args (tree, tree, poly_int64)
{
  return 0;
}

reg_class_t
default_ira_change_pseudo_allocno_class (int regno ATTRIBUTE_UNUSED,
					 reg_class_t cl,
					 reg_class_t best_cl ATTRIBUTE_UNUSED)
{
  return cl;
}

bool
default_lra_p (void)
{
  return true;
}

int
default_register_priority (int hard_regno ATTRIBUTE_UNUSED)
{
  return 0;
}

bool
default_register_usage_leveling_p (void)
{
  return false;
}

bool
default_different_addr_displacement_p (void)
{
  return false;
}

reg_class_t
default_secondary_reload (bool in_p ATTRIBUTE_UNUSED, rtx x ATTRIBUTE_UNUSED,
			  reg_class_t reload_class_i ATTRIBUTE_UNUSED,
			  machine_mode reload_mode ATTRIBUTE_UNUSED,
			  secondary_reload_info *sri)
{
  enum reg_class rclass = NO_REGS;
  enum reg_class reload_class = (enum reg_class) reload_class_i;

  if (sri->prev_sri && sri->prev_sri->t_icode != CODE_FOR_nothing)
    {
      sri->icode = sri->prev_sri->t_icode;
      return NO_REGS;
    }
#ifdef SECONDARY_INPUT_RELOAD_CLASS
  if (in_p)
    rclass = SECONDARY_INPUT_RELOAD_CLASS (reload_class,
					   MACRO_MODE (reload_mode), x);
#endif
#ifdef SECONDARY_OUTPUT_RELOAD_CLASS
  if (! in_p)
    rclass = SECONDARY_OUTPUT_RELOAD_CLASS (reload_class,
					    MACRO_MODE (reload_mode), x);
#endif
  if (rclass != NO_REGS)
    {
      enum insn_code icode
	= direct_optab_handler (in_p ? reload_in_optab : reload_out_optab,
				reload_mode);

      if (icode != CODE_FOR_nothing
	  && !insn_operand_matches (icode, in_p, x))
	icode = CODE_FOR_nothing;
      else if (icode != CODE_FOR_nothing)
	{
	  const char *insn_constraint, *scratch_constraint;
	  enum reg_class insn_class, scratch_class;

	  gcc_assert (insn_data[(int) icode].n_operands == 3);
	  insn_constraint = insn_data[(int) icode].operand[!in_p].constraint;
	  if (!*insn_constraint)
	    insn_class = ALL_REGS;
	  else
	    {
	      if (in_p)
		{
		  gcc_assert (*insn_constraint == '=');
		  insn_constraint++;
		}
	      insn_class = (reg_class_for_constraint
			    (lookup_constraint (insn_constraint)));
	      gcc_assert (insn_class != NO_REGS);
	    }

	  scratch_constraint = insn_data[(int) icode].operand[2].constraint;
	  /* The scratch register's constraint must start with "=&",
	     except for an input reload, where only "=" is necessary,
	     and where it might be beneficial to re-use registers from
	     the input.  */
	  gcc_assert (scratch_constraint[0] == '='
		      && (in_p || scratch_constraint[1] == '&'));
	  scratch_constraint++;
	  if (*scratch_constraint == '&')
	    scratch_constraint++;
	  scratch_class = (reg_class_for_constraint
			   (lookup_constraint (scratch_constraint)));

	  if (reg_class_subset_p (reload_class, insn_class))
	    {
	      gcc_assert (scratch_class == rclass);
	      rclass = NO_REGS;
	    }
	  else
	    rclass = insn_class;
	}
      if (rclass == NO_REGS)
	sri->icode = icode;
      else
	sri->t_icode = icode;
    }
  return rclass;
}

/* The default implementation of TARGET_SECONDARY_MEMORY_NEEDED_MODE.  */

machine_mode
default_secondary_memory_needed_mode (machine_mode mode)
{
  if (!targetm.lra_p ()
      && known_lt (GET_MODE_BITSIZE (mode), BITS_PER_WORD)
      && INTEGRAL_MODE_P (mode))
    return mode_for_size (BITS_PER_WORD, GET_MODE_CLASS (mode), 0).require ();
  return mode;
}

/* By default, if flag_pic is true, then neither local nor global relocs
   should be placed in readonly memory.  */

int
default_reloc_rw_mask (void)
{
  return flag_pic ? 3 : 0;
}
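
/* The mask uses bit 0 for relocations to local data and bit 1 for
   relocations to global data, so the default of 3 when flag_pic is
   set keeps both kinds out of read-only sections, while 0 permits
   both.  */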

/* By default, address diff vectors are generated for jump tables when
   flag_pic is true.  */

bool
default_generate_pic_addr_diff_vec (void)
{
  return flag_pic;
}

/* By default, do no modification.  */
tree
default_mangle_decl_assembler_name (tree decl ATTRIBUTE_UNUSED,
				    tree id)
{
  return id;
}

/* The default implementation of TARGET_STATIC_RTX_ALIGNMENT.  */

HOST_WIDE_INT
default_static_rtx_alignment (machine_mode mode)
{
  return GET_MODE_ALIGNMENT (mode);
}

/* The default implementation of TARGET_CONSTANT_ALIGNMENT.  */

HOST_WIDE_INT
default_constant_alignment (const_tree, HOST_WIDE_INT align)
{
  return align;
}

/* An implementation of TARGET_CONSTANT_ALIGNMENT that aligns strings
   to at least BITS_PER_WORD but otherwise makes no changes.  */

HOST_WIDE_INT
constant_alignment_word_strings (const_tree exp, HOST_WIDE_INT align)
{
  if (TREE_CODE (exp) == STRING_CST)
    return MAX (align, BITS_PER_WORD);
  return align;
}

/* Default to natural alignment for vector types, bounded by
   MAX_OFILE_ALIGNMENT.  */

HOST_WIDE_INT
default_vector_alignment (const_tree type)
{
  unsigned HOST_WIDE_INT align = MAX_OFILE_ALIGNMENT;
  tree size = TYPE_SIZE (type);
  if (tree_fits_uhwi_p (size))
    align = tree_to_uhwi (size);
  if (align >= MAX_OFILE_ALIGNMENT)
    return MAX_OFILE_ALIGNMENT;
  return MAX (align, GET_MODE_ALIGNMENT (TYPE_MODE (type)));
}

/* The default implementation of
   TARGET_VECTORIZE_PREFERRED_VECTOR_ALIGNMENT.  */

poly_uint64
default_preferred_vector_alignment (const_tree type)
{
  return TYPE_ALIGN (type);
}

/* By default assume vectors of element TYPE require a multiple of the natural
   alignment of TYPE.  TYPE is naturally aligned if IS_PACKED is false.  */
bool
default_builtin_vector_alignment_reachable (const_tree /*type*/, bool is_packed)
{
  return ! is_packed;
}

/* By default, assume that a target supports misaligned memory accesses of
   any factor if it provides a movmisalign pattern.  IS_PACKED is true if
   the memory access is defined in a packed struct.  */
bool
default_builtin_support_vector_misalignment (machine_mode mode,
					     const_tree type
					     ATTRIBUTE_UNUSED,
					     int misalignment
					     ATTRIBUTE_UNUSED,
					     bool is_packed
					     ATTRIBUTE_UNUSED)
{
  if (optab_handler (movmisalign_optab, mode) != CODE_FOR_nothing)
    return true;
  return false;
}

/* By default, only attempt to parallelize bitwise operations, and
   possibly adds/subtracts using bit-twiddling.  */

machine_mode
default_preferred_simd_mode (scalar_mode)
{
  return word_mode;
}

/* By default do not split reductions further.  */

machine_mode
default_split_reduction (machine_mode mode)
{
  return mode;
}

/* By default only the preferred vector mode is tried.  */

unsigned int
default_autovectorize_vector_modes (vector_modes *, bool)
{
  return 0;
}

/* The default implementation of TARGET_VECTORIZE_RELATED_MODE.  */

opt_machine_mode
default_vectorize_related_mode (machine_mode vector_mode,
				scalar_mode element_mode,
				poly_uint64 nunits)
{
  machine_mode result_mode;
  if ((maybe_ne (nunits, 0U)
       || multiple_p (GET_MODE_SIZE (vector_mode),
		      GET_MODE_SIZE (element_mode), &nunits))
      && mode_for_vector (element_mode, nunits).exists (&result_mode)
      && VECTOR_MODE_P (result_mode)
      && targetm.vector_mode_supported_p (result_mode))
    return result_mode;

  return opt_machine_mode ();
}

/* By default a vector of integers is used as a mask.  */

opt_machine_mode
default_get_mask_mode (machine_mode mode)
{
  return related_int_vector_mode (mode);
}

/* By default consider masked stores to be expensive.  */

bool
default_empty_mask_is_expensive (unsigned ifn)
{
  return ifn == IFN_MASK_STORE;
}

/* By default, the cost model accumulates three separate costs (prologue,
   loop body, and epilogue) for a vectorized loop or block.  So create a
   default vector_costs object that tracks those costs and return it.  */

vector_costs *
default_vectorize_create_costs (vec_info *vinfo, bool costing_for_scalar)
{
  return new vector_costs (vinfo, costing_for_scalar);
}

/* Determine whether or not a pointer mode is valid.  Assume defaults
   of ptr_mode or Pmode - can be overridden.  */
bool
default_valid_pointer_mode (scalar_int_mode mode)
{
  return (mode == ptr_mode || mode == Pmode);
}

/* Determine whether the memory reference specified by REF may alias
   the C libraries errno location.  */
bool
default_ref_may_alias_errno (ao_ref *ref)
{
  tree base = ao_ref_base (ref);
  /* The default implementation assumes the errno location is
     a declaration of type int or is always accessed via a
     pointer to int.  We assume that accesses to errno are
     not deliberately obfuscated (even in conforming ways).  */
  if (TYPE_UNSIGNED (TREE_TYPE (base))
      || TYPE_MODE (TREE_TYPE (base)) != TYPE_MODE (integer_type_node))
    return false;
  /* The default implementation assumes an errno location declaration
     is never defined in the current compilation unit and may not be
     aliased by a local variable.  */
  if (DECL_P (base)
      && DECL_EXTERNAL (base)
      && !TREE_STATIC (base))
    return true;
  else if (TREE_CODE (base) == MEM_REF
	   && TREE_CODE (TREE_OPERAND (base, 0)) == SSA_NAME)
    {
      struct ptr_info_def *pi = SSA_NAME_PTR_INFO (TREE_OPERAND (base, 0));
      return !pi || pi->pt.anything || pi->pt.nonlocal;
    }
  return false;
}

/* Return the mode for a pointer to a given ADDRSPACE,
   defaulting to ptr_mode for all address spaces.  */

scalar_int_mode
default_addr_space_pointer_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return ptr_mode;
}

/* Return the mode for an address in a given ADDRSPACE,
   defaulting to Pmode for all address spaces.  */

scalar_int_mode
default_addr_space_address_mode (addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return Pmode;
}

/* Named address space version of valid_pointer_mode.
   To match the above, the same modes apply to all address spaces.  */

bool
default_addr_space_valid_pointer_mode (scalar_int_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.valid_pointer_mode (mode);
}

/* Some places still assume that all pointer or address modes are the
   standard Pmode and ptr_mode.  These optimizations become invalid if
   the target actually supports multiple different modes.  For now,
   we disable such optimizations on such targets, using this function.  */

bool
target_default_pointer_address_modes_p (void)
{
  if (targetm.addr_space.address_mode != default_addr_space_address_mode)
    return false;
  if (targetm.addr_space.pointer_mode != default_addr_space_pointer_mode)
    return false;

  return true;
}

/* Named address space version of legitimate_address_p.
   By default, all address spaces have the same form.  */

bool
default_addr_space_legitimate_address_p (machine_mode mode, rtx mem,
					 bool strict,
					 addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.legitimate_address_p (mode, mem, strict);
}

/* Named address space version of LEGITIMIZE_ADDRESS.
   By default, all address spaces have the same form.  */

rtx
default_addr_space_legitimize_address (rtx x, rtx oldx, machine_mode mode,
				       addr_space_t as ATTRIBUTE_UNUSED)
{
  return targetm.legitimize_address (x, oldx, mode);
}

/* The default hook for determining if one named address space is a subset of
   another and to return which address space to use as the common address
   space.  */

bool
default_addr_space_subset_p (addr_space_t subset, addr_space_t superset)
{
  return (subset == superset);
}

/* The default hook for determining if 0 within a named address
   space is a valid address.  */

bool
default_addr_space_zero_address_valid (addr_space_t as ATTRIBUTE_UNUSED)
{
  return false;
}

/* The default hook for debugging the address space is to return the
   address space number to indicate DW_AT_address_class.  */
int
default_addr_space_debug (addr_space_t as)
{
  return as;
}

/* The default hook implementation for TARGET_ADDR_SPACE_DIAGNOSE_USAGE.
   Don't complain about any address space.  */

void
default_addr_space_diagnose_usage (addr_space_t, location_t)
{
}

/* The default hook for TARGET_ADDR_SPACE_CONVERT.  This hook should never be
   called for targets with only a generic address space.  */

rtx
default_addr_space_convert (rtx op ATTRIBUTE_UNUSED,
			    tree from_type ATTRIBUTE_UNUSED,
			    tree to_type ATTRIBUTE_UNUSED)
{
  gcc_unreachable ();
}

/* The default implementation of TARGET_HARD_REGNO_NREGS.  */

unsigned int
default_hard_regno_nregs (unsigned int, machine_mode mode)
{
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  return CEIL (GET_MODE_SIZE (mode).to_constant (), UNITS_PER_WORD);
}

bool
default_hard_regno_scratch_ok (unsigned int regno ATTRIBUTE_UNUSED)
{
  return true;
}

/* The default implementation of TARGET_MODE_DEPENDENT_ADDRESS_P.  */

bool
default_mode_dependent_address_p (const_rtx addr ATTRIBUTE_UNUSED,
				  addr_space_t addrspace ATTRIBUTE_UNUSED)
{
  return false;
}

/* The default implementation of TARGET_NEW_ADDRESS_PROFITABLE_P.  */

bool
default_new_address_profitable_p (rtx memref ATTRIBUTE_UNUSED,
				  rtx_insn *insn ATTRIBUTE_UNUSED,
				  rtx new_addr ATTRIBUTE_UNUSED)
{
  return true;
}

bool
default_target_option_valid_attribute_p (tree ARG_UNUSED (fndecl),
					 tree ARG_UNUSED (name),
					 tree ARG_UNUSED (args),
					 int ARG_UNUSED (flags))
{
  warning (OPT_Wattributes,
	   "target attribute is not supported on this machine");

  return false;
}

bool
default_target_option_pragma_parse (tree ARG_UNUSED (args),
				    tree ARG_UNUSED (pop_target))
{
  /* If args is NULL the caller is handle_pragma_pop_options ().  In that
     case, emit no warning because "#pragma GCC pop_options" is valid on
     targets that do not have the "target" pragma.  */
  if (args)
    warning (OPT_Wpragmas,
	     "%<#pragma GCC target%> is not supported for this machine");

  return false;
}

bool
default_target_can_inline_p (tree caller, tree callee)
{
  tree callee_opts = DECL_FUNCTION_SPECIFIC_TARGET (callee);
  tree caller_opts = DECL_FUNCTION_SPECIFIC_TARGET (caller);
  if (! callee_opts)
    callee_opts = target_option_default_node;
  if (! caller_opts)
    caller_opts = target_option_default_node;

  /* If both caller and callee have attributes, assume that if the
     pointer is different, the two functions have different target
     options since build_target_option_node uses a hash table for the
     options.  */
  return callee_opts == caller_opts;
}

/* By default, return false, meaning no target information needs to be
   collected for inlining.  Target maintainers should re-define the hook
   if the target wants to take advantage of it.  */

bool
default_need_ipa_fn_target_info (const_tree, unsigned int &)
{
  return false;
}

bool
default_update_ipa_fn_target_info (unsigned int &, const gimple *)
{
  return false;
}

/* If the machine does not have a case insn that compares the bounds,
   this means extra overhead for dispatch tables, which raises the
   threshold for using them.  */

unsigned int
default_case_values_threshold (void)
{
  return (targetm.have_casesi () ? 4 : 5);
}

bool
default_have_conditional_execution (void)
{
  return HAVE_conditional_execution;
}

/* By default we assume that c99 functions are present in the runtime
   library, but sincos is not.  */
bool
default_libc_has_function (enum function_class fn_class,
			   tree type ATTRIBUTE_UNUSED)
{
  if (fn_class == function_c94
      || fn_class == function_c99_misc
      || fn_class == function_c99_math_complex)
    return true;

  return false;
}

/* By default assume that libc does not have a fast implementation.  */

bool
default_libc_has_fast_function (int fcode ATTRIBUTE_UNUSED)
{
  return false;
}

bool
gnu_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
		       tree type ATTRIBUTE_UNUSED)
{
  return true;
}

bool
no_c99_libc_has_function (enum function_class fn_class ATTRIBUTE_UNUSED,
			  tree type ATTRIBUTE_UNUSED)
{
  return false;
}

/* Assume some c99 functions are present in the runtime library,
   including sincos.  */
bool
bsd_libc_has_function (enum function_class fn_class,
		       tree type ATTRIBUTE_UNUSED)
{
  if (fn_class == function_c94
      || fn_class == function_c99_misc
      || fn_class == function_sincos)
    return true;

  return false;
}


tree
default_builtin_tm_load_store (tree ARG_UNUSED (type))
{
  return NULL_TREE;
}

/* Compute cost of moving registers to/from memory.  */

int
default_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			  reg_class_t rclass ATTRIBUTE_UNUSED,
			  bool in ATTRIBUTE_UNUSED)
{
#ifndef MEMORY_MOVE_COST
  return (4 + memory_move_secondary_cost (mode, (enum reg_class) rclass, in));
#else
  return MEMORY_MOVE_COST (MACRO_MODE (mode), (enum reg_class) rclass, in);
#endif
}

/* Compute cost of moving data from a register of class FROM to one of
   TO, using MODE.  */

int
default_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
			    reg_class_t from ATTRIBUTE_UNUSED,
			    reg_class_t to ATTRIBUTE_UNUSED)
{
#ifndef REGISTER_MOVE_COST
  return 2;
#else
  return REGISTER_MOVE_COST (MACRO_MODE (mode),
			     (enum reg_class) from, (enum reg_class) to);
#endif
}

/* The default implementation of TARGET_SLOW_UNALIGNED_ACCESS.  */

bool
default_slow_unaligned_access (machine_mode, unsigned int)
{
  return STRICT_ALIGNMENT;
}

/* The default implementation of TARGET_ESTIMATED_POLY_VALUE.  */

HOST_WIDE_INT
default_estimated_poly_value (poly_int64 x, poly_value_estimate_kind)
{
  return x.coeffs[0];
}

/* For hooks which use the MOVE_RATIO macro, this gives the legacy default
   behavior.  SPEED_P is true if we are compiling for speed.  */

unsigned int
get_move_ratio (bool speed_p ATTRIBUTE_UNUSED)
{
  unsigned int move_ratio;
#ifdef MOVE_RATIO
  move_ratio = (unsigned int) MOVE_RATIO (speed_p);
#else
#if defined (HAVE_cpymemqi) || defined (HAVE_cpymemhi) || defined (HAVE_cpymemsi) || defined (HAVE_cpymemdi) || defined (HAVE_cpymemti)
  move_ratio = 2;
#else /* No cpymem patterns, pick a default.  */
  move_ratio = ((speed_p) ? 15 : 3);
#endif
#endif
  return move_ratio;
}

/* Return TRUE if the move_by_pieces/set_by_pieces infrastructure should be
   used; return FALSE if the cpymem/setmem optab should be expanded, or
   a call to memcpy emitted.  */

bool
default_use_by_pieces_infrastructure_p (unsigned HOST_WIDE_INT size,
					unsigned int alignment,
					enum by_pieces_operation op,
					bool speed_p)
{
  unsigned int max_size = 0;
  unsigned int ratio = 0;

  switch (op)
    {
    case CLEAR_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = CLEAR_RATIO (speed_p);
      break;
    case MOVE_BY_PIECES:
      max_size = MOVE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case SET_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = SET_RATIO (speed_p);
      break;
    case STORE_BY_PIECES:
      max_size = STORE_MAX_PIECES;
      ratio = get_move_ratio (speed_p);
      break;
    case COMPARE_BY_PIECES:
      max_size = COMPARE_MAX_PIECES;
      /* Pick a likely default, just as in get_move_ratio.  */
      ratio = speed_p ? 15 : 3;
      break;
    }

  return by_pieces_ninsns (size, alignment, max_size + 1, op) < ratio;
}
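
/* For example, for a constant-size 16-byte copy, this asks
   by_pieces_ninsns how many moves the copy would take at the given
   alignment and moves by pieces only if that count is below the
   ratio for the operation (e.g. 15 when optimizing for speed with
   the fallback MOVE_RATIO default above).  */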

/* This hook controls code generation for expanding a memcmp operation by
   pieces.  Return 1 for the normal pattern of compare/jump after each pair
   of loads, or a higher number to reduce the number of branches.  */

int
default_compare_by_pieces_branch_ratio (machine_mode)
{
  return 1;
}

/* Helper for default_print_patchable_function_entry and other
   print_patchable_function_entry hook implementations.  */

void
default_print_patchable_function_entry_1 (FILE *file,
					  unsigned HOST_WIDE_INT
					  patch_area_size,
					  bool record_p,
					  unsigned int flags)
{
  const char *nop_templ = 0;
  int code_num;
  rtx_insn *my_nop = make_insn_raw (gen_nop ());

  /* We use the template alone, relying on the (currently sane) assumption
     that the NOP template does not have variable operands.  */
  code_num = recog_memoized (my_nop);
  nop_templ = get_insn_template (code_num, my_nop);

  if (record_p && targetm_common.have_named_sections)
    {
      char buf[256];
      static int patch_area_number;
      section *previous_section = in_section;
      const char *asm_op = integer_asm_op (POINTER_SIZE_UNITS, false);

      gcc_assert (asm_op != NULL);
      patch_area_number++;
      ASM_GENERATE_INTERNAL_LABEL (buf, "LPFE", patch_area_number);

      switch_to_section (get_section ("__patchable_function_entries",
				      flags, current_function_decl));
      assemble_align (POINTER_SIZE);
      fputs (asm_op, file);
      assemble_name_raw (file, buf);
      fputc ('\n', file);

      switch_to_section (previous_section);
      ASM_OUTPUT_LABEL (file, buf);
    }

  unsigned i;
  for (i = 0; i < patch_area_size; ++i)
    output_asm_insn (nop_templ, NULL);
}
2037
2038/* Write PATCH_AREA_SIZE NOPs into the asm outfile FILE around a function
2039   entry.  If RECORD_P is true and the target supports named sections,
2040   the location of the NOPs will be recorded in a special object section
2041   called "__patchable_function_entries".  This routine may be called
2042   twice per function to put NOPs before and after the function
2043   entry.  */
2044
2045void
2046default_print_patchable_function_entry (FILE *file,
2047					unsigned HOST_WIDE_INT patch_area_size,
2048					bool record_p)
2049{
2050  unsigned int flags = SECTION_WRITE | SECTION_RELRO;
2051  if (HAVE_GAS_SECTION_LINK_ORDER)
2052    flags |= SECTION_LINK_ORDER;
2053  default_print_patchable_function_entry_1 (file, patch_area_size, record_p,
2054					    flags);
2055}
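
/* Usage sketch: compiling with -fpatchable-function-entry=2,1 requests two
   NOPs, one before the entry label and one after it, so this hook runs
   twice per function.  With record_p set, the output resembles the
   following (illustrative; the exact directives and section flags vary by
   target and assembler):

       .section __patchable_function_entries
       .quad .LPFE1
       .text
     .LPFE1:
       nop
*/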

bool
default_profile_before_prologue (void)
{
#ifdef PROFILE_BEFORE_PROLOGUE
  return true;
#else
  return false;
#endif
}
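
/* A target whose profiling counter call must precede the prologue simply
   defines the macro in its target header (a sketch; only the macro's
   definedness matters here, not its value):

     #define PROFILE_BEFORE_PROLOGUE 1
*/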

/* The default implementation of TARGET_PREFERRED_RELOAD_CLASS.  */

reg_class_t
default_preferred_reload_class (rtx x ATTRIBUTE_UNUSED,
			        reg_class_t rclass)
{
#ifdef PREFERRED_RELOAD_CLASS
  return (reg_class_t) PREFERRED_RELOAD_CLASS (x, (enum reg_class) rclass);
#else
  return rclass;
#endif
}

/* The default implementation of TARGET_OUTPUT_PREFERRED_RELOAD_CLASS.  */

reg_class_t
default_preferred_output_reload_class (rtx x ATTRIBUTE_UNUSED,
				       reg_class_t rclass)
{
  return rclass;
}

/* The default implementation of TARGET_PREFERRED_RENAME_CLASS.  */

reg_class_t
default_preferred_rename_class (reg_class_t rclass ATTRIBUTE_UNUSED)
{
  return NO_REGS;
}

/* The default implementation of TARGET_CLASS_LIKELY_SPILLED_P.  */

bool
default_class_likely_spilled_p (reg_class_t rclass)
{
  return (reg_class_size[(int) rclass] == 1);
}

/* The default implementation of TARGET_CLASS_MAX_NREGS.  */

unsigned char
default_class_max_nregs (reg_class_t rclass ATTRIBUTE_UNUSED,
			 machine_mode mode ATTRIBUTE_UNUSED)
{
#ifdef CLASS_MAX_NREGS
  return (unsigned char) CLASS_MAX_NREGS ((enum reg_class) rclass,
					  MACRO_MODE (mode));
#else
  /* Targets with variable-sized modes must provide their own definition
     of this hook.  */
  unsigned int size = GET_MODE_SIZE (mode).to_constant ();
  return (size + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
#endif
}
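
/* Worked example: with 8-byte words (UNITS_PER_WORD == 8), a 16-byte mode
   needs (16 + 8 - 1) / 8 == 2 hard registers, while a 4-byte mode rounds
   up to 1.  */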

/* Determine the debugging unwind mechanism for the target.  */

enum unwind_info_type
default_debug_unwind_info (void)
{
  /* If the target wants to force the use of dwarf2 unwind info, let it.  */
  /* ??? Change all users to the hook, then poison this.  */
#ifdef DWARF2_FRAME_INFO
  if (DWARF2_FRAME_INFO)
    return UI_DWARF2;
#endif

  /* Otherwise, only turn it on if dwarf2 debugging is enabled.  */
#ifdef DWARF2_DEBUGGING_INFO
  if (dwarf_debuginfo_p ())
    return UI_DWARF2;
#endif

  return UI_NONE;
}

/* Targets that set NUM_POLY_INT_COEFFS to something greater than 1
   must define this hook.  */

unsigned int
default_dwarf_poly_indeterminate_value (unsigned int, unsigned int *, int *)
{
  gcc_unreachable ();
}

/* Determine the correct mode for a Dwarf frame register that represents
   register REGNO.  */

machine_mode
default_dwarf_frame_reg_mode (int regno)
{
  machine_mode save_mode = reg_raw_mode[regno];

  if (targetm.hard_regno_call_part_clobbered (eh_edge_abi.id (),
					      regno, save_mode))
    save_mode = choose_hard_reg_mode (regno, 1, &eh_edge_abi);
  return save_mode;
}

/* To be used by targets where reg_raw_mode doesn't return the right
   mode for registers used in apply_builtin_return and apply_builtin_arg.  */

fixed_size_mode
default_get_reg_raw_mode (int regno)
{
  /* Targets must override this hook if the underlying register is
     variable-sized.  */
  return as_a <fixed_size_mode> (reg_raw_mode[regno]);
}

/* Return true if a leaf function should stay leaf even with profiling
   enabled.  */

bool
default_keep_leaf_when_profiled ()
{
  return false;
}

/* Return true if the state of option OPTION should be stored in PCH files
   and checked by default_pch_valid_p.  Store the option's current state
   in STATE if so.  */

static inline bool
option_affects_pch_p (int option, struct cl_option_state *state)
{
  if ((cl_options[option].flags & CL_TARGET) == 0)
    return false;
  if ((cl_options[option].flags & CL_PCH_IGNORE) != 0)
    return false;
  if (option_flag_var (option, &global_options) == &target_flags)
    if (targetm.check_pch_target_flags)
      return false;
  return get_option_state (&global_options, option, state);
}

/* Default version of get_pch_validity.
   By default, every flag difference is fatal; that will be mostly right for
   most targets, but completely right for very few.  */

void *
default_get_pch_validity (size_t *sz)
{
  struct cl_option_state state;
  size_t i;
  char *result, *r;

  *sz = 2;
  if (targetm.check_pch_target_flags)
    *sz += sizeof (target_flags);
  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      *sz += state.size;

  result = r = XNEWVEC (char, *sz);
  r[0] = flag_pic;
  r[1] = flag_pie;
  r += 2;
  if (targetm.check_pch_target_flags)
    {
      memcpy (r, &target_flags, sizeof (target_flags));
      r += sizeof (target_flags);
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	memcpy (r, state.data, state.size);
	r += state.size;
      }

  return result;
}
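
/* The validity blob built above is laid out as follows (derived from the
   code; offsets are in bytes):

     offset 0:   flag_pic
     offset 1:   flag_pie
     next sizeof (target_flags) bytes, only when
		 targetm.check_pch_target_flags is set: a copy of
		 target_flags
     then, for each option for which option_affects_pch_p holds:
		 its cl_option_state data, in option order

   default_pch_valid_p below walks the same layout in the same order.  */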

/* Return a message which says that a PCH file was created with a different
   setting of OPTION.  */

static const char *
pch_option_mismatch (const char *option)
{
  return xasprintf (_("created and used with differing settings of '%s'"),
		    option);
}

/* Default version of pch_valid_p.  */

const char *
default_pch_valid_p (const void *data_p, size_t len ATTRIBUTE_UNUSED)
{
  struct cl_option_state state;
  const char *data = (const char *)data_p;
  size_t i;

  /* -fpic and -fpie also usually make a PCH invalid.  */
  if (data[0] != flag_pic)
    return _("created and used with different settings of %<-fpic%>");
  if (data[1] != flag_pie)
    return _("created and used with different settings of %<-fpie%>");
  data += 2;

  /* Check target_flags.  */
  if (targetm.check_pch_target_flags)
    {
      int tf;
      const char *r;

      memcpy (&tf, data, sizeof (target_flags));
      data += sizeof (target_flags);
      r = targetm.check_pch_target_flags (tf);
      if (r != NULL)
	return r;
    }

  for (i = 0; i < cl_options_count; i++)
    if (option_affects_pch_p (i, &state))
      {
	if (memcmp (data, state.data, state.size) != 0)
	  return pch_option_mismatch (cl_options[i].opt_text);
	data += state.size;
      }

  return NULL;
}

/* Default version of cstore_mode.  */

scalar_int_mode
default_cstore_mode (enum insn_code icode)
{
  return as_a <scalar_int_mode> (insn_data[(int) icode].operand[0].mode);
}

/* Default version of member_type_forces_blk.  */

bool
default_member_type_forces_blk (const_tree, machine_mode)
{
  return false;
}

/* Default version of canonicalize_comparison.  */

void
default_canonicalize_comparison (int *, rtx *, rtx *, bool)
{
}

/* Default implementation of TARGET_ATOMIC_ASSIGN_EXPAND_FENV.  */

void
default_atomic_assign_expand_fenv (tree *, tree *, tree *)
{
}

#ifndef PAD_VARARGS_DOWN
#define PAD_VARARGS_DOWN BYTES_BIG_ENDIAN
#endif

/* Build an indirect-ref expression over the given TREE, which represents a
   piece of a va_arg() expansion.  */

tree
build_va_arg_indirect_ref (tree addr)
{
  addr = build_simple_mem_ref_loc (EXPR_LOCATION (addr), addr);
  return addr;
}

/* The "standard" implementation of va_arg: read the value from the
   current (padded) address and increment by the (padded) size.  */

tree
std_gimplify_va_arg_expr (tree valist, tree type, gimple_seq *pre_p,
			  gimple_seq *post_p)
{
  tree addr, t, type_size, rounded_size, valist_tmp;
  unsigned HOST_WIDE_INT align, boundary;
  bool indirect;

  /* All of the alignment and movement below is for args-grow-up machines.
     As of 2004, there are only 3 ARGS_GROW_DOWNWARD targets, and they all
     implement their own specialized gimplify_va_arg_expr routines.  */
  if (ARGS_GROW_DOWNWARD)
    gcc_unreachable ();

  indirect = pass_va_arg_by_reference (type);
  if (indirect)
    type = build_pointer_type (type);

  if (targetm.calls.split_complex_arg
      && TREE_CODE (type) == COMPLEX_TYPE
      && targetm.calls.split_complex_arg (type))
    {
      tree real_part, imag_part;

      real_part = std_gimplify_va_arg_expr (valist,
					    TREE_TYPE (type), pre_p, NULL);
      real_part = get_initialized_tmp_var (real_part, pre_p);

      imag_part = std_gimplify_va_arg_expr (unshare_expr (valist),
					    TREE_TYPE (type), pre_p, NULL);
      imag_part = get_initialized_tmp_var (imag_part, pre_p);

      return build2 (COMPLEX_EXPR, type, real_part, imag_part);
    }

  align = PARM_BOUNDARY / BITS_PER_UNIT;
  boundary = targetm.calls.function_arg_boundary (TYPE_MODE (type), type);

  /* When we align parameter on stack for caller, if the parameter
     alignment is beyond MAX_SUPPORTED_STACK_ALIGNMENT, it will be
     aligned at MAX_SUPPORTED_STACK_ALIGNMENT.  We will match callee
     here with caller.  */
  if (boundary > MAX_SUPPORTED_STACK_ALIGNMENT)
    boundary = MAX_SUPPORTED_STACK_ALIGNMENT;

  boundary /= BITS_PER_UNIT;

  /* Hoist the valist value into a temporary for the moment.  */
  valist_tmp = get_initialized_tmp_var (valist, pre_p);

  /* va_list pointer is aligned to PARM_BOUNDARY.  If argument actually
     requires greater alignment, we must perform dynamic alignment.  */
  if (boundary > align
      && !TYPE_EMPTY_P (type)
      && !integer_zerop (TYPE_SIZE (type)))
    {
      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build_pointer_plus_hwi (valist_tmp, boundary - 1));
      gimplify_and_add (t, pre_p);

      t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist_tmp,
		  fold_build2 (BIT_AND_EXPR, TREE_TYPE (valist),
			       valist_tmp,
			       build_int_cst (TREE_TYPE (valist), -boundary)));
      gimplify_and_add (t, pre_p);
    }
  else
    boundary = align;

  /* If the actual alignment is less than the alignment of the type,
     adjust the type accordingly so that we don't assume strict alignment
     when dereferencing the pointer.  */
  boundary *= BITS_PER_UNIT;
  if (boundary < TYPE_ALIGN (type))
    {
      type = build_variant_type_copy (type);
      SET_TYPE_ALIGN (type, boundary);
    }

  /* Compute the rounded size of the type.  */
  type_size = arg_size_in_bytes (type);
  rounded_size = round_up (type_size, align);

  /* Reduce rounded_size so it's sharable with the postqueue.  */
  gimplify_expr (&rounded_size, pre_p, post_p, is_gimple_val, fb_rvalue);

  /* Get AP.  */
  addr = valist_tmp;
  if (PAD_VARARGS_DOWN && !integer_zerop (rounded_size))
    {
      /* Small args are padded downward.  */
      t = fold_build2_loc (input_location, GT_EXPR, sizetype,
			   rounded_size, size_int (align));
      t = fold_build3 (COND_EXPR, sizetype, t, size_zero_node,
		       size_binop (MINUS_EXPR, rounded_size, type_size));
      addr = fold_build_pointer_plus (addr, t);
    }

  /* Compute new value for AP.  */
  t = fold_build_pointer_plus (valist_tmp, rounded_size);
  t = build2 (MODIFY_EXPR, TREE_TYPE (valist), valist, t);
  gimplify_and_add (t, pre_p);

  addr = fold_convert (build_pointer_type (type), addr);

  if (indirect)
    addr = build_va_arg_indirect_ref (addr);

  return build_va_arg_indirect_ref (addr);
}
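
/* In C-like pseudocode (illustrative only; AP is the va_list pointer and
   SIZE the argument type's size), the gimplified sequence above amounts
   to:

     if (boundary > align)
       ap = (ap + boundary - 1) & -boundary;   // dynamic alignment
     addr = ap;
     if (PAD_VARARGS_DOWN && rounded_size <= align)
       addr += rounded_size - size;            // small args pad downward
     ap += rounded_size;                       // advance for next va_arg
     result = *(type *) addr;
*/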

/* An implementation of TARGET_CAN_USE_DOLOOP_P for targets that do
   not support nested low-overhead loops.  */

bool
can_use_doloop_if_innermost (const widest_int &, const widest_int &,
			     unsigned int loop_depth, bool)
{
  return loop_depth == 1;
}

/* Default implementation of TARGET_OPTAB_SUPPORTED_P.  */

bool
default_optab_supported_p (int, machine_mode, machine_mode, optimization_type)
{
  return true;
}

/* Default implementation of TARGET_MAX_NOCE_IFCVT_SEQ_COST.  */

unsigned int
default_max_noce_ifcvt_seq_cost (edge e)
{
  bool predictable_p = predictable_edge_p (e);

  if (predictable_p)
    {
      if (OPTION_SET_P (param_max_rtl_if_conversion_predictable_cost))
	return param_max_rtl_if_conversion_predictable_cost;
    }
  else
    {
      if (OPTION_SET_P (param_max_rtl_if_conversion_unpredictable_cost))
	return param_max_rtl_if_conversion_unpredictable_cost;
    }

  return BRANCH_COST (true, predictable_p) * COSTS_N_INSNS (3);
}
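
/* Worked example: with the fallback BRANCH_COST of 1, the cap is
   1 * COSTS_N_INSNS (3) == 12 cost units, unless the corresponding
   --param max-rtl-if-conversion-{predictable,unpredictable}-cost value
   was given explicitly.  */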

/* Default implementation of TARGET_MIN_ARITHMETIC_PRECISION.  */

unsigned int
default_min_arithmetic_precision (void)
{
  return WORD_REGISTER_OPERATIONS ? BITS_PER_WORD : BITS_PER_UNIT;
}

/* Default implementation of TARGET_C_EXCESS_PRECISION.  */

enum flt_eval_method
default_excess_precision (enum excess_precision_type ATTRIBUTE_UNUSED)
{
  return FLT_EVAL_METHOD_PROMOTE_TO_FLOAT;
}

/* Default implementation for
   TARGET_STACK_CLASH_PROTECTION_ALLOCA_PROBE_RANGE.  */

HOST_WIDE_INT
default_stack_clash_protection_alloca_probe_range (void)
{
  return 0;
}

/* The default implementation of TARGET_EARLY_REMAT_MODES.  */

void
default_select_early_remat_modes (sbitmap)
{
}

/* The default implementation of TARGET_PREFERRED_ELSE_VALUE.  */

tree
default_preferred_else_value (unsigned, tree type, unsigned, tree *)
{
  return build_zero_cst (type);
}

/* Default implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE.  */

bool
default_have_speculation_safe_value (bool active ATTRIBUTE_UNUSED)
{
#ifdef HAVE_speculation_barrier
  return active ? HAVE_speculation_barrier : true;
#else
  return false;
#endif
}

/* Alternative implementation of TARGET_HAVE_SPECULATION_SAFE_VALUE
   that can be used on targets that never have speculative execution.  */

bool
speculation_safe_value_not_needed (bool active)
{
  return !active;
}

/* Default implementation of the speculation-safe-load builtin.  This
   implementation simply copies val to result and generates a
   speculation_barrier insn, if such a pattern is defined.  */

rtx
default_speculation_safe_value (machine_mode mode ATTRIBUTE_UNUSED,
				rtx result, rtx val,
				rtx failval ATTRIBUTE_UNUSED)
{
  emit_move_insn (result, val);

#ifdef HAVE_speculation_barrier
  /* Assume the target knows what it is doing: if it defines a
     speculation barrier, but it is not enabled, then assume that one
     isn't needed.  */
  if (HAVE_speculation_barrier)
    emit_insn (gen_speculation_barrier ());
#endif

  return result;
}
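
/* User-level sketch: this hook underlies __builtin_speculation_safe_value.
   A bounds-checked load such as

     int load_checked (int *arr, unsigned i, unsigned len)
     {
       if (i < len)
	 return __builtin_speculation_safe_value (arr[i]);
       return 0;
     }

   expands, on targets using this default, to a plain copy of the loaded
   value followed by a speculation barrier when one is available.  */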

/* How many bits to shift in order to access the tag bits.
   The default is to store the tag in the top 8 bits of a 64-bit pointer,
   hence shifting 56 bits will leave just the tag.  */
#define HWASAN_SHIFT (GET_MODE_PRECISION (Pmode) - 8)
#define HWASAN_SHIFT_RTX GEN_INT (HWASAN_SHIFT)

bool
default_memtag_can_tag_addresses ()
{
  return false;
}

uint8_t
default_memtag_tag_size ()
{
  return 8;
}

uint8_t
default_memtag_granule_size ()
{
  return 16;
}
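
/* Worked example: for a 64-bit Pmode, HWASAN_SHIFT is 64 - 8 == 56, so a
   pointer carrying tag 0x2a holds 0x2a in bits 56-63, and
   default_memtag_extract_tag below recovers it with a logical shift right
   by 56.  */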

/* The default implementation of TARGET_MEMTAG_INSERT_RANDOM_TAG.  */

rtx
default_memtag_insert_random_tag (rtx untagged, rtx target)
{
  gcc_assert (param_hwasan_instrument_stack);
  if (param_hwasan_random_frame_tag)
    {
      rtx fn = init_one_libfunc ("__hwasan_generate_tag");
      rtx new_tag = emit_library_call_value (fn, NULL_RTX, LCT_NORMAL, QImode);
      return targetm.memtag.set_tag (untagged, new_tag, target);
    }
  else
    {
      /* NOTE: The kernel API does not have __hwasan_generate_tag exposed.
	 In the future we may add the option to emit random tags with inline
	 instrumentation instead of function calls.  This would be the same
	 between the kernel and userland.  */
      return untagged;
    }
}

/* The default implementation of TARGET_MEMTAG_ADD_TAG.  */

rtx
default_memtag_add_tag (rtx base, poly_int64 offset, uint8_t tag_offset)
{
  /* Need to look into what the most efficient code sequence is.
     This is a code sequence that would be emitted *many* times, so we
     want it as small as possible.

     There are two places where tag overflow is a question:
       - Tagging the shadow stack.
	  (both tagging and untagging).
       - Tagging addressable pointers.

     We need to ensure both behaviors are the same (i.e. that the tag that
     ends up in a pointer after "overflowing" the tag bits with a tag addition
     is the same that ends up in the shadow space).

     The aim is that the behavior of tag addition should follow modulo
     wrapping in both instances.

     The libhwasan code doesn't have any path that increments a pointer's tag,
     which means it has no opinion on what happens when a tag increment
     overflows (and hence we can choose our own behavior).  */

  offset += ((uint64_t) tag_offset << HWASAN_SHIFT);
  return plus_constant (Pmode, base, offset);
}
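
/* Worked example: with HWASAN_SHIFT == 56, adding tag_offset 1 adds
   1 << 56 to the pointer, incrementing the top-byte tag by one while
   leaving the address bits untouched; a carry out of the top byte is
   simply dropped, giving the modulo-wrapping behavior described above.  */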

/* The default implementation of TARGET_MEMTAG_SET_TAG.  */

rtx
default_memtag_set_tag (rtx untagged, rtx tag, rtx target)
{
  gcc_assert (GET_MODE (untagged) == Pmode && GET_MODE (tag) == QImode);
  tag = expand_simple_binop (Pmode, ASHIFT, tag, HWASAN_SHIFT_RTX, NULL_RTX,
			     /* unsignedp = */1, OPTAB_WIDEN);
  rtx ret = expand_simple_binop (Pmode, IOR, untagged, tag, target,
				 /* unsignedp = */1, OPTAB_DIRECT);
  gcc_assert (ret);
  return ret;
}

/* The default implementation of TARGET_MEMTAG_EXTRACT_TAG.  */

rtx
default_memtag_extract_tag (rtx tagged_pointer, rtx target)
{
  rtx tag = expand_simple_binop (Pmode, LSHIFTRT, tagged_pointer,
				 HWASAN_SHIFT_RTX, target,
				 /* unsignedp = */0,
				 OPTAB_DIRECT);
  rtx ret = gen_lowpart (QImode, tag);
  gcc_assert (ret);
  return ret;
}

/* The default implementation of TARGET_MEMTAG_UNTAGGED_POINTER.  */

rtx
default_memtag_untagged_pointer (rtx tagged_pointer, rtx target)
{
  rtx tag_mask = gen_int_mode ((HOST_WIDE_INT_1U << HWASAN_SHIFT) - 1, Pmode);
  rtx untagged_base = expand_simple_binop (Pmode, AND, tagged_pointer,
					   tag_mask, target, true,
					   OPTAB_DIRECT);
  gcc_assert (untagged_base);
  return untagged_base;
}

/* The default implementation of TARGET_GCOV_TYPE_SIZE.  */

HOST_WIDE_INT
default_gcov_type_size (void)
{
  return TYPE_PRECISION (long_long_integer_type_node) > 32 ? 64 : 32;
}

#include "gt-targhooks.h"