/* Subroutines for insn-output.c for Matsushita MN10300 series
   Copyright (C) 1996-2015 Free Software Foundation, Inc.
   Contributed by Jeff Law (law@cygnus.com).

   This file is part of GCC.

   GCC is free software; you can redistribute it and/or modify
   it under the terms of the GNU General Public License as published by
   the Free Software Foundation; either version 3, or (at your option)
   any later version.

   GCC is distributed in the hope that it will be useful,
   but WITHOUT ANY WARRANTY; without even the implied warranty of
   MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
   GNU General Public License for more details.

   You should have received a copy of the GNU General Public License
   along with GCC; see the file COPYING3.  If not see
   <http://www.gnu.org/licenses/>.  */

#include "config.h"
#include "system.h"
#include "coretypes.h"
#include "tm.h"
#include "rtl.h"
#include "hash-set.h"
#include "machmode.h"
#include "vec.h"
#include "double-int.h"
#include "input.h"
#include "alias.h"
#include "symtab.h"
#include "wide-int.h"
#include "inchash.h"
#include "tree.h"
#include "stor-layout.h"
#include "varasm.h"
#include "calls.h"
#include "regs.h"
#include "hard-reg-set.h"
#include "insn-config.h"
#include "conditions.h"
#include "output.h"
#include "insn-attr.h"
#include "flags.h"
#include "recog.h"
#include "reload.h"
#include "hashtab.h"
#include "function.h"
#include "statistics.h"
#include "real.h"
#include "fixed-value.h"
#include "expmed.h"
#include "dojump.h"
#include "explow.h"
#include "emit-rtl.h"
#include "stmt.h"
#include "expr.h"
#include "insn-codes.h"
#include "optabs.h"
#include "obstack.h"
#include "diagnostic-core.h"
#include "tm_p.h"
#include "tm-constrs.h"
#include "target.h"
#include "target-def.h"
#include "dominance.h"
#include "cfg.h"
#include "cfgrtl.h"
#include "cfganal.h"
#include "lcm.h"
#include "cfgbuild.h"
#include "cfgcleanup.h"
#include "predict.h"
#include "basic-block.h"
#include "df.h"
#include "opts.h"
#include "cfgloop.h"
#include "dumpfile.h"
#include "builtins.h"

/* This is used in the am33_2.0-linux-gnu port, in which global symbol
   names are not prefixed by underscores, to tell whether to prefix a
   label with a plus sign or not, so that the assembler can tell
   symbol names from register names.  */
int mn10300_protect_label;
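
/* Illustrative (an assumption, not taken from this file): with the
   flag set, a symbol that happens to be named "d0" would be emitted
   with a '+' prefix, e.g. "+d0", so that the assembler does not
   confuse it with the register d0.  */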

/* Selected processor type for tuning.  */
enum processor_type mn10300_tune_cpu = PROCESSOR_DEFAULT;

#define CC_FLAG_Z	1
#define CC_FLAG_N	2
#define CC_FLAG_C	4
#define CC_FLAG_V	8

static int cc_flags_for_mode (machine_mode);
static int cc_flags_for_code (enum rtx_code);

/* Implement TARGET_OPTION_OVERRIDE.  */
static void
mn10300_option_override (void)
{
  if (TARGET_AM33)
    target_flags &= ~MASK_MULT_BUG;
  else
    {
      /* Disable scheduling for the MN10300 as we do
	 not have timing information available for it.  */
      flag_schedule_insns = 0;
      flag_schedule_insns_after_reload = 0;

      /* Force enable splitting of wide types, as otherwise it is trivial
	 to run out of registers.  Indeed, this works so well that register
	 allocation problems are now more common *without* optimization,
	 when this flag is not enabled by default.  */
      flag_split_wide_types = 1;
    }

  if (mn10300_tune_string)
    {
      if (strcasecmp (mn10300_tune_string, "mn10300") == 0)
	mn10300_tune_cpu = PROCESSOR_MN10300;
      else if (strcasecmp (mn10300_tune_string, "am33") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33;
      else if (strcasecmp (mn10300_tune_string, "am33-2") == 0)
	mn10300_tune_cpu = PROCESSOR_AM33_2;
      else if (strcasecmp (mn10300_tune_string, "am34") == 0)
	mn10300_tune_cpu = PROCESSOR_AM34;
      else
	error ("-mtune= expects mn10300, am33, am33-2, or am34");
    }
}

static void
mn10300_file_start (void)
{
  default_file_start ();

  if (TARGET_AM33_2)
    fprintf (asm_out_file, "\t.am33_2\n");
  else if (TARGET_AM33)
    fprintf (asm_out_file, "\t.am33\n");
}

/* Note: This list must match the liw_op attribute in mn10300.md.  */

static const char *liw_op_names[] =
{
  "add", "cmp", "sub", "mov",
  "and", "or", "xor",
  "asr", "lsr", "asl",
  "none", "max"
};

/* Print operand X using operand code CODE to assembly language output file
   FILE.  */

void
mn10300_print_operand (FILE *file, rtx x, int code)
{
  switch (code)
    {
    case 'W':
      {
	unsigned int liw_op = UINTVAL (x);

	gcc_assert (TARGET_ALLOW_LIW);
	gcc_assert (liw_op < LIW_OP_MAX);
	fputs (liw_op_names[liw_op], file);
	break;
      }

    case 'b':
    case 'B':
      {
	enum rtx_code cmp = GET_CODE (x);
	machine_mode mode = GET_MODE (XEXP (x, 0));
	const char *str;
	int have_flags;

	if (code == 'B')
	  cmp = reverse_condition (cmp);
	have_flags = cc_flags_for_mode (mode);

	switch (cmp)
	  {
	  case NE:
	    str = "ne";
	    break;
	  case EQ:
	    str = "eq";
	    break;
	  case GE:
	    /* bge is smaller than bnc.  */
	    str = (have_flags & CC_FLAG_V ? "ge" : "nc");
	    break;
	  case LT:
	    str = (have_flags & CC_FLAG_V ? "lt" : "ns");
	    break;
	  case GT:
	    str = "gt";
	    break;
	  case LE:
	    str = "le";
	    break;
	  case GEU:
	    str = "cc";
	    break;
	  case GTU:
	    str = "hi";
	    break;
	  case LEU:
	    str = "ls";
	    break;
	  case LTU:
	    str = "cs";
	    break;
	  case ORDERED:
	    str = "lge";
	    break;
	  case UNORDERED:
	    str = "uo";
	    break;
	  case LTGT:
	    str = "lg";
	    break;
	  case UNEQ:
	    str = "ue";
	    break;
	  case UNGE:
	    str = "uge";
	    break;
	  case UNGT:
	    str = "ug";
	    break;
	  case UNLE:
	    str = "ule";
	    break;
	  case UNLT:
	    str = "ul";
	    break;
	  default:
	    gcc_unreachable ();
	  }

	gcc_checking_assert ((cc_flags_for_code (cmp) & ~have_flags) == 0);
	fputs (str, file);
      }
      break;

    case 'C':
      /* This is used for the operand to a call instruction;
	 if it's a REG, enclose it in parens, else output
	 the operand normally.  */
      if (REG_P (x))
	{
	  fputc ('(', file);
	  mn10300_print_operand (file, x, 0);
	  fputc (')', file);
	}
      else
	mn10300_print_operand (file, x, 0);
      break;

    case 'D':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "fd%d", REGNO (x) - 18);
	  break;

	default:
	  gcc_unreachable ();
	}
      break;

      /* This is the least significant word of a 64-bit value.  */
    case 'L':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case SFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_SINGLE (rv, val[0]);
		fprintf (file, "0x%lx", val[0]);
		break;
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_LOW (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long) INTVAL (low));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

      /* Similarly, but for the most significant word.  */
    case 'H':
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  x = adjust_address (x, SImode, 4);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x) + 1]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x) + 1]);
	  break;

	case CONST_DOUBLE:
	  {
	    long val[2];
	    REAL_VALUE_TYPE rv;

	    switch (GET_MODE (x))
	      {
	      case DFmode:
		REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
		REAL_VALUE_TO_TARGET_DOUBLE (rv, val);
		fprintf (file, "0x%lx", val[1]);
		break;
	      case SFmode:
		gcc_unreachable ();
	      case VOIDmode:
	      case DImode:
		mn10300_print_operand_address (file,
					       GEN_INT (CONST_DOUBLE_HIGH (x)));
		break;
	      default:
		break;
	      }
	    break;
	  }

	case CONST_INT:
	  {
	    rtx low, high;
	    split_double (x, &low, &high);
	    fprintf (file, "%ld", (long) INTVAL (high));
	    break;
	  }

	default:
	  gcc_unreachable ();
	}
      break;

    case 'A':
      fputc ('(', file);
      if (REG_P (XEXP (x, 0)))
	output_address (gen_rtx_PLUS (SImode, XEXP (x, 0), const0_rtx));
      else
	output_address (XEXP (x, 0));
      fputc (')', file);
      break;

    case 'N':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) ((~INTVAL (x)) & 0xff));
      break;

    case 'U':
      gcc_assert (INTVAL (x) >= -128 && INTVAL (x) <= 255);
      fprintf (file, "%d", (int) (INTVAL (x) & 0xff));
      break;

      /* For shift counts.  The hardware ignores the upper bits of
	 any immediate, but the assembler will flag an out of range
	 shift count as an error.  So we mask off the high bits
	 of the immediate here.  */
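      /* For example, a constant shift count of 35 is printed as "3",
	 since 35 & 0x1f == 3.  */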
    case 'S':
      if (CONST_INT_P (x))
	{
	  fprintf (file, "%d", (int) (INTVAL (x) & 0x1f));
	  break;
	}
      /* FALL THROUGH */

    default:
      switch (GET_CODE (x))
	{
	case MEM:
	  fputc ('(', file);
	  output_address (XEXP (x, 0));
	  fputc (')', file);
	  break;

	case PLUS:
	  output_address (x);
	  break;

	case REG:
	  fprintf (file, "%s", reg_names[REGNO (x)]);
	  break;

	case SUBREG:
	  fprintf (file, "%s", reg_names[subreg_regno (x)]);
	  break;

	  /* This will only be single precision....  */
	case CONST_DOUBLE:
	  {
	    unsigned long val;
	    REAL_VALUE_TYPE rv;

	    REAL_VALUE_FROM_CONST_DOUBLE (rv, x);
	    REAL_VALUE_TO_TARGET_SINGLE (rv, val);
	    fprintf (file, "0x%lx", val);
	    break;
	  }

	case CONST_INT:
	case SYMBOL_REF:
	case CONST:
	case LABEL_REF:
	case CODE_LABEL:
	case UNSPEC:
	  mn10300_print_operand_address (file, x);
	  break;
	default:
	  gcc_unreachable ();
	}
      break;
    }
}

/* Output the assembly language representation of the address ADDR to FILE.  */

void
mn10300_print_operand_address (FILE *file, rtx addr)
{
  switch (GET_CODE (addr))
    {
    case POST_INC:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      break;

    case POST_MODIFY:
      mn10300_print_operand (file, XEXP (addr, 0), 0);
      fputc ('+', file);
      fputc (',', file);
      mn10300_print_operand (file, XEXP (addr, 1), 0);
      break;

    case REG:
      mn10300_print_operand (file, addr, 0);
      break;
    case PLUS:
      {
	rtx base = XEXP (addr, 0);
	rtx index = XEXP (addr, 1);

	if (REG_P (index) && !REG_OK_FOR_INDEX_P (index))
	  {
	    rtx x = base;
	    base = index;
	    index = x;

	    gcc_assert (REG_P (index) && REG_OK_FOR_INDEX_P (index));
	  }
	gcc_assert (REG_OK_FOR_BASE_P (base));

	mn10300_print_operand (file, index, 0);
	fputc (',', file);
	mn10300_print_operand (file, base, 0);
	break;
      }
    case SYMBOL_REF:
      output_addr_const (file, addr);
      break;
    default:
      output_addr_const (file, addr);
      break;
    }
}

/* Implement TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA.

   Used for PIC-specific UNSPECs.  */

static bool
mn10300_asm_output_addr_const_extra (FILE *file, rtx x)
{
  if (GET_CODE (x) == UNSPEC)
    {
      switch (XINT (x, 1))
	{
	case UNSPEC_PIC:
	  /* GLOBAL_OFFSET_TABLE or local symbols, no suffix.  */
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  break;
	case UNSPEC_GOT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOT", file);
	  break;
	case UNSPEC_GOTOFF:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@GOTOFF", file);
	  break;
	case UNSPEC_PLT:
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("@PLT", file);
	  break;
	case UNSPEC_GOTSYM_OFF:
	  assemble_name (file, GOT_SYMBOL_NAME);
	  fputs ("-(", file);
	  output_addr_const (file, XVECEXP (x, 0, 0));
	  fputs ("-.)", file);
	  break;
	default:
	  return false;
	}
      return true;
    }
  else
    return false;
}

/* Count the number of FP registers that have to be saved.  */
static int
fp_regs_to_save (void)
{
  int i, n = 0;

  if (! TARGET_AM33_2)
    return 0;

  for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      ++n;

  return n;
}

/* Print a set of registers in the format required by "movm" and "ret".
   Register K is saved if bit K of MASK is set.  The data and address
   registers can be stored individually, but the extended registers cannot.
   We assume that the mask already takes that into account.  For instance,
   bits 14 to 17 must have the same value.  */

void
mn10300_print_reg_list (FILE *file, int mask)
{
  int need_comma;
  int i;

  need_comma = 0;
  fputc ('[', file);

  for (i = 0; i < FIRST_EXTENDED_REGNUM; i++)
    if ((mask & (1 << i)) != 0)
      {
	if (need_comma)
	  fputc (',', file);
	fputs (reg_names [i], file);
	need_comma = 1;
      }

  if ((mask & 0x3c000) != 0)
    {
      gcc_assert ((mask & 0x3c000) == 0x3c000);
      if (need_comma)
	fputc (',', file);
      fputs ("exreg1", file);
      need_comma = 1;
    }

  fputc (']', file);
}
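
/* Illustrative use of the above (assuming the usual register naming):
   a MASK of 0x3c00c, i.e. registers 2 and 3 plus all of the
   callee-saved extended registers, would print as "[d2,d3,exreg1]".  */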

/* If the MDR register is never clobbered, we can use the RETF instruction
   which takes the address from the MDR register.  This is 3 cycles faster
   than having to load the address from the stack.  */

bool
mn10300_can_use_retf_insn (void)
{
  /* Don't bother if we're not optimizing.  In this case we won't
     have proper access to df_regs_ever_live_p.  */
  if (!optimize)
    return false;

  /* EH returns alter the saved return address; MDR is not current.  */
  if (crtl->calls_eh_return)
    return false;

  /* Obviously not if MDR is ever clobbered.  */
  if (df_regs_ever_live_p (MDR_REG))
    return false;

  /* ??? Careful not to use this during expand_epilogue etc.  */
  gcc_assert (!in_sequence_p ());
  return leaf_function_p ();
}

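/* The RETS instruction, in contrast, can be used only when no stack
   slots at all need to be released on return: a zero elimination
   offset from the argument pointer to the stack pointer means no
   frame was allocated and no registers were saved.  */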
bool
mn10300_can_use_rets_insn (void)
{
  return !mn10300_initial_offset (ARG_POINTER_REGNUM, STACK_POINTER_REGNUM);
}

/* Returns the set of live, callee-saved registers as a bitmask.  The
   callee-saved extended registers cannot be stored individually, so
   all of them will be included in the mask if any one of them is used.
   Also returns the number of bytes in the registers in the mask if
   BYTES_SAVED is not NULL.  */

unsigned int
mn10300_get_live_callee_saved_regs (unsigned int * bytes_saved)
{
  int mask;
  int i;
  unsigned int count;

  count = mask = 0;
  for (i = 0; i <= LAST_EXTENDED_REGNUM; i++)
    if (df_regs_ever_live_p (i) && ! call_really_used_regs[i])
      {
	mask |= (1 << i);
	++ count;
      }

  if ((mask & 0x3c000) != 0)
    {
      for (i = 0x04000; i < 0x40000; i <<= 1)
	if ((mask & i) == 0)
	  ++ count;

      mask |= 0x3c000;
    }

  if (bytes_saved)
    * bytes_saved = count * UNITS_PER_WORD;

  return mask;
}

static rtx
F (rtx r)
{
  RTX_FRAME_RELATED_P (r) = 1;
  return r;
}

/* Generate an instruction that pushes several registers onto the stack.
   Register K will be saved if bit K in MASK is set.  The function does
   nothing if MASK is zero.

   To be compatible with the "movm" instruction, the lowest-numbered
   register must be stored in the lowest slot.  If MASK is the set
   { R1,...,RN }, where R1...RN are ordered least first, the generated
   instruction will have the form:

       (parallel
         (set (reg:SI 9) (plus:SI (reg:SI 9) (const_int -N*4)))
	 (set (mem:SI (plus:SI (reg:SI 9)
	                       (const_int -1*4)))
	      (reg:SI RN))
	 ...
	 (set (mem:SI (plus:SI (reg:SI 9)
	                       (const_int -N*4)))
	      (reg:SI R1))) */

static void
mn10300_gen_multiple_store (unsigned int mask)
{
  /* The order in which registers are stored, from SP-4 through SP-N*4.  */
  static const unsigned int store_order[8] = {
    /* e2, e3: never saved */
    FIRST_EXTENDED_REGNUM + 4,
    FIRST_EXTENDED_REGNUM + 5,
    FIRST_EXTENDED_REGNUM + 6,
    FIRST_EXTENDED_REGNUM + 7,
    /* e0, e1, mdrq, mcrh, mcrl, mcvf: never saved.  */
    FIRST_DATA_REGNUM + 2,
    FIRST_DATA_REGNUM + 3,
    FIRST_ADDRESS_REGNUM + 2,
    FIRST_ADDRESS_REGNUM + 3,
    /* d0, d1, a0, a1, mdr, lir, lar: never saved.  */
  };

  rtx x, elts[9];
  unsigned int i;
  int count;

  if (mask == 0)
    return;

  for (i = count = 0; i < ARRAY_SIZE (store_order); ++i)
    {
      unsigned regno = store_order[i];

      if (((mask >> regno) & 1) == 0)
	continue;

      ++count;
      x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
      x = gen_frame_mem (SImode, x);
      x = gen_rtx_SET (VOIDmode, x, gen_rtx_REG (SImode, regno));
      elts[count] = F (x);

      /* Remove the register from the mask so that... */
      mask &= ~(1u << regno);
    }

  /* ... we can make sure that we didn't try to use a register
     not listed in the store order.  */
  gcc_assert (mask == 0);

  /* Create the instruction that updates the stack pointer.  */
  x = plus_constant (Pmode, stack_pointer_rtx, count * -4);
  x = gen_rtx_SET (VOIDmode, stack_pointer_rtx, x);
  elts[0] = F (x);

  /* We need one PARALLEL element to update the stack pointer and
     an additional element for each register that is stored.  */
  x = gen_rtx_PARALLEL (VOIDmode, gen_rtvec_v (count + 1, elts));
  F (emit_insn (x));
}

static inline unsigned int
popcount (unsigned int mask)
{
  unsigned int count = 0;

  while (mask)
    {
      ++ count;
      mask &= ~ (mask & - mask);
    }
  return count;
}
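
/* Illustrative: popcount (0x3c000) is 4, one bit for each of the four
   callee-saved extended registers.  */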

void
mn10300_expand_prologue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int mask;

  mask = mn10300_get_live_callee_saved_regs (NULL);
  /* If we use any of the callee-saved registers, save them now.  */
  mn10300_gen_multiple_store (mask);

  if (flag_stack_usage_info)
    current_function_static_stack_size = size + popcount (mask) * 4;

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      HOST_WIDE_INT xsize;
      enum
      {
	save_sp_merge,
	save_sp_no_merge,
	save_sp_partial_merge,
	save_a0_merge,
	save_a0_no_merge
      } strategy;
      unsigned int strategy_size = (unsigned) -1, this_strategy_size;
      rtx reg;

      if (flag_stack_usage_info)
	current_function_static_stack_size += num_regs_to_save * 4;

      /* We have several different strategies to save FP registers.
	 We can store them using SP offsets, which is beneficial if
	 there are just a few registers to save, or we can use `a0' in
	 post-increment mode (`a0' is the only call-clobbered address
	 register that is never used to pass information to a
	 function).  Furthermore, if we don't need a frame pointer, we
	 can merge the two SP adds into a single one, but this isn't
	 always beneficial; sometimes we can just split the two adds
	 so that we don't exceed a 16-bit constant size.  The code
	 below will select which strategy to use, so as to generate the
	 smallest code.  Ties are broken in favor of shorter sequences
	 (in terms of the number of instructions).  */

#define SIZE_ADD_AX(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 2)
#define SIZE_ADD_SP(S) ((((S) >= (1 << 15)) || ((S) < -(1 << 15))) ? 6 \
			: (((S) >= (1 << 7)) || ((S) < -(1 << 7))) ? 4 : 3)

/* We add 0 * (S) in two places to promote to the type of S,
   so that all arms of the conditional have the same type.  */
#define SIZE_FMOV_LIMIT(S,N,L,SIZE1,SIZE2,ELSE) \
  (((S) >= (L)) ? 0 * (S) + (SIZE1) * (N) \
   : ((S) + 4 * (N) >= (L)) ? (((L) - (S)) / 4 * (SIZE2) \
			       + ((S) + 4 * (N) - (L)) / 4 * (SIZE1)) \
   : 0 * (S) + (ELSE))
#define SIZE_FMOV_SP_(S,N) \
  (SIZE_FMOV_LIMIT ((S), (N), (1 << 24), 7, 6, \
		    SIZE_FMOV_LIMIT ((S), (N), (1 << 8), 6, 4, \
				     (S) ? 4 * (N) : 3 + 4 * ((N) - 1))))
#define SIZE_FMOV_SP(S,N) (SIZE_FMOV_SP_ ((unsigned HOST_WIDE_INT) (S), (N)))
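
/* Worked examples (illustrative): SIZE_ADD_SP (-100) is 3, since the
   constant fits in 8 bits; SIZE_ADD_SP (-200) is 4 (16-bit immediate);
   the corresponding SIZE_ADD_AX values are 2 and 4.  */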

      /* Consider alternative save_sp_merge only if we don't need the
	 frame pointer and size is nonzero.  */
      if (! frame_pointer_needed && size)
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (size, num_regs_to_save);

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_sp_no_merge unconditionally.  */
      /* Insn: add -4 * num_regs_to_save, sp.  */
      this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
      /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
      this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
      if (size)
	{
	  /* Insn: add -size, sp.  */
	  this_strategy_size += SIZE_ADD_SP (-size);
	}

      if (this_strategy_size < strategy_size)
	{
	  strategy = save_sp_no_merge;
	  strategy_size = this_strategy_size;
	}

      /* Consider alternative save_sp_partial_merge only if we don't
	 need a frame pointer and size is reasonably large.  */
      if (! frame_pointer_needed && size + 4 * num_regs_to_save > 128)
	{
	  /* Insn: add -128, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-128);
	  /* Insn: fmov fs#, (##, sp), for each fs# to be saved.  */
	  this_strategy_size += SIZE_FMOV_SP (128 - 4 * num_regs_to_save,
					      num_regs_to_save);
	  if (size)
	    {
	      /* Insn: add 128-size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (128 - size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_sp_partial_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_merge only if we don't need a
	 frame pointer, size is nonzero and the user hasn't
	 changed the calling conventions of a0.  */
      if (! frame_pointer_needed && size
	  && call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -(size + 4 * num_regs_to_save), sp.  */
	  this_strategy_size = SIZE_ADD_SP (-(size + 4 * num_regs_to_save));
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  if (size)
	    {
	      /* Insn: add size, a0.  */
	      this_strategy_size += SIZE_ADD_AX (size);
	    }
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Consider alternative save_a0_no_merge if the user hasn't
	 changed the calling conventions of a0.  */
      if (call_really_used_regs [FIRST_ADDRESS_REGNUM]
	  && ! fixed_regs[FIRST_ADDRESS_REGNUM])
	{
	  /* Insn: add -4 * num_regs_to_save, sp.  */
	  this_strategy_size = SIZE_ADD_SP (-4 * num_regs_to_save);
	  /* Insn: mov sp, a0.  */
	  this_strategy_size++;
	  /* Insn: fmov fs#, (a0+), for each fs# to be saved.  */
	  this_strategy_size += 3 * num_regs_to_save;
	  if (size)
	    {
	      /* Insn: add -size, sp.  */
	      this_strategy_size += SIZE_ADD_SP (-size);
	    }

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = save_a0_no_merge;
	      strategy_size = this_strategy_size;
	    }
	}

      /* Emit the initial SP add, common to all strategies.  */
      switch (strategy)
	{
	case save_sp_no_merge:
	case save_a0_no_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-4 * num_regs_to_save))));
	  xsize = 0;
	  break;

	case save_sp_partial_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-128))));
	  xsize = 128 - 4 * num_regs_to_save;
	  size -= xsize;
	  break;

	case save_sp_merge:
	case save_a0_merge:
	  F (emit_insn (gen_addsi3 (stack_pointer_rtx,
				    stack_pointer_rtx,
				    GEN_INT (-(size + 4 * num_regs_to_save)))));
	  /* We'll have to adjust FP register saves according to the
	     frame size.  */
	  xsize = size;
	  /* Since we've already created the stack frame, don't do it
	     again at the end of the function.  */
	  size = 0;
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now prepare register a0, if we have decided to use it.  */
      switch (strategy)
	{
	case save_sp_merge:
	case save_sp_no_merge:
	case save_sp_partial_merge:
	  reg = 0;
	  break;

	case save_a0_merge:
	case save_a0_no_merge:
	  reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM);
	  F (emit_insn (gen_movsi (reg, stack_pointer_rtx)));
	  if (xsize)
	    F (emit_insn (gen_addsi3 (reg, reg, GEN_INT (xsize))));
	  reg = gen_rtx_POST_INC (SImode, reg);
	  break;

	default:
	  gcc_unreachable ();
	}

      /* Now actually save the FP registers.  */
      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else
	      {
		/* If we aren't using `a0', use an SP offset.  */
		if (xsize)
		  {
		    addr = gen_rtx_PLUS (SImode,
					 stack_pointer_rtx,
					 GEN_INT (xsize));
		  }
		else
		  addr = stack_pointer_rtx;

		xsize += 4;
	      }

	    F (emit_insn (gen_movsf (gen_rtx_MEM (SFmode, addr),
				     gen_rtx_REG (SFmode, i))));
	  }
    }

  /* Now put the frame pointer into the frame pointer register.  */
  if (frame_pointer_needed)
    F (emit_move_insn (frame_pointer_rtx, stack_pointer_rtx));

  /* Allocate stack for this frame.  */
  if (size)
    F (emit_insn (gen_addsi3 (stack_pointer_rtx,
			      stack_pointer_rtx,
			      GEN_INT (-size))));

  if (flag_pic && df_regs_ever_live_p (PIC_OFFSET_TABLE_REGNUM))
    emit_insn (gen_load_pic ());
}

void
mn10300_expand_epilogue (void)
{
  HOST_WIDE_INT size = mn10300_frame_size ();
  unsigned int reg_save_bytes;

  mn10300_get_live_callee_saved_regs (& reg_save_bytes);

  if (TARGET_AM33_2 && fp_regs_to_save ())
    {
      int num_regs_to_save = fp_regs_to_save (), i;
      rtx reg = 0;

      /* We have several options to restore FP registers.  We could
	 load them from SP offsets, but, if there are enough FP
	 registers to restore, we win if we use a post-increment
	 addressing mode.  */

      /* If we have a frame pointer, it's the best option, because we
	 already know it has the value we want.  */
      if (frame_pointer_needed)
	reg = gen_rtx_REG (SImode, FRAME_POINTER_REGNUM);
      /* Otherwise, we may use `a1', since it's call-clobbered and
	 it's never used for return values.  But only do so if it's
	 smaller than using SP offsets.  */
      else
	{
	  enum { restore_sp_post_adjust,
		 restore_sp_pre_adjust,
		 restore_sp_partial_adjust,
		 restore_a1 } strategy;
	  unsigned int this_strategy_size, strategy_size = (unsigned) -1;

	  /* Consider using sp offsets before adjusting sp.  */
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size = SIZE_FMOV_SP (size, num_regs_to_save);
	  /* If size is too large, we'll have to adjust SP with an
	     add.  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save, sp.  */
	      this_strategy_size += SIZE_ADD_SP (size + 4 * num_regs_to_save);
	    }
	  /* If we don't have to restore any non-FP registers,
	     we'll be able to save one byte by using rets.  */
	  if (! reg_save_bytes)
	    this_strategy_size--;

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_post_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after adjusting sp.  */
	  /* Insn: add size, sp.  */
	  this_strategy_size = SIZE_ADD_SP (size);
	  /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	  this_strategy_size += SIZE_FMOV_SP (0, num_regs_to_save);
	  /* We're going to use ret to release the FP registers
	     save area, so, no savings.  */

	  if (this_strategy_size < strategy_size)
	    {
	      strategy = restore_sp_pre_adjust;
	      strategy_size = this_strategy_size;
	    }

	  /* Consider using sp offsets after partially adjusting sp.
	     When size is close to 32Kb, we may be able to adjust SP
	     with an imm16 add instruction while still using fmov
	     (d8,sp).  */
	  if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
	    {
	      /* Insn: add size + 4 * num_regs_to_save
		       + reg_save_bytes - 252,sp.  */
	      this_strategy_size = SIZE_ADD_SP (size + 4 * num_regs_to_save
						+ (int) reg_save_bytes - 252);
	      /* Insn: fmov (##,sp),fs#, for each fs# to be restored.  */
	      this_strategy_size += SIZE_FMOV_SP (252 - reg_save_bytes
						  - 4 * num_regs_to_save,
						  num_regs_to_save);
	      /* We're going to use ret to release the FP registers
		 save area, so, no savings.  */

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_sp_partial_adjust;
		  strategy_size = this_strategy_size;
		}
	    }

	  /* Consider using a1 in post-increment mode, as long as the
	     user hasn't changed the calling conventions of a1.  */
	  if (call_really_used_regs [FIRST_ADDRESS_REGNUM + 1]
	      && ! fixed_regs[FIRST_ADDRESS_REGNUM + 1])
	    {
	      /* Insn: mov sp,a1.  */
	      this_strategy_size = 1;
	      if (size)
		{
		  /* Insn: add size,a1.  */
		  this_strategy_size += SIZE_ADD_AX (size);
		}
	      /* Insn: fmov (a1+),fs#, for each fs# to be restored.  */
	      this_strategy_size += 3 * num_regs_to_save;
	      /* If size is large enough, we may be able to save a
		 couple of bytes.  */
	      if (size + 4 * num_regs_to_save + reg_save_bytes > 255)
		{
		  /* Insn: mov a1,sp.  */
		  this_strategy_size += 2;
		}
	      /* If we don't have to restore any non-FP registers,
		 we'll be able to save one byte by using rets.  */
	      if (! reg_save_bytes)
		this_strategy_size--;

	      if (this_strategy_size < strategy_size)
		{
		  strategy = restore_a1;
		  strategy_size = this_strategy_size;
		}
	    }

	  switch (strategy)
	    {
	    case restore_sp_post_adjust:
	      break;

	    case restore_sp_pre_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size)));
	      size = 0;
	      break;

	    case restore_sp_partial_adjust:
	      emit_insn (gen_addsi3 (stack_pointer_rtx,
				     stack_pointer_rtx,
				     GEN_INT (size + 4 * num_regs_to_save
					      + reg_save_bytes - 252)));
	      size = 252 - reg_save_bytes - 4 * num_regs_to_save;
	      break;

	    case restore_a1:
	      reg = gen_rtx_REG (SImode, FIRST_ADDRESS_REGNUM + 1);
	      emit_insn (gen_movsi (reg, stack_pointer_rtx));
	      if (size)
		emit_insn (gen_addsi3 (reg, reg, GEN_INT (size)));
	      break;

	    default:
	      gcc_unreachable ();
	    }
	}

      /* Adjust the selected register, if any, for post-increment.  */
      if (reg)
	reg = gen_rtx_POST_INC (SImode, reg);

      for (i = FIRST_FP_REGNUM; i <= LAST_FP_REGNUM; ++i)
	if (df_regs_ever_live_p (i) && ! call_really_used_regs [i])
	  {
	    rtx addr;

	    if (reg)
	      addr = reg;
	    else if (size)
	      {
		/* If we aren't using a post-increment register, use an
		   SP offset.  */
		addr = gen_rtx_PLUS (SImode,
				     stack_pointer_rtx,
				     GEN_INT (size));
	      }
	    else
	      addr = stack_pointer_rtx;

	    size += 4;

	    emit_insn (gen_movsf (gen_rtx_REG (SFmode, i),
				  gen_rtx_MEM (SFmode, addr)));
	  }

      /* If we were using the restore_a1 strategy and the number of
	 bytes to be released won't fit in the `ret' byte, copy `a1'
	 to `sp', to avoid having to use `add' to adjust it.  */
      if (! frame_pointer_needed && reg && size + reg_save_bytes > 255)
	{
	  emit_move_insn (stack_pointer_rtx, XEXP (reg, 0));
	  size = 0;
	}
    }

  /* Maybe cut back the stack, except for the register save area.

     If the frame pointer exists, then use the frame pointer to
     cut back the stack.

     If the stack size + register save area is more than 255 bytes,
     then the stack must be cut back here since the size + register
     save size is too big for a ret/retf instruction.

     Else leave it alone, it will be cut back as part of the
     ret/retf instruction, or there wasn't any stack to begin with.

     Under no circumstances should the register save area be
     deallocated here, that would leave a window where an interrupt
     could occur and trash the register save area.  */
  if (frame_pointer_needed)
    {
      emit_move_insn (stack_pointer_rtx, frame_pointer_rtx);
      size = 0;
    }
  else if (size + reg_save_bytes > 255)
    {
      emit_insn (gen_addsi3 (stack_pointer_rtx,
			     stack_pointer_rtx,
			     GEN_INT (size)));
      size = 0;
    }

  /* Adjust the stack and restore callee-saved registers, if any.  */
  if (mn10300_can_use_rets_insn ())
    emit_jump_insn (ret_rtx);
  else
    emit_jump_insn (gen_return_ret (GEN_INT (size + reg_save_bytes)));
}

/* Recognize the PARALLEL rtx generated by mn10300_gen_multiple_store().
   This function is for MATCH_PARALLEL and so assumes OP is known to be
   parallel.  If OP is a multiple store, return a mask indicating which
   registers it saves.  Return 0 otherwise.  */

unsigned int
mn10300_store_multiple_regs (rtx op)
{
  int count;
  int mask;
  int i;
  unsigned int last;
  rtx elt;

  count = XVECLEN (op, 0);
  if (count < 2)
    return 0;

  /* Check that first instruction has the form (set (sp) (plus A B)).  */
  elt = XVECEXP (op, 0, 0);
  if (GET_CODE (elt) != SET
      || (! REG_P (SET_DEST (elt)))
      || REGNO (SET_DEST (elt)) != STACK_POINTER_REGNUM
      || GET_CODE (SET_SRC (elt)) != PLUS)
    return 0;

  /* Check that A is the stack pointer and B is the expected stack size.
     For OP to match, each subsequent instruction should push a word onto
     the stack.  We therefore expect the first instruction to create
     COUNT-1 stack slots.  */
  elt = SET_SRC (elt);
  if ((! REG_P (XEXP (elt, 0)))
      || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
      || (! CONST_INT_P (XEXP (elt, 1)))
      || INTVAL (XEXP (elt, 1)) != -(count - 1) * 4)
    return 0;

  mask = 0;
  for (i = 1; i < count; i++)
    {
      /* Check that element i is a (set (mem M) R).  */
      /* ??? Validate the register order a-la mn10300_gen_multiple_store.
	 Remember: the ordering is *not* monotonic.  */
      elt = XVECEXP (op, 0, i);
      if (GET_CODE (elt) != SET
	  || (! MEM_P (SET_DEST (elt)))
	  || (! REG_P (SET_SRC (elt))))
	return 0;

      /* Remember which registers are to be saved.  */
      last = REGNO (SET_SRC (elt));
      mask |= (1 << last);

      /* Check that M has the form (plus (sp) (const_int -I*4)).  */
      elt = XEXP (SET_DEST (elt), 0);
      if (GET_CODE (elt) != PLUS
	  || (! REG_P (XEXP (elt, 0)))
	  || REGNO (XEXP (elt, 0)) != STACK_POINTER_REGNUM
	  || (! CONST_INT_P (XEXP (elt, 1)))
	  || INTVAL (XEXP (elt, 1)) != -i * 4)
	return 0;
    }

  /* All or none of the callee-saved extended registers must be in the set.  */
  if ((mask & 0x3c000) != 0
      && (mask & 0x3c000) != 0x3c000)
    return 0;

  return mask;
}

/* Implement TARGET_PREFERRED_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (MEM_P (x)
	   || (REG_P (x)
	       && !HARD_REGISTER_P (x))
	   || (GET_CODE (x) == SUBREG
	       && REG_P (SUBREG_REG (x))
	       && !HARD_REGISTER_P (SUBREG_REG (x))))
    return LIMIT_RELOAD_CLASS (GET_MODE (x), rclass);
  else
    return rclass;
}

/* Implement TARGET_PREFERRED_OUTPUT_RELOAD_CLASS.  */

static reg_class_t
mn10300_preferred_output_reload_class (rtx x, reg_class_t rclass)
{
  if (x == stack_pointer_rtx && rclass != SP_REGS)
    return (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  return rclass;
}

/* Implement TARGET_SECONDARY_RELOAD.  */

static reg_class_t
mn10300_secondary_reload (bool in_p, rtx x, reg_class_t rclass_i,
			  machine_mode mode, secondary_reload_info *sri)
{
  enum reg_class rclass = (enum reg_class) rclass_i;
  enum reg_class xclass = NO_REGS;
  unsigned int xregno = INVALID_REGNUM;

  if (REG_P (x))
    {
      xregno = REGNO (x);
      if (xregno >= FIRST_PSEUDO_REGISTER)
	xregno = true_regnum (x);
      if (xregno != INVALID_REGNUM)
	xclass = REGNO_REG_CLASS (xregno);
    }

  if (!TARGET_AM33)
    {
      /* Memory load/stores less than a full word wide can't have an
	 address or stack pointer destination.  They must use a data
	 register as an intermediate register.  */
      if (rclass != DATA_REGS
	  && (mode == QImode || mode == HImode)
	  && xclass == NO_REGS)
	return DATA_REGS;

      /* We can only move SP to/from an address register.  */
      if (in_p
	  && rclass == SP_REGS
	  && xclass != ADDRESS_REGS)
	return ADDRESS_REGS;
      if (!in_p
	  && xclass == SP_REGS
	  && rclass != ADDRESS_REGS
	  && rclass != SP_OR_ADDRESS_REGS)
	return ADDRESS_REGS;
    }

  /* We can't directly load sp + const_int into a register;
     we must use an address register as a scratch.  */
  if (in_p
      && rclass != SP_REGS
      && rclass != SP_OR_ADDRESS_REGS
      && rclass != SP_OR_GENERAL_REGS
      && GET_CODE (x) == PLUS
      && (XEXP (x, 0) == stack_pointer_rtx
	  || XEXP (x, 1) == stack_pointer_rtx))
    {
      sri->icode = CODE_FOR_reload_plus_sp_const;
      return NO_REGS;
    }

  /* We can only move MDR to/from a data register.  */
  if (rclass == MDR_REGS && xclass != DATA_REGS)
    return DATA_REGS;
  if (xclass == MDR_REGS && rclass != DATA_REGS)
    return DATA_REGS;

  /* We can't load/store an FP register from a constant address.  */
  if (TARGET_AM33_2
      && (rclass == FP_REGS || xclass == FP_REGS)
      && (xclass == NO_REGS || rclass == NO_REGS))
    {
      rtx addr = NULL;

      if (xregno >= FIRST_PSEUDO_REGISTER && xregno != INVALID_REGNUM)
	{
	  addr = reg_equiv_mem (xregno);
	  if (addr)
	    addr = XEXP (addr, 0);
	}
      else if (MEM_P (x))
	addr = XEXP (x, 0);

      if (addr && CONSTANT_ADDRESS_P (addr))
	return GENERAL_REGS;
    }

  /* Otherwise assume no secondary reloads are needed.  */
  return NO_REGS;
}

int
mn10300_frame_size (void)
{
  /* size includes the fixed stack space needed for function calls.  */
  int size = get_frame_size () + crtl->outgoing_args_size;

  /* And space for the return pointer.  */
  size += crtl->outgoing_args_size ? 4 : 0;

  return size;
}
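
/* Illustrative: with 16 bytes of locals and 8 bytes of outgoing argument
   space, mn10300_frame_size () returns 16 + 8 + 4 = 28; the extra 4 bytes
   cover the return pointer slot.  */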

int
mn10300_initial_offset (int from, int to)
{
  int diff = 0;

  gcc_assert (from == ARG_POINTER_REGNUM || from == FRAME_POINTER_REGNUM);
  gcc_assert (to == FRAME_POINTER_REGNUM || to == STACK_POINTER_REGNUM);

  if (to == STACK_POINTER_REGNUM)
    diff = mn10300_frame_size ();

  /* The difference between the argument pointer and the frame pointer
     is the size of the callee register save area.  */
  if (from == ARG_POINTER_REGNUM)
    {
      unsigned int reg_save_bytes;

      mn10300_get_live_callee_saved_regs (& reg_save_bytes);
      diff += reg_save_bytes;
      diff += 4 * fp_regs_to_save ();
    }

  return diff;
}

/* Worker function for TARGET_RETURN_IN_MEMORY.  */

static bool
mn10300_return_in_memory (const_tree type, const_tree fntype ATTRIBUTE_UNUSED)
{
  /* Return values > 8 bytes in length in memory.  */
  return (int_size_in_bytes (type) > 8
	  || int_size_in_bytes (type) == 0
	  || TYPE_MODE (type) == BLKmode);
}

/* Flush the argument registers to the stack for a stdarg function;
   return the new argument pointer.  */
static rtx
mn10300_builtin_saveregs (void)
{
  rtx offset, mem;
  tree fntype = TREE_TYPE (current_function_decl);
  int argadj = ((!stdarg_p (fntype))
		? UNITS_PER_WORD : 0);
  alias_set_type set = get_varargs_alias_set ();

  if (argadj)
    offset = plus_constant (Pmode, crtl->args.arg_offset_rtx, argadj);
  else
    offset = crtl->args.arg_offset_rtx;

  mem = gen_rtx_MEM (SImode, crtl->args.internal_arg_pointer);
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 0));

  mem = gen_rtx_MEM (SImode,
		     plus_constant (Pmode,
				    crtl->args.internal_arg_pointer, 4));
  set_mem_alias_set (mem, set);
  emit_move_insn (mem, gen_rtx_REG (SImode, 1));

  return copy_to_reg (expand_binop (Pmode, add_optab,
				    crtl->args.internal_arg_pointer,
				    offset, 0, 0, OPTAB_LIB_WIDEN));
}

static void
mn10300_va_start (tree valist, rtx nextarg)
{
  nextarg = expand_builtin_saveregs ();
  std_expand_builtin_va_start (valist, nextarg);
}

/* Return true when a parameter should be passed by reference.  */

static bool
mn10300_pass_by_reference (cumulative_args_t cum ATTRIBUTE_UNUSED,
			   machine_mode mode, const_tree type,
			   bool named ATTRIBUTE_UNUSED)
{
  unsigned HOST_WIDE_INT size;

  if (type)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  return (size > 8 || size == 0);
}
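
/* For example, a 12-byte structure is passed by reference, while an
   8-byte structure is passed by value.  */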

/* Return an RTX to represent where an argument of mode MODE will be
   passed to a function.  If the result is NULL_RTX, the argument is
   pushed onto the stack.  */

static rtx
mn10300_function_arg (cumulative_args_t cum_v, machine_mode mode,
		      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  rtx result = NULL_RTX;
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return result;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return result;

  switch (cum->nbytes / UNITS_PER_WORD)
    {
    case 0:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM);
      break;
    case 1:
      result = gen_rtx_REG (mode, FIRST_ARGUMENT_REGNUM + 1);
      break;
    default:
      break;
    }

  return result;
}
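
/* Illustrative: for f (int a, int b, int c), A goes in the first
   argument register, B in the second, and C is pushed onto the stack,
   since only two data registers carry arguments.  */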

/* Update the data in CUM to advance over an argument
   of mode MODE and data type TYPE.
   (TYPE is null for libcalls where that information may not be available.)  */

static void
mn10300_function_arg_advance (cumulative_args_t cum_v, machine_mode mode,
			      const_tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);

  cum->nbytes += (mode != BLKmode
		  ? (GET_MODE_SIZE (mode) + 3) & ~3
		  : (int_size_in_bytes (type) + 3) & ~3);
}

/* Return the number of bytes of registers to use for an argument passed
   partially in registers and partially in memory.  */

static int
mn10300_arg_partial_bytes (cumulative_args_t cum_v, machine_mode mode,
			   tree type, bool named ATTRIBUTE_UNUSED)
{
  CUMULATIVE_ARGS *cum = get_cumulative_args (cum_v);
  int size;

  /* We only support using 2 data registers as argument registers.  */
  int nregs = 2;

  /* Figure out the size of the object to be passed.  */
  if (mode == BLKmode)
    size = int_size_in_bytes (type);
  else
    size = GET_MODE_SIZE (mode);

  cum->nbytes = (cum->nbytes + 3) & ~3;

  /* Don't pass this arg via a register if all the argument registers
     are used up.  */
  if (cum->nbytes > nregs * UNITS_PER_WORD)
    return 0;

  if (cum->nbytes + size <= nregs * UNITS_PER_WORD)
    return 0;

  /* Don't pass this arg via a register if it would be split between
     registers and memory.  */
  if (type == NULL_TREE
      && cum->nbytes + size > nregs * UNITS_PER_WORD)
    return 0;

  return nregs * UNITS_PER_WORD - cum->nbytes;
}
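
/* Worked example (illustrative): with 4 bytes of arguments already
   assigned, an 8-byte argument with a known type no longer fits in the
   8 bytes of argument registers, so 8 - 4 = 4 bytes are passed in a
   register and the rest in memory.  */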

/* Return the location of the function's value.  This will be either
   $d0 for integer functions, $a0 for pointers, or a PARALLEL of both
   $d0 and $a0 if the -mreturn-pointer-on-d0 flag is set.  Note that
   we only return the PARALLEL for outgoing values; we do not want
   callers relying on this extra copy.  */

static rtx
mn10300_function_value (const_tree valtype,
			const_tree fn_decl_or_type ATTRIBUTE_UNUSED,
			bool outgoing)
{
  rtx rv;
  machine_mode mode = TYPE_MODE (valtype);

  if (! POINTER_TYPE_P (valtype))
    return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
  else if (! TARGET_PTR_A0D0 || ! outgoing
	   || cfun->returns_struct)
    return gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM);

  rv = gen_rtx_PARALLEL (mode, rtvec_alloc (2));
  XVECEXP (rv, 0, 0)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_ADDRESS_REGNUM),
			 GEN_INT (0));

  XVECEXP (rv, 0, 1)
    = gen_rtx_EXPR_LIST (VOIDmode,
			 gen_rtx_REG (mode, FIRST_DATA_REGNUM),
			 GEN_INT (0));
  return rv;
}

/* Implements TARGET_LIBCALL_VALUE.  */

static rtx
mn10300_libcall_value (machine_mode mode,
		       const_rtx fun ATTRIBUTE_UNUSED)
{
  return gen_rtx_REG (mode, FIRST_DATA_REGNUM);
}

/* Implements FUNCTION_VALUE_REGNO_P.  */

bool
mn10300_function_value_regno_p (const unsigned int regno)
{
  return (regno == FIRST_DATA_REGNUM || regno == FIRST_ADDRESS_REGNUM);
}

/* Output an addition operation.  */

const char *
mn10300_output_add (rtx operands[3], bool need_flags)
{
  rtx dest, src1, src2;
  unsigned int dest_regnum, src1_regnum, src2_regnum;
  enum reg_class src1_class, src2_class, dest_class;

  dest = operands[0];
  src1 = operands[1];
  src2 = operands[2];

  dest_regnum = true_regnum (dest);
  src1_regnum = true_regnum (src1);

  dest_class = REGNO_REG_CLASS (dest_regnum);
  src1_class = REGNO_REG_CLASS (src1_regnum);

  if (CONST_INT_P (src2))
    {
      gcc_assert (dest_regnum == src1_regnum);

      if (src2 == const1_rtx && !need_flags)
	return "inc %0";
      if (INTVAL (src2) == 4 && !need_flags && dest_class != DATA_REGS)
	return "inc4 %0";

      gcc_assert (!need_flags || dest_class != SP_REGS);
      return "add %2,%0";
    }
  else if (CONSTANT_P (src2))
    return "add %2,%0";

  src2_regnum = true_regnum (src2);
  src2_class = REGNO_REG_CLASS (src2_regnum);

  if (dest_regnum == src1_regnum)
    return "add %2,%0";
  if (dest_regnum == src2_regnum)
    return "add %1,%0";

  /* The rest of the cases are reg = reg+reg.  For AM33, we can implement
     this directly, as below, but when optimizing for space we can sometimes
     do better by using a mov+add.  For MN103, we claimed that we could
     implement a three-operand add because the various move and add insns
     change sizes across register classes, and we can often do better than
     reload in choosing which operand to move.  */
  if (TARGET_AM33 && optimize_insn_for_speed_p ())
    return "add %2,%1,%0";

  /* Catch cases where no extended register was used.  */
  if (src1_class != EXTENDED_REGS
      && src2_class != EXTENDED_REGS
      && dest_class != EXTENDED_REGS)
    {
      /* We have to copy one of the sources into the destination, then
	 add the other source to the destination.

	 Carefully select which source to copy to the destination; a
	 naive implementation will waste a byte when the source classes
	 are different and the destination is an address register.
	 Selecting the lowest cost register copy will optimize this
	 sequence.  */
      if (src1_class == dest_class)
	return "mov %1,%0\n\tadd %2,%0";
      else
	return "mov %2,%0\n\tadd %1,%0";
    }

  /* At least one register is an extended register.  */

  /* The three operand add instruction on the am33 is a win iff the
     output register is an extended register, or if both source
     registers are extended registers.  */
  if (dest_class == EXTENDED_REGS || src1_class == src2_class)
    return "add %2,%1,%0";

  /* It is better to copy one of the sources to the destination, then
     perform a 2 address add.  The destination in this case must be
     an address or data register and one of the sources must be an
     extended register and the remaining source must not be an extended
     register.

     The best code for this case is to copy the extended reg to the
     destination, then emit a two address add.  */
  if (src1_class == EXTENDED_REGS)
    return "mov %1,%0\n\tadd %2,%0";
  else
    return "mov %2,%0\n\tadd %1,%0";
}

/* Return 1 if X contains a symbolic expression.  We know these
   expressions will have one of a few well defined forms, so
   we need only check those forms.  */

int
mn10300_symbolic_operand (rtx op,
			  machine_mode mode ATTRIBUTE_UNUSED)
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return 1;
    case CONST:
      op = XEXP (op, 0);
      return ((GET_CODE (XEXP (op, 0)) == SYMBOL_REF
	       || GET_CODE (XEXP (op, 0)) == LABEL_REF)
	      && CONST_INT_P (XEXP (op, 1)));
    default:
      return 0;
    }
}
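
/* Illustrative: (symbol_ref "foo"), (label_ref 42), and
   (const (plus (symbol_ref "foo") (const_int 8))) are all symbolic
   operands; a bare (plus ...) is not.  */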
1834
1835/* Try machine dependent ways of modifying an illegitimate address
1836   to be legitimate.  If we find one, return the new valid address.
1837   This macro is used in only one place: `memory_address' in explow.c.
1838
1839   OLDX is the address as it was before break_out_memory_refs was called.
1840   In some cases it is useful to look at this to decide what needs to be done.
1841
1842   Normally it is always safe for this macro to do nothing.  It exists to
1843   recognize opportunities to optimize the output.
1844
1845   But on a few ports with segmented architectures and indexed addressing
1846   (mn10300, hppa) it is used to rewrite certain problematical addresses.  */
1847
1848static rtx
1849mn10300_legitimize_address (rtx x, rtx oldx ATTRIBUTE_UNUSED,
1850			    machine_mode mode ATTRIBUTE_UNUSED)
1851{
1852  if (flag_pic && ! mn10300_legitimate_pic_operand_p (x))
1853    x = mn10300_legitimize_pic_address (oldx, NULL_RTX);
1854
1855  /* Uh-oh.  We might have an address for x[n-100000].  This needs
1856     special handling to avoid creating an indexed memory address
1857     with x-100000 as the base.  */
1858  if (GET_CODE (x) == PLUS
1859      && mn10300_symbolic_operand (XEXP (x, 1), VOIDmode))
1860    {
1861      /* Ugly.  We modify things here so that the address offset specified
1862         by the index expression is computed first, then added to x to form
1863         the entire address.  */

      rtx regx1, regy1, regy2, y;

      /* Strip off any CONST.  */
      y = XEXP (x, 1);
      if (GET_CODE (y) == CONST)
        y = XEXP (y, 0);

      if (GET_CODE (y) == PLUS || GET_CODE (y) == MINUS)
        {
          regx1 = force_reg (Pmode, force_operand (XEXP (x, 0), 0));
          regy1 = force_reg (Pmode, force_operand (XEXP (y, 0), 0));
          regy2 = force_reg (Pmode, force_operand (XEXP (y, 1), 0));
          regx1 = force_reg (Pmode,
                             gen_rtx_fmt_ee (GET_CODE (y), Pmode, regx1,
                                             regy2));
          return force_reg (Pmode, gen_rtx_PLUS (Pmode, regx1, regy1));
        }
    }
  return x;
}

/* Convert a non-PIC address in `orig' to a PIC address using @GOT or
   @GOTOFF in `reg'.  */

rtx
mn10300_legitimize_pic_address (rtx orig, rtx reg)
{
  rtx x;

  if (GET_CODE (orig) == LABEL_REF
      || (GET_CODE (orig) == SYMBOL_REF
          && (CONSTANT_POOL_ADDRESS_P (orig)
              || ! MN10300_GLOBAL_P (orig))))
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOTOFF);
      x = gen_rtx_CONST (SImode, x);
      emit_move_insn (reg, x);

      x = emit_insn (gen_addsi3 (reg, reg, pic_offset_table_rtx));
    }
  else if (GET_CODE (orig) == SYMBOL_REF)
    {
      if (reg == NULL)
        reg = gen_reg_rtx (Pmode);

      x = gen_rtx_UNSPEC (SImode, gen_rtvec (1, orig), UNSPEC_GOT);
      x = gen_rtx_CONST (SImode, x);
      x = gen_rtx_PLUS (SImode, pic_offset_table_rtx, x);
      x = gen_const_mem (SImode, x);

      x = emit_move_insn (reg, x);
    }
  else
    return orig;

  set_unique_reg_note (x, REG_EQUAL, orig);
  return reg;
}
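
/* In other words (an illustrative summary of the two cases above):
   local symbols and labels are reached as GOT-relative offsets,

     reg = const (unspec [sym] GOTOFF);  reg += pic_reg

   while global symbols go through one GOT indirection,

     reg = mem (pic_reg + const (unspec [sym] GOT))

   where pic_reg stands for pic_offset_table_rtx.  */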

/* Return zero if X references a SYMBOL_REF or LABEL_REF whose symbol
   isn't protected by a PIC unspec; nonzero otherwise.  */

int
mn10300_legitimate_pic_operand_p (rtx x)
{
  const char *fmt;
  int i;

  if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
    return 0;

  if (GET_CODE (x) == UNSPEC
      && (XINT (x, 1) == UNSPEC_PIC
          || XINT (x, 1) == UNSPEC_GOT
          || XINT (x, 1) == UNSPEC_GOTOFF
          || XINT (x, 1) == UNSPEC_PLT
          || XINT (x, 1) == UNSPEC_GOTSYM_OFF))
    return 1;

  fmt = GET_RTX_FORMAT (GET_CODE (x));
  for (i = GET_RTX_LENGTH (GET_CODE (x)) - 1; i >= 0; i--)
    {
      if (fmt[i] == 'E')
        {
          int j;

          for (j = XVECLEN (x, i) - 1; j >= 0; j--)
            if (! mn10300_legitimate_pic_operand_p (XVECEXP (x, i, j)))
              return 0;
        }
      else if (fmt[i] == 'e'
               && ! mn10300_legitimate_pic_operand_p (XEXP (x, i)))
        return 0;
    }

  return 1;
}

/* Return TRUE if the address X, taken from a (MEM:MODE X) rtx, is
   legitimate, and FALSE otherwise.

   On the mn10300, the value in the address register must be
   in the same memory space/segment as the effective address.

   This is problematical for reload since it does not understand
   that base+index != index+base in a memory reference.

   Note it is still possible to use reg+reg addressing modes,
   it's just much more difficult.  For a discussion of a possible
   workaround and solution, see the comments in pa.c before the
   function record_unscaled_index_insn_codes.  */

static bool
mn10300_legitimate_address_p (machine_mode mode, rtx x, bool strict)
{
  rtx base, index;

  if (CONSTANT_ADDRESS_P (x))
    return !flag_pic || mn10300_legitimate_pic_operand_p (x);

  if (RTX_OK_FOR_BASE_P (x, strict))
    return true;

  if (TARGET_AM33 && (mode == SImode || mode == SFmode || mode == HImode))
    {
      if (GET_CODE (x) == POST_INC)
        return RTX_OK_FOR_BASE_P (XEXP (x, 0), strict);
      if (GET_CODE (x) == POST_MODIFY)
        return (RTX_OK_FOR_BASE_P (XEXP (x, 0), strict)
                && CONSTANT_ADDRESS_P (XEXP (x, 1)));
    }

  if (GET_CODE (x) != PLUS)
    return false;

  base = XEXP (x, 0);
  index = XEXP (x, 1);

  if (!REG_P (base))
    return false;
  if (REG_P (index))
    {
      /* ??? Without AM33 generalized (Ri,Rn) addressing, reg+reg
         addressing is hard to satisfy.  */
      if (!TARGET_AM33)
        return false;

      return (REGNO_GENERAL_P (REGNO (base), strict)
              && REGNO_GENERAL_P (REGNO (index), strict));
    }

  if (!REGNO_STRICT_OK_FOR_BASE_P (REGNO (base), strict))
    return false;

  if (CONST_INT_P (index))
    return IN_RANGE (INTVAL (index), -1 - 0x7fffffff, 0x7fffffff);

  if (CONSTANT_ADDRESS_P (index))
    return !flag_pic || mn10300_legitimate_pic_operand_p (index);

  return false;
}
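
/* To summarize with a few illustrative examples: (reg),
   (post_inc (reg)) for word-sized modes on the AM33,
   (plus (reg) (const_int N)), and constant addresses are accepted
   above; (plus (reg) (reg)) is accepted only on the AM33; everything
   else is rejected.  */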

/* Return true if register REGNO may be considered a member of class
   RCLASS.  Pseudos are accepted optimistically unless STRICT, in which
   case they are looked up through reg_renumber.  */

bool
mn10300_regno_in_class_p (unsigned regno, int rclass, bool strict)
{
  if (regno >= FIRST_PSEUDO_REGISTER)
    {
      if (!strict)
        return true;
      if (!reg_renumber)
        return false;
      regno = reg_renumber[regno];
      if (regno == INVALID_REGNUM)
        return false;
    }
  return TEST_HARD_REG_BIT (reg_class_contents[rclass], regno);
}

/* Helper for LEGITIMIZE_RELOAD_ADDRESS: if X is a reg+reg address in
   which one operand is the stack pointer, reload that operand into a
   general register, since the stack pointer is not a valid operand
   there.  Return the fixed-up address, or NULL_RTX if nothing needed
   to be done.  */

rtx
mn10300_legitimize_reload_address (rtx x,
                                   machine_mode mode ATTRIBUTE_UNUSED,
                                   int opnum, int type,
                                   int ind_levels ATTRIBUTE_UNUSED)
{
  bool any_change = false;

  /* See above re disabling reg+reg addressing for MN103.  */
  if (!TARGET_AM33)
    return NULL_RTX;

  if (GET_CODE (x) != PLUS)
    return NULL_RTX;

  if (XEXP (x, 0) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 0), NULL_RTX, &XEXP (x, 0), NULL,
                   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      any_change = true;
    }
  if (XEXP (x, 1) == stack_pointer_rtx)
    {
      push_reload (XEXP (x, 1), NULL_RTX, &XEXP (x, 1), NULL,
                   GENERAL_REGS, GET_MODE (x), VOIDmode, 0, 0,
                   opnum, (enum reload_type) type);
      any_change = true;
    }

  return any_change ? x : NULL_RTX;
}

/* Implement TARGET_LEGITIMATE_CONSTANT_P.  Returns TRUE if X is a valid
   constant.  Note that some "constants" aren't valid, such as TLS
   symbols and unconverted GOT-based references, so we eliminate
   those here.  */

static bool
mn10300_legitimate_constant_p (machine_mode mode ATTRIBUTE_UNUSED, rtx x)
{
  switch (GET_CODE (x))
    {
    case CONST:
      x = XEXP (x, 0);

      if (GET_CODE (x) == PLUS)
        {
          if (! CONST_INT_P (XEXP (x, 1)))
            return false;
          x = XEXP (x, 0);
        }

      /* Only some unspecs are valid as "constants".  */
      if (GET_CODE (x) == UNSPEC)
        {
          switch (XINT (x, 1))
            {
            case UNSPEC_PIC:
            case UNSPEC_GOT:
            case UNSPEC_GOTOFF:
            case UNSPEC_PLT:
              return true;
            default:
              return false;
            }
        }

      /* We must have drilled down to a symbol.  */
      if (! mn10300_symbolic_operand (x, Pmode))
        return false;
      break;

    default:
      break;
    }

  return true;
}
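
/* For example (illustrative):
   (const (plus (symbol_ref "x") (const_int 8))) and
   (const (unspec [(symbol_ref "x")] UNSPEC_GOTOFF)) are accepted above,
   while (const (plus (symbol_ref "x") (reg 0))) and unlisted unspecs
   such as UNSPEC_GOTSYM_OFF are rejected.  */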

/* Undo pic address legitimization for the benefit of debug info.  */

static rtx
mn10300_delegitimize_address (rtx orig_x)
{
  rtx x = orig_x, ret, addend = NULL;
  bool need_mem;

  if (MEM_P (x))
    x = XEXP (x, 0);
  if (GET_CODE (x) != PLUS || GET_MODE (x) != Pmode)
    return orig_x;

  if (XEXP (x, 0) == pic_offset_table_rtx)
    ;
  /* With the REG+REG addressing of AM33, var-tracking can re-assemble
     some odd-looking "addresses" that were never valid in the first place.
     We need to look harder to avoid warnings being emitted.  */
  else if (GET_CODE (XEXP (x, 0)) == PLUS)
    {
      rtx x0 = XEXP (x, 0);
      rtx x00 = XEXP (x0, 0);
      rtx x01 = XEXP (x0, 1);

      if (x00 == pic_offset_table_rtx)
        addend = x01;
      else if (x01 == pic_offset_table_rtx)
        addend = x00;
      else
        return orig_x;
    }
  else
    return orig_x;
  x = XEXP (x, 1);

  if (GET_CODE (x) != CONST)
    return orig_x;
  x = XEXP (x, 0);
  if (GET_CODE (x) != UNSPEC)
    return orig_x;

  ret = XVECEXP (x, 0, 0);
  if (XINT (x, 1) == UNSPEC_GOTOFF)
    need_mem = false;
  else if (XINT (x, 1) == UNSPEC_GOT)
    need_mem = true;
  else
    return orig_x;

  gcc_assert (GET_CODE (ret) == SYMBOL_REF);
  if (need_mem != MEM_P (orig_x))
    return orig_x;
  if (need_mem && addend)
    return orig_x;
  if (addend)
    ret = gen_rtx_PLUS (Pmode, addend, ret);
  return ret;
}
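
/* For instance (illustrative), this maps
     (mem (plus (reg pic) (const (unspec [(symbol_ref "g")] UNSPEC_GOT))))
   back to (symbol_ref "g"), inverting the expansion performed by
   mn10300_legitimize_pic_address above.  */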

/* For addresses, costs are relative to "MOV (Rm),Rn".  For AM33 this is
   the 3-byte fully general instruction; for MN103 this is the 2-byte form
   with an address register.  */

static int
mn10300_address_cost (rtx x, machine_mode mode ATTRIBUTE_UNUSED,
                      addr_space_t as ATTRIBUTE_UNUSED, bool speed)
{
  HOST_WIDE_INT i;
  rtx base, index;

  switch (GET_CODE (x))
    {
    case CONST:
    case SYMBOL_REF:
    case LABEL_REF:
      /* We assume all of these require a 32-bit constant, even though
         some symbol and label references can be relaxed.  */
      return speed ? 1 : 4;

    case REG:
    case SUBREG:
    case POST_INC:
      return 0;

    case POST_MODIFY:
      /* Assume any symbolic offset is a 32-bit constant.  */
      i = (CONST_INT_P (XEXP (x, 1)) ? INTVAL (XEXP (x, 1)) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
        return speed ? 0 : 1;
      if (speed)
        return 1;
      if (IN_RANGE (i, -0x800000, 0x7fffff))
        return 3;
      return 4;

    case PLUS:
      base = XEXP (x, 0);
      index = XEXP (x, 1);
      if (register_operand (index, SImode))
        {
          /* Attempt to minimize the number of registers in the address.
             This is similar to what other ports do.  */
          if (register_operand (base, SImode))
            return 1;

          base = XEXP (x, 1);
          index = XEXP (x, 0);
        }

      /* Assume any symbolic offset is a 32-bit constant.  Note that
         INDEX is the non-register operand after the swap above.  */
      i = (CONST_INT_P (index) ? INTVAL (index) : 0x12345678);
      if (IN_RANGE (i, -128, 127))
        return speed ? 0 : 1;
      if (IN_RANGE (i, -32768, 32767))
        return speed ? 0 : 2;
      return speed ? 2 : 6;

    default:
      return rtx_cost (x, MEM, 0, speed);
    }
}
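
/* A couple of worked size costs from the above (illustrative): with
   SPEED false, (plus (reg) (const_int 100)) costs 1, since an 8-bit
   displacement suffices; (plus (reg) (const_int 30000)) costs 2 for a
   16-bit displacement; and a symbolic (plus (reg) (symbol_ref "x"))
   falls through to the full 32-bit form and costs 6.  */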

/* Implement the TARGET_REGISTER_MOVE_COST hook.

   Recall that the base value of 2 is required by assumptions elsewhere
   in the body of the compiler, and that cost 2 is special-cased as an
   early exit from reload meaning no work is required.  */

static int
mn10300_register_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                            reg_class_t ifrom, reg_class_t ito)
{
  enum reg_class from = (enum reg_class) ifrom;
  enum reg_class to = (enum reg_class) ito;
  enum reg_class scratch, test;

  /* Simplify the following code by unifying the fp register classes.  */
  if (to == FP_ACC_REGS)
    to = FP_REGS;
  if (from == FP_ACC_REGS)
    from = FP_REGS;

  /* Diagnose invalid moves by costing them as two moves.  */

  scratch = NO_REGS;
  test = from;
  if (to == SP_REGS)
    scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
  else if (to == MDR_REGS)
    scratch = DATA_REGS;
  else if (to == FP_REGS && to != from)
    scratch = GENERAL_REGS;
  else
    {
      test = to;
      if (from == SP_REGS)
        scratch = (TARGET_AM33 ? GENERAL_REGS : ADDRESS_REGS);
      else if (from == MDR_REGS)
        scratch = DATA_REGS;
      else if (from == FP_REGS && to != from)
        scratch = GENERAL_REGS;
    }
  if (scratch != NO_REGS && !reg_class_subset_p (test, scratch))
    return (mn10300_register_move_cost (VOIDmode, from, scratch)
            + mn10300_register_move_cost (VOIDmode, scratch, to));

  /* From here on, all we need consider are legal combinations.  */

  if (optimize_size)
    {
      /* The scale here is bytes * 2.  */

      if (from == to && (to == ADDRESS_REGS || to == DATA_REGS))
        return 2;

      if (from == SP_REGS)
        return (to == ADDRESS_REGS ? 2 : 6);

      /* For MN103, all remaining legal moves are two bytes.  The AM33
         cases, including the extended registers, are handled below.  */
      if (!TARGET_AM33)
        return 4;

      if (to == SP_REGS)
        return (from == ADDRESS_REGS ? 4 : 6);

      if ((from == ADDRESS_REGS || from == DATA_REGS)
           && (to == ADDRESS_REGS || to == DATA_REGS))
        return 4;

      if (to == EXTENDED_REGS)
        return (to == from ? 6 : 4);

      /* What's left are SP_REGS, FP_REGS, or combinations of the above.  */
      return 6;
    }
  else
    {
      /* The scale here is cycles * 2.  */

      if (to == FP_REGS)
        return 8;
      if (from == FP_REGS)
        return 4;

      /* All legal moves between integral registers are single cycle.  */
      return 2;
    }
}
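
/* A worked example of the size costing above (illustrative): on MN103,
   moving SP_REGS to DATA_REGS is not a direct move, so it is costed as
   SP -> ADDRESS (2) plus ADDRESS -> DATA (4) via the scratch-register
   recursion, i.e. 6, or three bytes on the bytes * 2 scale.  */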

/* Implement the TARGET_MEMORY_MOVE_COST hook.

   Since we lack information about the form of the address, this cost
   must be speed-relative; it should, however, never be cheaper than a
   size-relative register move cost above, and it is not.  */

static int
mn10300_memory_move_cost (machine_mode mode ATTRIBUTE_UNUSED,
                          reg_class_t iclass, bool in ATTRIBUTE_UNUSED)
{
  enum reg_class rclass = (enum reg_class) iclass;

  if (rclass == FP_REGS)
    return 8;
  return 6;
}

/* Implement the TARGET_RTX_COSTS hook.

   Speed-relative costs are relative to COSTS_N_INSNS, which is intended
   to represent cycles.  Size-relative costs are in bytes.  */

static bool
mn10300_rtx_costs (rtx x, int code, int outer_code, int opno ATTRIBUTE_UNUSED,
                   int *ptotal, bool speed)
{
  /* This value is used for SYMBOL_REF etc where we want to pretend
     we have a full 32-bit constant.  */
  HOST_WIDE_INT i = 0x12345678;
  int total;

  switch (code)
    {
    case CONST_INT:
      i = INTVAL (x);
    do_int_costs:
      if (speed)
        {
          if (outer_code == SET)
            {
              /* 16-bit integer loads have latency 1, 32-bit loads 2.  */
              if (IN_RANGE (i, -32768, 32767))
                total = COSTS_N_INSNS (1);
              else
                total = COSTS_N_INSNS (2);
            }
          else
            {
              /* 16-bit integer operands don't affect latency;
                 24-bit and 32-bit operands add a cycle.  */
              if (IN_RANGE (i, -32768, 32767))
                total = 0;
              else
                total = COSTS_N_INSNS (1);
            }
        }
      else
        {
          if (outer_code == SET)
            {
              if (i == 0)
                total = 1;
              else if (IN_RANGE (i, -128, 127))
                total = 2;
              else if (IN_RANGE (i, -32768, 32767))
                total = 3;
              else
                total = 6;
            }
          else
            {
              /* Reference here is ADD An,Dn, vs ADD imm,Dn.  */
              if (IN_RANGE (i, -128, 127))
                total = 0;
              else if (IN_RANGE (i, -32768, 32767))
                total = 2;
              else if (TARGET_AM33 && IN_RANGE (i, -0x01000000, 0x00ffffff))
                total = 3;
              else
                total = 4;
            }
        }
      goto alldone;

    case CONST:
    case LABEL_REF:
    case SYMBOL_REF:
    case CONST_DOUBLE:
      /* We assume all of these require a 32-bit constant, even though
         some symbol and label references can be relaxed.  */
      goto do_int_costs;

    case UNSPEC:
      switch (XINT (x, 1))
        {
        case UNSPEC_PIC:
        case UNSPEC_GOT:
        case UNSPEC_GOTOFF:
        case UNSPEC_PLT:
        case UNSPEC_GOTSYM_OFF:
          /* The PIC unspecs also resolve to a 32-bit constant.  */
          goto do_int_costs;

        default:
          /* Assume any non-listed unspec is some sort of arithmetic.  */
          goto do_arith_costs;
        }

    case PLUS:
      /* Notice the size difference of INC and INC4.  */
      if (!speed && outer_code == SET && CONST_INT_P (XEXP (x, 1)))
        {
          i = INTVAL (XEXP (x, 1));
          if (i == 1 || i == 4)
            {
              total = 1 + rtx_cost (XEXP (x, 0), PLUS, 0, speed);
              goto alldone;
            }
        }
      goto do_arith_costs;

    case MINUS:
    case AND:
    case IOR:
    case XOR:
    case NOT:
    case NEG:
    case ZERO_EXTEND:
    case SIGN_EXTEND:
    case COMPARE:
    case BSWAP:
    case CLZ:
    do_arith_costs:
      total = (speed ? COSTS_N_INSNS (1) : 2);
      break;

    case ASHIFT:
      /* Notice the size difference of ASL2 and variants.  */
      if (!speed && CONST_INT_P (XEXP (x, 1)))
        switch (INTVAL (XEXP (x, 1)))
          {
          case 1:
          case 2:
            total = 1;
            goto alldone;
          case 3:
          case 4:
            total = 2;
            goto alldone;
          }
      /* FALLTHRU */

    case ASHIFTRT:
    case LSHIFTRT:
      total = (speed ? COSTS_N_INSNS (1) : 3);
      goto alldone;

    case MULT:
      total = (speed ? COSTS_N_INSNS (3) : 2);
      break;

    case DIV:
    case UDIV:
    case MOD:
    case UMOD:
      total = (speed ? COSTS_N_INSNS (39)
                /* Include space to load+retrieve MDR.  */
                : code == MOD || code == UMOD ? 6 : 4);
      break;

    case MEM:
      total = mn10300_address_cost (XEXP (x, 0), GET_MODE (x),
                                    MEM_ADDR_SPACE (x), speed);
      if (speed)
        total = COSTS_N_INSNS (2 + total);
      goto alldone;

    default:
      /* Probably not implemented.  Assume external call.  */
      total = (speed ? COSTS_N_INSNS (10) : 7);
      break;
    }

  *ptotal = total;
  return false;

 alldone:
  *ptotal = total;
  return true;
}
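
/* A few worked examples of the size costs above (illustrative): a
   (set (reg) (const_int 0)) is costed 1 byte, (const_int 100) 2 bytes,
   and a symbol_ref the full 6 bytes; an (ashift (reg) (const_int 2))
   is 1 byte thanks to ASL2, while a variable shift is costed 3.  */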

/* If using PIC, mark a SYMBOL_REF for a non-global symbol so that we
   may access it using GOTOFF instead of GOT.  */

static void
mn10300_encode_section_info (tree decl, rtx rtl, int first)
{
  rtx symbol;

  default_encode_section_info (decl, rtl, first);

  if (! MEM_P (rtl))
    return;

  symbol = XEXP (rtl, 0);
  if (GET_CODE (symbol) != SYMBOL_REF)
    return;

  if (flag_pic)
    SYMBOL_REF_FLAG (symbol) = (*targetm.binds_local_p) (decl);
}

/* Dispatch tables on the mn10300 are extremely expensive in terms of code
   and readonly data size.  So we crank up the case threshold value to
   encourage a series of if/else comparisons to implement many small switch
   statements.  In theory, this value could be increased much more if we
   were solely optimizing for space, but we keep it "reasonable" to avoid
   serious code efficiency lossage.  */

static unsigned int
mn10300_case_values_threshold (void)
{
  return 6;
}

/* Worker function for TARGET_TRAMPOLINE_INIT.  */

static void
mn10300_trampoline_init (rtx m_tramp, tree fndecl, rtx chain_value)
{
  rtx mem, disp, fnaddr = XEXP (DECL_RTL (fndecl), 0);

  /* This is a strict alignment target, which means that we play
     some games to make sure that the locations at which we need
     to store <chain> and <disp> wind up at aligned addresses.

        0x28 0x00                       add 0,d0
                  0xfc 0xdd             mov chain,a1
        <chain>
        0xf8 0xed 0x00                  btst 0,d1
                       0xdc             jmp fnaddr
        <disp>

     Note that the two extra insns are effectively nops; they
     clobber the flags but do not affect the contents of D0 or D1.  */

  disp = expand_binop (SImode, sub_optab, fnaddr,
                       plus_constant (Pmode, XEXP (m_tramp, 0), 11),
                       NULL_RTX, 1, OPTAB_DIRECT);
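
  /* The offset of 11 above appears to be the distance from the start
     of the trampoline to the JMP opcode byte, against which the 32-bit
     displacement <disp> is taken (an inference from the layout sketched
     in the comment; consult the ISA manual to confirm).  */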

  mem = adjust_address (m_tramp, SImode, 0);
  emit_move_insn (mem, gen_int_mode (0xddfc0028, SImode));
  mem = adjust_address (m_tramp, SImode, 4);
  emit_move_insn (mem, chain_value);
  mem = adjust_address (m_tramp, SImode, 8);
  emit_move_insn (mem, gen_int_mode (0xdc00edf8, SImode));
  mem = adjust_address (m_tramp, SImode, 12);
  emit_move_insn (mem, disp);
}

/* Output the assembler code for a C++ thunk function.
   THUNK_DECL is the declaration for the thunk function itself, FUNCTION
   is the decl for the target function.  DELTA is an immediate constant
   offset to be added to the THIS parameter.  If VCALL_OFFSET is nonzero
   the word at the adjusted address *(*THIS' + VCALL_OFFSET) should be
   additionally added to THIS.  Finally jump to the entry point of
   FUNCTION.  */

static void
mn10300_asm_output_mi_thunk (FILE *        file,
                             tree          thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta,
                             HOST_WIDE_INT vcall_offset,
                             tree          function)
{
  const char * _this;

  /* Get the register holding the THIS parameter.  Handle the case
     where there is a hidden first argument for a returned structure.  */
  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
    _this = reg_names [FIRST_ARGUMENT_REGNUM + 1];
  else
    _this = reg_names [FIRST_ARGUMENT_REGNUM];

  fprintf (file, "\t%s Thunk Entry Point:\n", ASM_COMMENT_START);

  if (delta)
    fprintf (file, "\tadd %d, %s\n", (int) delta, _this);

  if (vcall_offset)
    {
      const char * scratch = reg_names [FIRST_ADDRESS_REGNUM + 1];

      fprintf (file, "\tmov %s, %s\n", _this, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %d, %s\n", (int) vcall_offset, scratch);
      fprintf (file, "\tmov (%s), %s\n", scratch, scratch);
      fprintf (file, "\tadd %s, %s\n", scratch, _this);
    }

  fputs ("\tjmp ", file);
  assemble_name (file, XSTR (XEXP (DECL_RTL (function), 0), 0));
  putc ('\n', file);
}
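
/* For a thunk with DELTA == 4 and no vcall offset, the output therefore
   looks something like (illustrative; the register name depends on
   FIRST_ARGUMENT_REGNUM and the comment prefix on ASM_COMMENT_START):

        # Thunk Entry Point:
        add 4, d0
        jmp target  */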

/* Return true if mn10300_asm_output_mi_thunk would be able to output the
   assembler code for the thunk function specified by the arguments
   it is passed, and false otherwise.  */

static bool
mn10300_can_output_mi_thunk (const_tree    thunk_fndecl ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT delta        ATTRIBUTE_UNUSED,
                             HOST_WIDE_INT vcall_offset ATTRIBUTE_UNUSED,
                             const_tree    function     ATTRIBUTE_UNUSED)
{
  return true;
}

/* Return true if hard register REGNO can hold a value of mode MODE.  */

bool
mn10300_hard_regno_mode_ok (unsigned int regno, machine_mode mode)
{
  if (REGNO_REG_CLASS (regno) == FP_REGS
      || REGNO_REG_CLASS (regno) == FP_ACC_REGS)
    /* Do not store integer values in FP registers.  */
    return GET_MODE_CLASS (mode) == MODE_FLOAT && ((regno & 1) == 0);

  if (! TARGET_AM33 && REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return false;

  if (((regno) & 1) == 0 || GET_MODE_SIZE (mode) == 4)
    return true;

  if (REGNO_REG_CLASS (regno) == DATA_REGS
      || (TARGET_AM33 && REGNO_REG_CLASS (regno) == ADDRESS_REGS)
      || REGNO_REG_CLASS (regno) == EXTENDED_REGS)
    return GET_MODE_SIZE (mode) <= 4;

  return false;
}

/* Return true if values of modes MODE1 and MODE2 can share hard
   registers.  */

bool
mn10300_modes_tieable (machine_mode mode1, machine_mode mode2)
{
  if (GET_MODE_CLASS (mode1) == MODE_FLOAT
      && GET_MODE_CLASS (mode2) != MODE_FLOAT)
    return false;

  if (GET_MODE_CLASS (mode2) == MODE_FLOAT
      && GET_MODE_CLASS (mode1) != MODE_FLOAT)
    return false;

  if (TARGET_AM33
      || mode1 == mode2
      || (GET_MODE_SIZE (mode1) <= 4 && GET_MODE_SIZE (mode2) <= 4))
    return true;

  return false;
}

/* Return the set of CC_FLAG_* bits recorded by comparisons in MODE.  */

static int
cc_flags_for_mode (machine_mode mode)
{
  switch (mode)
    {
    case CCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C | CC_FLAG_V;
    case CCZNCmode:
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_C;
    case CCZNmode:
      return CC_FLAG_Z | CC_FLAG_N;
    case CC_FLOATmode:
      return -1;
    default:
      gcc_unreachable ();
    }
}

/* Return the set of CC_FLAG_* bits that comparison code CODE needs,
   or -1 for the floating-point-only codes.  */

static int
cc_flags_for_code (enum rtx_code code)
{
  switch (code)
    {
    case EQ:	/* Z */
    case NE:	/* ~Z */
      return CC_FLAG_Z;

    case LT:	/* N */
    case GE:	/* ~N */
      return CC_FLAG_N;

    case GT:    /* ~(Z|(N^V)) */
    case LE:    /* Z|(N^V) */
      return CC_FLAG_Z | CC_FLAG_N | CC_FLAG_V;

    case GEU:	/* ~C */
    case LTU:	/* C */
      return CC_FLAG_C;

    case GTU:	/* ~(C | Z) */
    case LEU:	/* C | Z */
      return CC_FLAG_Z | CC_FLAG_C;

    case ORDERED:
    case UNORDERED:
    case LTGT:
    case UNEQ:
    case UNGE:
    case UNGT:
    case UNLE:
    case UNLT:
      return -1;

    default:
      gcc_unreachable ();
    }
}

/* Implement SELECT_CC_MODE: return the narrowest condition-code mode
   whose flags cover those required by comparison CODE.  */

machine_mode
mn10300_select_cc_mode (enum rtx_code code, rtx x, rtx y ATTRIBUTE_UNUSED)
{
  int req;

  if (GET_MODE_CLASS (GET_MODE (x)) == MODE_FLOAT)
    return CC_FLOATmode;

  req = cc_flags_for_code (code);

  if (req & CC_FLAG_V)
    return CCmode;
  if (req & CC_FLAG_C)
    return CCZNCmode;
  return CCZNmode;
}
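
/* For example (illustrative): EQ needs only the Z flag and gets
   CCZNmode; LTU needs the carry flag and gets CCZNCmode; signed GT
   needs the overflow flag as well and therefore selects the full
   CCmode.  */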

static inline bool
set_is_load_p (rtx set)
{
  return MEM_P (SET_SRC (set));
}

static inline bool
set_is_store_p (rtx set)
{
  return MEM_P (SET_DEST (set));
}

/* Update scheduling costs for situations that cannot be
   described using the attributes and DFA machinery.
   DEP is the insn being scheduled.
   INSN is the previous insn.
   COST is the current cycle cost for DEP.  */

static int
mn10300_adjust_sched_cost (rtx_insn *insn, rtx link, rtx_insn *dep, int cost)
{
  rtx insn_set;
  rtx dep_set;
  int timings;

  if (!TARGET_AM33)
    return 1;

  /* We are only interested in pairs of SETs.  */
  insn_set = single_set (insn);
  if (!insn_set)
    return cost;

  dep_set = single_set (dep);
  if (!dep_set)
    return cost;

  /* For the AM34 a load instruction that follows a
     store instruction incurs an extra cycle of delay.  */
  if (mn10300_tune_cpu == PROCESSOR_AM34
      && set_is_load_p (dep_set)
      && set_is_store_p (insn_set))
    cost += 1;

  /* For the AM34 a non-store, non-branch FPU insn that follows
     another FPU insn incurs a one cycle throughput increase.  */
  else if (mn10300_tune_cpu == PROCESSOR_AM34
      && ! set_is_store_p (insn_set)
      && ! JUMP_P (insn)
      && GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) == MODE_FLOAT
      && GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) == MODE_FLOAT)
    cost += 1;

  /*  Resolve the conflict described in section 1-7-4 of
      Chapter 3 of the MN103E Series Instruction Manual
      where it says:

        "When the preceding instruction is a CPU load or
         store instruction, a following FPU instruction
         cannot be executed until the CPU completes the
         latency period even though there are no register
         or flag dependencies between them."  */

  /* Only the AM33-2 (and later) CPUs have FPU instructions.  */
  if (! TARGET_AM33_2)
    return cost;

  /* If a data dependence already exists then the cost is correct.  */
  if (REG_NOTE_KIND (link) == 0)
    return cost;

  /* Check that the instruction about to be scheduled is an FPU
     instruction.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (dep_set))) != MODE_FLOAT)
    return cost;

  /* Now check to see if the previous instruction is a load or store.  */
  if (! set_is_load_p (insn_set) && ! set_is_store_p (insn_set))
    return cost;

  /* XXX: Verify: The text of 1-7-4 implies that the restriction
     only applies when an INTEGER load/store precedes an FPU
     instruction, but is this true?  For now we assume that it is.  */
  if (GET_MODE_CLASS (GET_MODE (SET_SRC (insn_set))) != MODE_INT)
    return cost;

  /* Extract the latency value from the timings attribute, which encodes
     the latency in its trailing decimal digit(s): one digit for values
     below 100, two otherwise.  */
  timings = get_attr_timings (insn);
  return timings < 100 ? (timings % 10) : (timings % 100);
}

/* Implement TARGET_CONDITIONAL_REGISTER_USAGE.  */

static void
mn10300_conditional_register_usage (void)
{
  unsigned int i;

  if (!TARGET_AM33)
    {
      for (i = FIRST_EXTENDED_REGNUM;
           i <= LAST_EXTENDED_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (!TARGET_AM33_2)
    {
      for (i = FIRST_FP_REGNUM;
           i <= LAST_FP_REGNUM; i++)
        fixed_regs[i] = call_used_regs[i] = 1;
    }
  if (flag_pic)
    fixed_regs[PIC_OFFSET_TABLE_REGNUM] =
    call_used_regs[PIC_OFFSET_TABLE_REGNUM] = 1;
}

/* Worker function for TARGET_MD_ASM_CLOBBERS.
   We do this in the mn10300 backend to maintain source compatibility
   with the old cc0-based compiler.  */

static tree
mn10300_md_asm_clobbers (tree outputs ATTRIBUTE_UNUSED,
                         tree inputs ATTRIBUTE_UNUSED,
                         tree clobbers)
{
  clobbers = tree_cons (NULL_TREE, build_string (5, "EPSW"),
                        clobbers);
  return clobbers;
}

/* A helper function for splitting cbranch patterns after reload.  */

void
mn10300_split_cbranch (machine_mode cmp_mode, rtx cmp_op, rtx label_ref)
{
  rtx flags, x;

  flags = gen_rtx_REG (cmp_mode, CC_REG);
  x = gen_rtx_COMPARE (cmp_mode, XEXP (cmp_op, 0), XEXP (cmp_op, 1));
  x = gen_rtx_SET (VOIDmode, flags, x);
  emit_insn (x);

  x = gen_rtx_fmt_ee (GET_CODE (cmp_op), VOIDmode, flags, const0_rtx);
  x = gen_rtx_IF_THEN_ELSE (VOIDmode, x, label_ref, pc_rtx);
  x = gen_rtx_SET (VOIDmode, pc_rtx, x);
  emit_jump_insn (x);
}
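
/* Schematically (illustrative), the split above turns

     (set (pc) (if_then_else (lt (reg X) (reg Y)) (label_ref L) (pc)))

   into the two-insn sequence

     (set (reg:CC cc) (compare (reg X) (reg Y)))
     (set (pc) (if_then_else (lt (reg:CC cc) (const_int 0))
                             (label_ref L) (pc)))  */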

/* A helper function for matching parallels that set the flags.  */

bool
mn10300_match_ccmode (rtx insn, machine_mode cc_mode)
{
  rtx op1, flags;
  machine_mode flags_mode;

  gcc_checking_assert (XVECLEN (PATTERN (insn), 0) == 2);

  op1 = XVECEXP (PATTERN (insn), 0, 1);
  gcc_checking_assert (GET_CODE (SET_SRC (op1)) == COMPARE);

  flags = SET_DEST (op1);
  flags_mode = GET_MODE (flags);

  if (GET_MODE (SET_SRC (op1)) != flags_mode)
    return false;
  if (GET_MODE_CLASS (flags_mode) != MODE_CC)
    return false;

  /* Ensure that the mode of FLAGS is compatible with CC_MODE.  */
  if (cc_flags_for_mode (flags_mode) & ~cc_flags_for_mode (cc_mode))
    return false;

  return true;
}

/* This function is used to help split:

     (set (reg) (and (reg) (int)))

   into:

     (set (reg) (shift (reg) (int)))
     (set (reg) (shift (reg) (int)))

   where the shifts will be shorter than the "and" insn.

   It returns the number of bits that should be shifted.  A positive
   value means that the low bits are to be cleared (and hence the
   shifts should be right followed by left) whereas a negative value
   means that the high bits are to be cleared (left followed by right).
   Zero is returned when it would not be economical to split the AND.  */
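
/* For example (illustrative): 0xfffffffc yields 2, i.e. clear the low
   two bits with a right shift by 2 followed by a left shift by 2,
   while 0x3fffffff yields -2, clearing the two high bits with a left
   shift followed by a right shift.  */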

int
mn10300_split_and_operand_count (rtx op)
{
  HOST_WIDE_INT val = INTVAL (op);
  int count;

  if (val < 0)
    {
      /* High bit is set, look for bits clear at the bottom.  */
      count = exact_log2 (-val);
      if (count < 0)
        return 0;
      /* This is only a size win if we can use the asl2 insn.  Otherwise we
         would be replacing 1 6-byte insn with 2 3-byte insns.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return count;
    }
  else
    {
      /* High bit is clear, look for bits set at the bottom.  */
      count = exact_log2 (val + 1);
      /* If VAL + 1 is not a power of two, exact_log2 returns -1 and
         COUNT becomes 33, which fails the economy check below.  */
      count = 32 - count;
      /* Again, this is only a size win with asl2.  */
      if (count > (optimize_insn_for_speed_p () ? 2 : 4))
        return 0;
      return -count;
    }
}

struct liw_data
{
  enum attr_liw slot;
  enum attr_liw_op op;
  rtx dest;
  rtx src;
};

/* Decide if the given insn is a candidate for LIW bundling.  If it is then
   extract the operands and LIW attributes from the insn and use them to fill
   in the liw_data structure.  Return true upon success or false if the insn
   cannot be bundled.  */

static bool
extract_bundle (rtx_insn *insn, struct liw_data * pdata)
{
  bool allow_consts = true;
  rtx p;

  gcc_assert (pdata != NULL);

  if (insn == NULL)
    return false;
  /* Make sure that we are dealing with a simple SET insn.  */
  p = single_set (insn);
  if (p == NULL_RTX)
    return false;

  /* Make sure that it could go into one of the LIW pipelines.  */
  pdata->slot = get_attr_liw (insn);
  if (pdata->slot == LIW_BOTH)
    return false;

  pdata->op = get_attr_liw_op (insn);

  switch (pdata->op)
    {
    case LIW_OP_MOV:
      pdata->dest = SET_DEST (p);
      pdata->src = SET_SRC (p);
      break;
    case LIW_OP_CMP:
      pdata->dest = XEXP (SET_SRC (p), 0);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    case LIW_OP_NONE:
      return false;
    case LIW_OP_AND:
    case LIW_OP_OR:
    case LIW_OP_XOR:
      /* The AND, OR and XOR long instruction words only accept
         register arguments.  */
      allow_consts = false;
      /* Fall through.  */
    default:
      pdata->dest = SET_DEST (p);
      pdata->src = XEXP (SET_SRC (p), 1);
      break;
    }

  if (! REG_P (pdata->dest))
    return false;

  if (REG_P (pdata->src))
    return true;

  return allow_consts && satisfies_constraint_O (pdata->src);
}

/* Make sure that it is OK to execute LIW1 and LIW2 in parallel.  GCC
   generated the instructions with the assumption that LIW1 would be
   executed before LIW2, so we must check for overlaps between their
   sources and destinations.  */

static bool
check_liw_constraints (struct liw_data * pliw1, struct liw_data * pliw2)
{
  /* Check for slot conflicts.  */
  if (pliw2->slot == pliw1->slot && pliw1->slot != LIW_EITHER)
    return false;

  /* If either operation is a compare, then "dest" is really an input;
     the real destination is CC_REG.  So these instructions need
     different checks.  */

  /* Changing "CMP ; OP" into "CMP | OP" is OK because the comparison will
     check its values prior to any changes made by OP.  */
  if (pliw1->op == LIW_OP_CMP)
    {
      /* Two sequential comparisons means dead code, which ought to
         have been eliminated given that bundling only happens with
         optimization.  We cannot bundle them in any case.  */
      gcc_assert (pliw1->op != pliw2->op);
      return true;
    }

  /* Changing "OP ; CMP" into "OP | CMP" does not work if the value being
     compared is the destination of OP, as the CMP will look at the old
     value, not the new one.  */
  if (pliw2->op == LIW_OP_CMP)
    {
      if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
        return false;

      if (REG_P (pliw2->src))
        return REGNO (pliw2->src) != REGNO (pliw1->dest);

      return true;
    }

  /* Changing "OP1 ; OP2" into "OP1 | OP2" does not work if they both
     write to the same destination register.  */
  if (REGNO (pliw2->dest) == REGNO (pliw1->dest))
    return false;

  /* Changing "OP1 ; OP2" into "OP1 | OP2" generally does not work if the
     destination of OP1 is the source of OP2.  The exception is when OP1
     is a MOVE instruction from a register, in which case we can forward
     OP1's source into OP2.  */
  if (REG_P (pliw2->src) && REGNO (pliw2->src) == REGNO (pliw1->dest))
    {
      if (pliw1->op == LIW_OP_MOV && REG_P (pliw1->src))
        {
          pliw2->src = pliw1->src;
          return true;
        }
      return false;
    }

  /* Everything else is OK.  */
  return true;
}
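
/* An illustrative example of the forwarding case above: the pair

     mov d0,d1
     add d1,d2

   can be bundled, with the second operation rewritten to use d0
   directly, since the move guarantees that d1 and d0 hold the same
   value (the register names here are purely illustrative).  */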

/* Combine pairs of insns into LIW bundles.  */

static void
mn10300_bundle_liw (void)
{
  rtx_insn *r;

  for (r = get_insns (); r != NULL; r = next_nonnote_nondebug_insn (r))
    {
      rtx_insn *insn1, *insn2;
      struct liw_data liw1, liw2;

      insn1 = r;
      if (! extract_bundle (insn1, & liw1))
        continue;

      insn2 = next_nonnote_nondebug_insn (insn1);
      if (! extract_bundle (insn2, & liw2))
        continue;

      /* Check for source/destination overlap.  */
      if (! check_liw_constraints (& liw1, & liw2))
        continue;

      if (liw1.slot == LIW_OP2 || liw2.slot == LIW_OP1)
        {
          struct liw_data temp;

          temp = liw1;
          liw1 = liw2;
          liw2 = temp;
        }

      delete_insn (insn2);

      rtx insn2_pat;
      if (liw1.op == LIW_OP_CMP)
        insn2_pat = gen_cmp_liw (liw2.dest, liw2.src, liw1.dest, liw1.src,
                                 GEN_INT (liw2.op));
      else if (liw2.op == LIW_OP_CMP)
        insn2_pat = gen_liw_cmp (liw1.dest, liw1.src, liw2.dest, liw2.src,
                                 GEN_INT (liw1.op));
      else
        insn2_pat = gen_liw (liw1.dest, liw2.dest, liw1.src, liw2.src,
                             GEN_INT (liw1.op), GEN_INT (liw2.op));

      insn2 = emit_insn_after (insn2_pat, insn1);
      delete_insn (insn1);
      r = insn2;
    }
}

#define DUMP(reason, insn)			\
  do						\
    {						\
      if (dump_file)				\
	{					\
	  fprintf (dump_file, reason "\n");	\
	  if (insn != NULL_RTX)			\
	    print_rtl_single (dump_file, insn);	\
	  fprintf (dump_file, "\n");		\
	}					\
    }						\
  while (0)

/* Replace the BRANCH insn with a Lcc insn that goes to LABEL.
   Insert a SETLB insn just before LABEL.  */

static void
mn10300_insert_setlb_lcc (rtx label, rtx branch)
{
  rtx lcc, comparison, cmp_reg;

  if (LABEL_NUSES (label) > 1)
    {
      rtx_insn *insn;

      /* This label is used both as an entry point to the loop
         and as a loop-back point for the loop.  We need to separate
         these two functions so that the SETLB happens upon entry,
         but the loop-back does not go to the SETLB instruction.  */
      DUMP ("Inserting SETLB insn after:", label);
      insn = emit_insn_after (gen_setlb (), label);
      label = gen_label_rtx ();
      emit_label_after (label, insn);
      DUMP ("Created new loop-back label:", label);
    }
  else
    {
      DUMP ("Inserting SETLB insn before:", label);
      emit_insn_before (gen_setlb (), label);
    }

  comparison = XEXP (SET_SRC (PATTERN (branch)), 0);
  cmp_reg = XEXP (comparison, 0);
  gcc_assert (REG_P (cmp_reg));

  /* The comparison must already have been split out of the branch;
     verify that the branch tests the flags register.  */
  gcc_assert (REGNO (cmp_reg) == CC_REG);

  if (GET_MODE (cmp_reg) == CC_FLOATmode)
    lcc = gen_FLcc (comparison, label);
  else
    lcc = gen_Lcc (comparison, label);

  rtx_insn *jump = emit_jump_insn_before (lcc, branch);
  mark_jump_label (XVECEXP (lcc, 0, 0), jump, 0);
  JUMP_LABEL (jump) = label;
  DUMP ("Replacing branch insn...", branch);
  DUMP ("... with Lcc insn:", jump);
  delete_insn (branch);
}

/* Return true if basic block BLOCK contains a CALL insn.  */

static bool
mn10300_block_contains_call (basic_block block)
{
  rtx_insn *insn;

  FOR_BB_INSNS (block, insn)
    if (CALL_P (insn))
      return true;

  return false;
}

/* Return true if any of the blocks in LOOP contain a CALL insn.  */

static bool
mn10300_loop_contains_call_insn (loop_p loop)
{
  basic_block * bbs;
  bool result = false;
  unsigned int i;

  bbs = get_loop_body (loop);

  for (i = 0; i < loop->num_nodes; i++)
    if (mn10300_block_contains_call (bbs[i]))
      {
        result = true;
        break;
      }

  free (bbs);
  return result;
}

/* Look for loops that could use the SETLB/Lcc looping instructions,
   and convert the suitable ones.  */

static void
mn10300_scan_for_setlb_lcc (void)
{
  loop_p loop;

  DUMP ("Looking for loops that can use the SETLB insn", NULL_RTX);

  df_analyze ();
  compute_bb_for_insn ();

  /* Find the loops.  */
  loop_optimizer_init (AVOID_CFG_MODIFICATIONS);

  /* FIXME: For now we only investigate innermost loops.  In practice however
     if an inner loop is not suitable for use with the SETLB/Lcc insns, it may
     be the case that its parent loop is suitable.  Thus we should check all
     loops, but work from the innermost outwards.  */
  FOR_EACH_LOOP (loop, LI_ONLY_INNERMOST)
    {
      const char * reason = NULL;

      /* Check to see if we can modify this loop.  If we cannot
         then set 'reason' to describe why it could not be done.  */
      if (loop->latch == NULL)
        reason = "it contains multiple latches";
      else if (loop->header != loop->latch)
        /* FIXME: We could handle loops that span multiple blocks,
           but this requires a lot more work tracking down the branches
           that need altering, so for now keep things simple.  */
        reason = "the loop spans multiple blocks";
      else if (mn10300_loop_contains_call_insn (loop))
        reason = "it contains CALL insns";
      else
        {
          rtx_insn *branch = BB_END (loop->latch);

          gcc_assert (JUMP_P (branch));
          if (single_set (branch) == NULL_RTX || ! any_condjump_p (branch))
            /* We cannot optimize tablejumps and the like.  */
            /* FIXME: We could handle unconditional jumps.  */
            reason = "it is not a simple loop";
          else
            {
              rtx_insn *label;

              if (dump_file)
                flow_loop_dump (loop, dump_file, NULL, 0);

              label = BB_HEAD (loop->header);
              gcc_assert (LABEL_P (label));

              mn10300_insert_setlb_lcc (label, branch);
            }
        }

      if (dump_file && reason != NULL)
        fprintf (dump_file,
                 "Loop starting with insn %d is not suitable because %s\n",
                 INSN_UID (BB_HEAD (loop->header)),
                 reason);
    }

  loop_optimizer_finalize ();

  df_finish_pass (false);

  DUMP ("SETLB scan complete", NULL_RTX);
}

/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */

static void
mn10300_reorg (void)
{
  /* These are optimizations, so only run them if optimizing.  */
  if (TARGET_AM33 && (optimize > 0 || optimize_size))
    {
      if (TARGET_ALLOW_SETLB)
        mn10300_scan_for_setlb_lcc ();

      if (TARGET_ALLOW_LIW)
        mn10300_bundle_liw ();
    }
}

/* Initialize the GCC target structure.  */

#undef  TARGET_MACHINE_DEPENDENT_REORG
#define TARGET_MACHINE_DEPENDENT_REORG mn10300_reorg

#undef  TARGET_ASM_ALIGNED_HI_OP
#define TARGET_ASM_ALIGNED_HI_OP "\t.hword\t"

#undef  TARGET_LEGITIMIZE_ADDRESS
#define TARGET_LEGITIMIZE_ADDRESS mn10300_legitimize_address

#undef  TARGET_ADDRESS_COST
#define TARGET_ADDRESS_COST  mn10300_address_cost
#undef  TARGET_REGISTER_MOVE_COST
#define TARGET_REGISTER_MOVE_COST  mn10300_register_move_cost
#undef  TARGET_MEMORY_MOVE_COST
#define TARGET_MEMORY_MOVE_COST  mn10300_memory_move_cost
#undef  TARGET_RTX_COSTS
#define TARGET_RTX_COSTS mn10300_rtx_costs

#undef  TARGET_ASM_FILE_START
#define TARGET_ASM_FILE_START mn10300_file_start
#undef  TARGET_ASM_FILE_START_FILE_DIRECTIVE
#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true

#undef  TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA
#define TARGET_ASM_OUTPUT_ADDR_CONST_EXTRA mn10300_asm_output_addr_const_extra

#undef  TARGET_OPTION_OVERRIDE
#define TARGET_OPTION_OVERRIDE mn10300_option_override

#undef  TARGET_ENCODE_SECTION_INFO
#define TARGET_ENCODE_SECTION_INFO mn10300_encode_section_info

#undef  TARGET_PROMOTE_PROTOTYPES
#define TARGET_PROMOTE_PROTOTYPES hook_bool_const_tree_true
#undef  TARGET_RETURN_IN_MEMORY
#define TARGET_RETURN_IN_MEMORY mn10300_return_in_memory
#undef  TARGET_PASS_BY_REFERENCE
#define TARGET_PASS_BY_REFERENCE mn10300_pass_by_reference
#undef  TARGET_CALLEE_COPIES
#define TARGET_CALLEE_COPIES hook_bool_CUMULATIVE_ARGS_mode_tree_bool_true
#undef  TARGET_ARG_PARTIAL_BYTES
#define TARGET_ARG_PARTIAL_BYTES mn10300_arg_partial_bytes
#undef  TARGET_FUNCTION_ARG
#define TARGET_FUNCTION_ARG mn10300_function_arg
#undef  TARGET_FUNCTION_ARG_ADVANCE
#define TARGET_FUNCTION_ARG_ADVANCE mn10300_function_arg_advance

#undef  TARGET_EXPAND_BUILTIN_SAVEREGS
#define TARGET_EXPAND_BUILTIN_SAVEREGS mn10300_builtin_saveregs
#undef  TARGET_EXPAND_BUILTIN_VA_START
#define TARGET_EXPAND_BUILTIN_VA_START mn10300_va_start

#undef  TARGET_CASE_VALUES_THRESHOLD
#define TARGET_CASE_VALUES_THRESHOLD mn10300_case_values_threshold

#undef  TARGET_LEGITIMATE_ADDRESS_P
#define TARGET_LEGITIMATE_ADDRESS_P	mn10300_legitimate_address_p
#undef  TARGET_DELEGITIMIZE_ADDRESS
#define TARGET_DELEGITIMIZE_ADDRESS	mn10300_delegitimize_address
#undef  TARGET_LEGITIMATE_CONSTANT_P
#define TARGET_LEGITIMATE_CONSTANT_P	mn10300_legitimate_constant_p

#undef  TARGET_PREFERRED_RELOAD_CLASS
#define TARGET_PREFERRED_RELOAD_CLASS mn10300_preferred_reload_class
#undef  TARGET_PREFERRED_OUTPUT_RELOAD_CLASS
#define TARGET_PREFERRED_OUTPUT_RELOAD_CLASS \
  mn10300_preferred_output_reload_class
#undef  TARGET_SECONDARY_RELOAD
#define TARGET_SECONDARY_RELOAD  mn10300_secondary_reload

#undef  TARGET_TRAMPOLINE_INIT
#define TARGET_TRAMPOLINE_INIT mn10300_trampoline_init

#undef  TARGET_FUNCTION_VALUE
#define TARGET_FUNCTION_VALUE mn10300_function_value
#undef  TARGET_LIBCALL_VALUE
#define TARGET_LIBCALL_VALUE mn10300_libcall_value

#undef  TARGET_ASM_OUTPUT_MI_THUNK
#define TARGET_ASM_OUTPUT_MI_THUNK      mn10300_asm_output_mi_thunk
#undef  TARGET_ASM_CAN_OUTPUT_MI_THUNK
#define TARGET_ASM_CAN_OUTPUT_MI_THUNK  mn10300_can_output_mi_thunk

#undef  TARGET_SCHED_ADJUST_COST
#define TARGET_SCHED_ADJUST_COST mn10300_adjust_sched_cost

#undef  TARGET_CONDITIONAL_REGISTER_USAGE
#define TARGET_CONDITIONAL_REGISTER_USAGE mn10300_conditional_register_usage

#undef  TARGET_MD_ASM_CLOBBERS
#define TARGET_MD_ASM_CLOBBERS  mn10300_md_asm_clobbers

#undef  TARGET_FLAGS_REGNUM
#define TARGET_FLAGS_REGNUM  CC_REG

struct gcc_target targetm = TARGET_INITIALIZER;