1/* Subroutines used for MIPS code generation.
2   Copyright (C) 1989, 1990, 1991, 1993, 1994, 1995, 1996, 1997, 1998,
3   1999, 2000, 2001, 2002, 2003, 2004, 2005 Free Software Foundation, Inc.
4   Contributed by A. Lichnewsky, lich@inria.inria.fr.
5   Changes by Michael Meissner, meissner@osf.org.
6   64 bit r4000 support by Ian Lance Taylor, ian@cygnus.com, and
7   Brendan Eich, brendan@microunity.com.
8
9This file is part of GCC.
10
11GCC is free software; you can redistribute it and/or modify
12it under the terms of the GNU General Public License as published by
13the Free Software Foundation; either version 2, or (at your option)
14any later version.
15
16GCC is distributed in the hope that it will be useful,
17but WITHOUT ANY WARRANTY; without even the implied warranty of
18MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
19GNU General Public License for more details.
20
21You should have received a copy of the GNU General Public License
22along with GCC; see the file COPYING.  If not, write to
23the Free Software Foundation, 51 Franklin Street, Fifth Floor,
24Boston, MA 02110-1301, USA.  */
25
26#include "config.h"
27#include "system.h"
28#include "coretypes.h"
29#include "tm.h"
30#include <signal.h>
31#include "rtl.h"
32#include "regs.h"
33#include "hard-reg-set.h"
34#include "real.h"
35#include "insn-config.h"
36#include "conditions.h"
37#include "insn-attr.h"
38#include "recog.h"
39#include "toplev.h"
40#include "output.h"
41#include "tree.h"
42#include "function.h"
43#include "expr.h"
44#include "optabs.h"
45#include "flags.h"
46#include "reload.h"
47#include "tm_p.h"
48#include "ggc.h"
49#include "gstab.h"
50#include "hashtab.h"
51#include "debug.h"
52#include "target.h"
53#include "target-def.h"
54#include "integrate.h"
55#include "langhooks.h"
56#include "cfglayout.h"
57#include "sched-int.h"
58#include "tree-gimple.h"
59#include "bitmap.h"
60
61/* True if X is an unspec wrapper around a SYMBOL_REF or LABEL_REF.  */
62#define UNSPEC_ADDRESS_P(X)					\
63  (GET_CODE (X) == UNSPEC					\
64   && XINT (X, 1) >= UNSPEC_ADDRESS_FIRST			\
65   && XINT (X, 1) < UNSPEC_ADDRESS_FIRST + NUM_SYMBOL_TYPES)
66
67/* Extract the symbol or label from UNSPEC wrapper X.  */
68#define UNSPEC_ADDRESS(X) \
69  XVECEXP (X, 0, 0)
70
71/* Extract the symbol type from UNSPEC wrapper X.  */
72#define UNSPEC_ADDRESS_TYPE(X) \
73  ((enum mips_symbol_type) (XINT (X, 1) - UNSPEC_ADDRESS_FIRST))
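
/* A minimal sketch (kept under #if 0, not compiled) of how the three
   macros above fit together: an address is wrapped in an UNSPEC whose
   XINT records the symbol type, and the macros recover both parts.
   The function name and the choice of SYMBOL_SMALL_DATA are purely
   illustrative assumptions.  */
#if 0
static void
mips_unspec_address_sketch (rtx symbol)
{
  rtx wrapped = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, symbol),
				UNSPEC_ADDRESS_FIRST + SYMBOL_SMALL_DATA);

  gcc_assert (UNSPEC_ADDRESS_P (wrapped));
  gcc_assert (UNSPEC_ADDRESS (wrapped) == symbol);
  gcc_assert (UNSPEC_ADDRESS_TYPE (wrapped) == SYMBOL_SMALL_DATA);
}
#endif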
74
75/* The maximum distance between the top of the stack frame and the
76   value $sp has when we save & restore registers.
77
78   Use a maximum gap of 0x100 in the mips16 case.  We can then use
79   unextended instructions to save and restore registers, and to
80   allocate and deallocate the top part of the frame.
81
82   The value in the !mips16 case must be a SMALL_OPERAND and must
83   preserve the maximum stack alignment.  */
84#define MIPS_MAX_FIRST_STACK_STEP (TARGET_MIPS16 ? 0x100 : 0x7ff0)
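
/* A minimal sketch (kept under #if 0, not compiled) of how a frame larger
   than MIPS_MAX_FIRST_STACK_STEP would be split into two adjustments, so
   that the register saves stay within reach of the first, small step.
   The function and variable names are illustrative assumptions.  */
#if 0
static void
mips_first_stack_step_sketch (HOST_WIDE_INT total_size)
{
  HOST_WIDE_INT step1 = MIN (total_size, MIPS_MAX_FIRST_STACK_STEP);
  HOST_WIDE_INT step2 = total_size - step1;

  /* Allocate STEP1 bytes, save the registers relative to the new $sp,
     then allocate the remaining STEP2 bytes.  */
  (void) step2;
}
#endif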
85
86/* True if INSN is a mips.md pattern or asm statement.  */
87#define USEFUL_INSN_P(INSN)						\
88  (INSN_P (INSN)							\
89   && GET_CODE (PATTERN (INSN)) != USE					\
90   && GET_CODE (PATTERN (INSN)) != CLOBBER				\
91   && GET_CODE (PATTERN (INSN)) != ADDR_VEC				\
92   && GET_CODE (PATTERN (INSN)) != ADDR_DIFF_VEC)
93
94/* If INSN is a delayed branch sequence, return the first instruction
95   in the sequence, otherwise return INSN itself.  */
96#define SEQ_BEGIN(INSN)							\
97  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
98   ? XVECEXP (PATTERN (INSN), 0, 0)					\
99   : (INSN))
100
101/* Likewise for the last instruction in a delayed branch sequence.  */
102#define SEQ_END(INSN)							\
103  (INSN_P (INSN) && GET_CODE (PATTERN (INSN)) == SEQUENCE		\
104   ? XVECEXP (PATTERN (INSN), 0, XVECLEN (PATTERN (INSN), 0) - 1)	\
105   : (INSN))
106
107/* Execute the following loop body with SUBINSN set to each instruction
108   between SEQ_BEGIN (INSN) and SEQ_END (INSN) inclusive.  */
109#define FOR_EACH_SUBINSN(SUBINSN, INSN)					\
110  for ((SUBINSN) = SEQ_BEGIN (INSN);					\
111       (SUBINSN) != NEXT_INSN (SEQ_END (INSN));				\
112       (SUBINSN) = NEXT_INSN (SUBINSN))
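
/* A minimal sketch (kept under #if 0, not compiled) showing the intended
   use of SEQ_BEGIN, SEQ_END and FOR_EACH_SUBINSN: the walk visits INSN
   itself, or every member of a delayed-branch SEQUENCE.  The function
   name is an illustrative assumption.  */
#if 0
static int
mips_count_useful_subinsns_sketch (rtx insn)
{
  rtx subinsn;
  int count = 0;

  FOR_EACH_SUBINSN (subinsn, insn)
    if (USEFUL_INSN_P (subinsn))
      count++;
  return count;
}
#endif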
113
114/* Classifies an address.
115
116   ADDRESS_REG
117       A natural register + offset address.  The register satisfies
118       mips_valid_base_register_p and the offset is a const_arith_operand.
119
120   ADDRESS_LO_SUM
121       A LO_SUM rtx.  The first operand is a valid base register and
122       the second operand is a symbolic address.
123
124   ADDRESS_CONST_INT
125       A signed 16-bit constant address.
126
127   ADDRESS_SYMBOLIC:
128       A constant symbolic address (equivalent to CONSTANT_SYMBOLIC).  */
129enum mips_address_type {
130  ADDRESS_REG,
131  ADDRESS_LO_SUM,
132  ADDRESS_CONST_INT,
133  ADDRESS_SYMBOLIC
134};
135
/* Classifies the prototype of a builtin function.  Each name encodes the
   return type followed by the argument types; MIPS_V2SF_FTYPE_SF_SF, for
   example, is a function that returns a V2SF and takes two SF arguments.  */
137enum mips_function_type
138{
139  MIPS_V2SF_FTYPE_V2SF,
140  MIPS_V2SF_FTYPE_V2SF_V2SF,
141  MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
142  MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,
143  MIPS_V2SF_FTYPE_SF_SF,
144  MIPS_INT_FTYPE_V2SF_V2SF,
145  MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,
146  MIPS_INT_FTYPE_SF_SF,
147  MIPS_INT_FTYPE_DF_DF,
148  MIPS_SF_FTYPE_V2SF,
149  MIPS_SF_FTYPE_SF,
150  MIPS_SF_FTYPE_SF_SF,
151  MIPS_DF_FTYPE_DF,
152  MIPS_DF_FTYPE_DF_DF,
153
154  /* For MIPS DSP ASE  */
155  MIPS_DI_FTYPE_DI_SI,
156  MIPS_DI_FTYPE_DI_SI_SI,
157  MIPS_DI_FTYPE_DI_V2HI_V2HI,
158  MIPS_DI_FTYPE_DI_V4QI_V4QI,
159  MIPS_SI_FTYPE_DI_SI,
160  MIPS_SI_FTYPE_PTR_SI,
161  MIPS_SI_FTYPE_SI,
162  MIPS_SI_FTYPE_SI_SI,
163  MIPS_SI_FTYPE_V2HI,
164  MIPS_SI_FTYPE_V2HI_V2HI,
165  MIPS_SI_FTYPE_V4QI,
166  MIPS_SI_FTYPE_V4QI_V4QI,
167  MIPS_SI_FTYPE_VOID,
168  MIPS_V2HI_FTYPE_SI,
169  MIPS_V2HI_FTYPE_SI_SI,
170  MIPS_V2HI_FTYPE_V2HI,
171  MIPS_V2HI_FTYPE_V2HI_SI,
172  MIPS_V2HI_FTYPE_V2HI_V2HI,
173  MIPS_V2HI_FTYPE_V4QI,
174  MIPS_V2HI_FTYPE_V4QI_V2HI,
175  MIPS_V4QI_FTYPE_SI,
176  MIPS_V4QI_FTYPE_V2HI_V2HI,
177  MIPS_V4QI_FTYPE_V4QI_SI,
178  MIPS_V4QI_FTYPE_V4QI_V4QI,
179  MIPS_VOID_FTYPE_SI_SI,
180  MIPS_VOID_FTYPE_V2HI_V2HI,
181  MIPS_VOID_FTYPE_V4QI_V4QI,
182
183  /* The last type.  */
184  MIPS_MAX_FTYPE_MAX
185};
186
187/* Specifies how a builtin function should be converted into rtl.  */
188enum mips_builtin_type
189{
190  /* The builtin corresponds directly to an .md pattern.  The return
191     value is mapped to operand 0 and the arguments are mapped to
192     operands 1 and above.  */
193  MIPS_BUILTIN_DIRECT,
194
195  /* The builtin corresponds directly to an .md pattern.  There is no return
196     value and the arguments are mapped to operands 0 and above.  */
197  MIPS_BUILTIN_DIRECT_NO_TARGET,
198
199  /* The builtin corresponds to a comparison instruction followed by
200     a mips_cond_move_tf_ps pattern.  The first two arguments are the
201     values to compare and the second two arguments are the vector
202     operands for the movt.ps or movf.ps instruction (in assembly order).  */
203  MIPS_BUILTIN_MOVF,
204  MIPS_BUILTIN_MOVT,
205
206  /* The builtin corresponds to a V2SF comparison instruction.  Operand 0
207     of this instruction is the result of the comparison, which has mode
208     CCV2 or CCV4.  The function arguments are mapped to operands 1 and
209     above.  The function's return value is an SImode boolean that is
210     true under the following conditions:
211
212     MIPS_BUILTIN_CMP_ANY: one of the registers is true
213     MIPS_BUILTIN_CMP_ALL: all of the registers are true
214     MIPS_BUILTIN_CMP_LOWER: the first register is true
215     MIPS_BUILTIN_CMP_UPPER: the second register is true.  */
216  MIPS_BUILTIN_CMP_ANY,
217  MIPS_BUILTIN_CMP_ALL,
218  MIPS_BUILTIN_CMP_UPPER,
219  MIPS_BUILTIN_CMP_LOWER,
220
221  /* As above, but the instruction only sets a single $fcc register.  */
222  MIPS_BUILTIN_CMP_SINGLE,
223
224  /* For generating bposge32 branch instructions in MIPS32 DSP ASE.  */
225  MIPS_BUILTIN_BPOSGE32
226};
227
228/* Invokes MACRO (COND) for each c.cond.fmt condition.  */
229#define MIPS_FP_CONDITIONS(MACRO) \
230  MACRO (f),	\
231  MACRO (un),	\
232  MACRO (eq),	\
233  MACRO (ueq),	\
234  MACRO (olt),	\
235  MACRO (ult),	\
236  MACRO (ole),	\
237  MACRO (ule),	\
238  MACRO (sf),	\
239  MACRO (ngle),	\
240  MACRO (seq),	\
241  MACRO (ngl),	\
242  MACRO (lt),	\
243  MACRO (nge),	\
244  MACRO (le),	\
245  MACRO (ngt)
246
247/* Enumerates the codes above as MIPS_FP_COND_<X>.  */
248#define DECLARE_MIPS_COND(X) MIPS_FP_COND_ ## X
249enum mips_fp_condition {
250  MIPS_FP_CONDITIONS (DECLARE_MIPS_COND)
251};
252
253/* Index X provides the string representation of MIPS_FP_COND_<X>.  */
254#define STRINGIFY(X) #X
255static const char *const mips_fp_conditions[] = {
256  MIPS_FP_CONDITIONS (STRINGIFY)
257};
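
/* A minimal sketch (kept under #if 0, not compiled): because the same
   X-macro generates both the enum and the string table, each enumerator
   indexes its own name, e.g. mips_fp_conditions[MIPS_FP_COND_eq] is "eq".
   The function name is an illustrative assumption.  */
#if 0
static const char *
mips_fp_condition_name_sketch (enum mips_fp_condition cond)
{
  return mips_fp_conditions[cond];
}
#endif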
258
/* A function to save or restore a register.  The first argument is the
   register and the second is the stack slot.  */
261typedef void (*mips_save_restore_fn) (rtx, rtx);
262
263struct mips16_constant;
264struct mips_arg_info;
265struct mips_address_info;
266struct mips_integer_op;
267struct mips_sim;
268
269static enum mips_symbol_type mips_classify_symbol (rtx);
270static void mips_split_const (rtx, rtx *, HOST_WIDE_INT *);
271static bool mips_offset_within_object_p (rtx, HOST_WIDE_INT);
272static bool mips_valid_base_register_p (rtx, enum machine_mode, int);
273static bool mips_symbolic_address_p (enum mips_symbol_type, enum machine_mode);
274static bool mips_classify_address (struct mips_address_info *, rtx,
275				   enum machine_mode, int);
276static bool mips_cannot_force_const_mem (rtx);
277static bool mips_use_blocks_for_constant_p (enum machine_mode, rtx);
278static int mips_symbol_insns (enum mips_symbol_type);
279static bool mips16_unextended_reference_p (enum machine_mode mode, rtx, rtx);
280static rtx mips_force_temporary (rtx, rtx);
281static rtx mips_unspec_offset_high (rtx, rtx, rtx, enum mips_symbol_type);
282static rtx mips_add_offset (rtx, rtx, HOST_WIDE_INT);
283static unsigned int mips_build_shift (struct mips_integer_op *, HOST_WIDE_INT);
284static unsigned int mips_build_lower (struct mips_integer_op *,
285				      unsigned HOST_WIDE_INT);
286static unsigned int mips_build_integer (struct mips_integer_op *,
287					unsigned HOST_WIDE_INT);
288static void mips_legitimize_const_move (enum machine_mode, rtx, rtx);
289static int m16_check_op (rtx, int, int, int);
290static bool mips_rtx_costs (rtx, int, int, int *);
291static int mips_address_cost (rtx);
292static void mips_emit_compare (enum rtx_code *, rtx *, rtx *, bool);
293static void mips_load_call_address (rtx, rtx, int);
294static bool mips_function_ok_for_sibcall (tree, tree);
295static void mips_block_move_straight (rtx, rtx, HOST_WIDE_INT);
296static void mips_adjust_block_mem (rtx, HOST_WIDE_INT, rtx *, rtx *);
297static void mips_block_move_loop (rtx, rtx, HOST_WIDE_INT);
298static void mips_arg_info (const CUMULATIVE_ARGS *, enum machine_mode,
299			   tree, int, struct mips_arg_info *);
300static bool mips_get_unaligned_mem (rtx *, unsigned int, int, rtx *, rtx *);
301static void mips_set_architecture (const struct mips_cpu_info *);
302static void mips_set_tune (const struct mips_cpu_info *);
303static bool mips_handle_option (size_t, const char *, int);
304static struct machine_function *mips_init_machine_status (void);
305static void print_operand_reloc (FILE *, rtx, const char **);
306#if TARGET_IRIX
307static void irix_output_external_libcall (rtx);
308#endif
309static void mips_file_start (void);
310static void mips_file_end (void);
311static bool mips_rewrite_small_data_p (rtx);
312static int mips_small_data_pattern_1 (rtx *, void *);
313static int mips_rewrite_small_data_1 (rtx *, void *);
314static bool mips_function_has_gp_insn (void);
315static unsigned int mips_global_pointer	(void);
316static bool mips_save_reg_p (unsigned int);
317static void mips_save_restore_reg (enum machine_mode, int, HOST_WIDE_INT,
318				   mips_save_restore_fn);
319static void mips_for_each_saved_reg (HOST_WIDE_INT, mips_save_restore_fn);
320static void mips_output_cplocal (void);
321static void mips_emit_loadgp (void);
322static void mips_output_function_prologue (FILE *, HOST_WIDE_INT);
323static void mips_set_frame_expr (rtx);
324static rtx mips_frame_set (rtx, rtx);
325static void mips_save_reg (rtx, rtx);
326static void mips_output_function_epilogue (FILE *, HOST_WIDE_INT);
327static void mips_restore_reg (rtx, rtx);
328static void mips_output_mi_thunk (FILE *, tree, HOST_WIDE_INT,
329				  HOST_WIDE_INT, tree);
330static int symbolic_expression_p (rtx);
331static section *mips_select_rtx_section (enum machine_mode, rtx,
332					 unsigned HOST_WIDE_INT);
333static section *mips_function_rodata_section (tree);
334static bool mips_in_small_data_p (tree);
335static bool mips_use_anchors_for_symbol_p (rtx);
336static int mips_fpr_return_fields (tree, tree *);
337static bool mips_return_in_msb (tree);
338static rtx mips_return_fpr_pair (enum machine_mode mode,
339				 enum machine_mode mode1, HOST_WIDE_INT,
340				 enum machine_mode mode2, HOST_WIDE_INT);
341static rtx mips16_gp_pseudo_reg (void);
342static void mips16_fp_args (FILE *, int, int);
343static void build_mips16_function_stub (FILE *);
344static rtx dump_constants_1 (enum machine_mode, rtx, rtx);
345static void dump_constants (struct mips16_constant *, rtx);
346static int mips16_insn_length (rtx);
347static int mips16_rewrite_pool_refs (rtx *, void *);
348static void mips16_lay_out_constants (void);
349static void mips_sim_reset (struct mips_sim *);
350static void mips_sim_init (struct mips_sim *, state_t);
351static void mips_sim_next_cycle (struct mips_sim *);
352static void mips_sim_wait_reg (struct mips_sim *, rtx, rtx);
353static int mips_sim_wait_regs_2 (rtx *, void *);
354static void mips_sim_wait_regs_1 (rtx *, void *);
355static void mips_sim_wait_regs (struct mips_sim *, rtx);
356static void mips_sim_wait_units (struct mips_sim *, rtx);
357static void mips_sim_wait_insn (struct mips_sim *, rtx);
358static void mips_sim_record_set (rtx, rtx, void *);
359static void mips_sim_issue_insn (struct mips_sim *, rtx);
360static void mips_sim_issue_nop (struct mips_sim *);
361static void mips_sim_finish_insn (struct mips_sim *, rtx);
362static void vr4130_avoid_branch_rt_conflict (rtx);
363static void vr4130_align_insns (void);
364static void mips_avoid_hazard (rtx, rtx, int *, rtx *, rtx);
365static void mips_avoid_hazards (void);
366static void mips_reorg (void);
367static bool mips_strict_matching_cpu_name_p (const char *, const char *);
368static bool mips_matching_cpu_name_p (const char *, const char *);
369static const struct mips_cpu_info *mips_parse_cpu (const char *);
370static const struct mips_cpu_info *mips_cpu_info_from_isa (int);
371static bool mips_return_in_memory (tree, tree);
372static bool mips_strict_argument_naming (CUMULATIVE_ARGS *);
373static void mips_macc_chains_record (rtx);
374static void mips_macc_chains_reorder (rtx *, int);
375static void vr4130_true_reg_dependence_p_1 (rtx, rtx, void *);
376static bool vr4130_true_reg_dependence_p (rtx);
377static bool vr4130_swap_insns_p (rtx, rtx);
378static void vr4130_reorder (rtx *, int);
379static void mips_promote_ready (rtx *, int, int);
380static int mips_sched_reorder (FILE *, int, rtx *, int *, int);
381static int mips_variable_issue (FILE *, int, rtx, int);
382static int mips_adjust_cost (rtx, rtx, rtx, int);
383static int mips_issue_rate (void);
384static int mips_multipass_dfa_lookahead (void);
385static void mips_init_libfuncs (void);
386static void mips_setup_incoming_varargs (CUMULATIVE_ARGS *, enum machine_mode,
387					 tree, int *, int);
388static tree mips_build_builtin_va_list (void);
389static tree mips_gimplify_va_arg_expr (tree, tree, tree *, tree *);
390static bool mips_pass_by_reference (CUMULATIVE_ARGS *, enum machine_mode mode,
391				    tree, bool);
392static bool mips_callee_copies (CUMULATIVE_ARGS *, enum machine_mode mode,
393				tree, bool);
394static int mips_arg_partial_bytes (CUMULATIVE_ARGS *, enum machine_mode mode,
395				   tree, bool);
396static bool mips_valid_pointer_mode (enum machine_mode);
397static bool mips_vector_mode_supported_p (enum machine_mode);
398static rtx mips_prepare_builtin_arg (enum insn_code, unsigned int, tree *);
399static rtx mips_prepare_builtin_target (enum insn_code, unsigned int, rtx);
400static rtx mips_expand_builtin (tree, rtx, rtx, enum machine_mode, int);
401static void mips_init_builtins (void);
402static rtx mips_expand_builtin_direct (enum insn_code, rtx, tree, bool);
403static rtx mips_expand_builtin_movtf (enum mips_builtin_type,
404				      enum insn_code, enum mips_fp_condition,
405				      rtx, tree);
406static rtx mips_expand_builtin_compare (enum mips_builtin_type,
407					enum insn_code, enum mips_fp_condition,
408					rtx, tree);
409static rtx mips_expand_builtin_bposge (enum mips_builtin_type, rtx);
410static void mips_encode_section_info (tree, rtx, int);
411static void mips_extra_live_on_entry (bitmap);
412static int mips_mode_rep_extended (enum machine_mode, enum machine_mode);
413
/* Structure to be filled in by compute_frame_size with register
   save masks and offsets for the current function.  */
416
417struct mips_frame_info GTY(())
418{
419  HOST_WIDE_INT total_size;	/* # bytes that the entire frame takes up */
420  HOST_WIDE_INT var_size;	/* # bytes that variables take up */
421  HOST_WIDE_INT args_size;	/* # bytes that outgoing arguments take up */
422  HOST_WIDE_INT cprestore_size;	/* # bytes that the .cprestore slot takes up */
423  HOST_WIDE_INT gp_reg_size;	/* # bytes needed to store gp regs */
424  HOST_WIDE_INT fp_reg_size;	/* # bytes needed to store fp regs */
425  unsigned int mask;		/* mask of saved gp registers */
426  unsigned int fmask;		/* mask of saved fp registers */
427  HOST_WIDE_INT gp_save_offset;	/* offset from vfp to store gp registers */
428  HOST_WIDE_INT fp_save_offset;	/* offset from vfp to store fp registers */
429  HOST_WIDE_INT gp_sp_offset;	/* offset from new sp to store gp registers */
430  HOST_WIDE_INT fp_sp_offset;	/* offset from new sp to store fp registers */
431  bool initialized;		/* true if frame size already calculated */
432  int num_gp;			/* number of gp registers saved */
433  int num_fp;			/* number of fp registers saved */
434};
435
436struct machine_function GTY(()) {
437  /* Pseudo-reg holding the value of $28 in a mips16 function which
438     refers to GP relative global variables.  */
439  rtx mips16_gp_pseudo_rtx;
440
441  /* The number of extra stack bytes taken up by register varargs.
442     This area is allocated by the callee at the very top of the frame.  */
443  int varargs_size;
444
445  /* Current frame information, calculated by compute_frame_size.  */
446  struct mips_frame_info frame;
447
448  /* The register to use as the global pointer within this function.  */
449  unsigned int global_pointer;
450
451  /* True if mips_adjust_insn_length should ignore an instruction's
452     hazard attribute.  */
453  bool ignore_hazard_length_p;
454
455  /* True if the whole function is suitable for .set noreorder and
456     .set nomacro.  */
457  bool all_noreorder_p;
458
459  /* True if the function is known to have an instruction that needs $gp.  */
460  bool has_gp_insn_p;
461};
462
463/* Information about a single argument.  */
464struct mips_arg_info
465{
466  /* True if the argument is passed in a floating-point register, or
467     would have been if we hadn't run out of registers.  */
468  bool fpr_p;
469
470  /* The number of words passed in registers, rounded up.  */
471  unsigned int reg_words;
472
473  /* For EABI, the offset of the first register from GP_ARG_FIRST or
474     FP_ARG_FIRST.  For other ABIs, the offset of the first register from
475     the start of the ABI's argument structure (see the CUMULATIVE_ARGS
476     comment for details).
477
478     The value is MAX_ARGS_IN_REGISTERS if the argument is passed entirely
479     on the stack.  */
480  unsigned int reg_offset;
481
482  /* The number of words that must be passed on the stack, rounded up.  */
483  unsigned int stack_words;
484
485  /* The offset from the start of the stack overflow area of the argument's
486     first stack word.  Only meaningful when STACK_WORDS is nonzero.  */
487  unsigned int stack_offset;
488};
489
490
491/* Information about an address described by mips_address_type.
492
493   ADDRESS_CONST_INT
494       No fields are used.
495
496   ADDRESS_REG
497       REG is the base register and OFFSET is the constant offset.
498
499   ADDRESS_LO_SUM
500       REG is the register that contains the high part of the address,
501       OFFSET is the symbolic address being referenced and SYMBOL_TYPE
502       is the type of OFFSET's symbol.
503
504   ADDRESS_SYMBOLIC
505       SYMBOL_TYPE is the type of symbol being referenced.  */
506
507struct mips_address_info
508{
509  enum mips_address_type type;
510  rtx reg;
511  rtx offset;
512  enum mips_symbol_type symbol_type;
513};
514
515
516/* One stage in a constant building sequence.  These sequences have
517   the form:
518
519	A = VALUE[0]
520	A = A CODE[1] VALUE[1]
521	A = A CODE[2] VALUE[2]
522	...
523
524   where A is an accumulator, each CODE[i] is a binary rtl operation
525   and each VALUE[i] is a constant integer.  */
526struct mips_integer_op {
527  enum rtx_code code;
528  unsigned HOST_WIDE_INT value;
529};
530
531
532/* The largest number of operations needed to load an integer constant.
533   The worst accepted case for 64-bit constants is LUI,ORI,SLL,ORI,SLL,ORI.
534   When the lowest bit is clear, we can try, but reject a sequence with
535   an extra SLL at the end.  */
536#define MIPS_MAX_INTEGER_OPS 7
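
/* A minimal sketch (kept under #if 0, not compiled) of the accumulator
   pattern described above: one plausible description of the constant
   0x12345678 is an LUI-style value followed by an ORI-style step.  This
   is an assumed example; it is not necessarily the exact sequence that
   the constant-building routines would choose, and the CODE of the
   first entry is conventionally unused.  */
#if 0
static const struct mips_integer_op mips_integer_op_sketch[2] = {
  { SET, 0x12340000 },		/* A = 0x12340000 */
  { IOR, 0x00005678 }		/* A = A | 0x5678 -> 0x12345678 */
};
#endif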
537
538
539/* Global variables for machine-dependent things.  */
540
541/* Threshold for data being put into the small data/bss area, instead
542   of the normal data area.  */
543int mips_section_threshold = -1;
544
545/* Count the number of .file directives, so that .loc is up to date.  */
546int num_source_filenames = 0;
547
/* Count the number of sdb-related labels generated (to find block
   start and end boundaries).  */
550int sdb_label_count = 0;
551
552/* Next label # for each statement for Silicon Graphics IRIS systems.  */
553int sym_lineno = 0;
554
555/* Linked list of all externals that are to be emitted when optimizing
556   for the global pointer if they haven't been declared by the end of
557   the program with an appropriate .comm or initialization.  */
558
559struct extern_list GTY (())
560{
561  struct extern_list *next;	/* next external */
562  const char *name;		/* name of the external */
563  int size;			/* size in bytes */
564};
565
566static GTY (()) struct extern_list *extern_head = 0;
567
568/* Name of the file containing the current function.  */
569const char *current_function_file = "";
570
571/* Number of nested .set noreorder, noat, nomacro, and volatile requests.  */
572int set_noreorder;
573int set_noat;
574int set_nomacro;
575int set_volatile;
576
577/* The next branch instruction is a branch likely, not branch normal.  */
578int mips_branch_likely;
579
580/* The operands passed to the last cmpMM expander.  */
581rtx cmp_operands[2];
582
583/* The target cpu for code generation.  */
584enum processor_type mips_arch;
585const struct mips_cpu_info *mips_arch_info;
586
587/* The target cpu for optimization and scheduling.  */
588enum processor_type mips_tune;
589const struct mips_cpu_info *mips_tune_info;
590
591/* Which instruction set architecture to use.  */
592int mips_isa;
593
594/* Which ABI to use.  */
595int mips_abi = MIPS_ABI_DEFAULT;
596
597/* Cost information to use.  */
598const struct mips_rtx_cost_data *mips_cost;
599
600/* Whether we are generating mips16 hard float code.  In mips16 mode
601   we always set TARGET_SOFT_FLOAT; this variable is nonzero if
602   -msoft-float was not specified by the user, which means that we
603   should arrange to call mips32 hard floating point code.  */
604int mips16_hard_float;
605
606/* The architecture selected by -mipsN.  */
607static const struct mips_cpu_info *mips_isa_info;
608
609/* If TRUE, we split addresses into their high and low parts in the RTL.  */
610int mips_split_addresses;
611
612/* Mode used for saving/restoring general purpose registers.  */
613static enum machine_mode gpr_mode;
614
/* Array indicating whether a given hard register can support a given
   mode.  */
617char mips_hard_regno_mode_ok[(int)MAX_MACHINE_MODE][FIRST_PSEUDO_REGISTER];
618
619/* List of all MIPS punctuation characters used by print_operand.  */
620char mips_print_operand_punct[256];
621
622/* Map GCC register number to debugger register number.  */
623int mips_dbx_regno[FIRST_PSEUDO_REGISTER];
624
625/* A copy of the original flag_delayed_branch: see override_options.  */
626static int mips_flag_delayed_branch;
627
628static GTY (()) int mips_output_filename_first_time = 1;
629
630/* mips_split_p[X] is true if symbols of type X can be split by
631   mips_split_symbol().  */
632bool mips_split_p[NUM_SYMBOL_TYPES];
633
634/* mips_lo_relocs[X] is the relocation to use when a symbol of type X
635   appears in a LO_SUM.  It can be null if such LO_SUMs aren't valid or
636   if they are matched by a special .md file pattern.  */
637static const char *mips_lo_relocs[NUM_SYMBOL_TYPES];
638
639/* Likewise for HIGHs.  */
640static const char *mips_hi_relocs[NUM_SYMBOL_TYPES];
641
/* Map hard register number to register class.  */
643const enum reg_class mips_regno_to_class[] =
644{
645  LEA_REGS,	LEA_REGS,	M16_NA_REGS,	V1_REG,
646  M16_REGS,	M16_REGS,	M16_REGS,	M16_REGS,
647  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
648  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
649  M16_NA_REGS,	M16_NA_REGS,	LEA_REGS,	LEA_REGS,
650  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
651  T_REG,	PIC_FN_ADDR_REG, LEA_REGS,	LEA_REGS,
652  LEA_REGS,	LEA_REGS,	LEA_REGS,	LEA_REGS,
653  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
654  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
655  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
656  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
657  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
658  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
659  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
660  FP_REGS,	FP_REGS,	FP_REGS,	FP_REGS,
661  HI_REG,	LO_REG,		NO_REGS,	ST_REGS,
662  ST_REGS,	ST_REGS,	ST_REGS,	ST_REGS,
663  ST_REGS,	ST_REGS,	ST_REGS,	NO_REGS,
664  NO_REGS,	ALL_REGS,	ALL_REGS,	NO_REGS,
665  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
666  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
667  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
668  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
669  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
670  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
671  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
672  COP0_REGS,	COP0_REGS,	COP0_REGS,	COP0_REGS,
673  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
674  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
675  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
676  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
677  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
678  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
679  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
680  COP2_REGS,	COP2_REGS,	COP2_REGS,	COP2_REGS,
681  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
682  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
683  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
684  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
685  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
686  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
687  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
688  COP3_REGS,	COP3_REGS,	COP3_REGS,	COP3_REGS,
689  DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,	DSP_ACC_REGS,
690  DSP_ACC_REGS,	DSP_ACC_REGS,	ALL_REGS,	ALL_REGS,
691  ALL_REGS,	ALL_REGS,	ALL_REGS,	ALL_REGS
692};
693
694/* Table of machine dependent attributes.  */
695const struct attribute_spec mips_attribute_table[] =
696{
697  { "long_call",   0, 0, false, true,  true,  NULL },
698  { NULL,	   0, 0, false, false, false, NULL }
699};
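
/* A minimal sketch (kept under #if 0, not compiled) of how the "long_call"
   attribute declared above is used: it applies to function declarations
   and forces calls to the function to go through a register rather than
   a direct jal.  The declaration below is an illustrative assumption.  */
#if 0
void mips_long_call_sketch (void) __attribute__ ((long_call));
#endif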
700
701/* A table describing all the processors gcc knows about.  Names are
702   matched in the order listed.  The first mention of an ISA level is
703   taken as the canonical name for that ISA.
704
705   To ease comparison, please keep this table in the same order as
706   gas's mips_cpu_info_table[].  */
707const struct mips_cpu_info mips_cpu_info_table[] = {
708  /* Entries for generic ISAs */
709  { "mips1", PROCESSOR_R3000, 1 },
710  { "mips2", PROCESSOR_R6000, 2 },
711  { "mips3", PROCESSOR_R4000, 3 },
712  { "mips4", PROCESSOR_R8000, 4 },
713  { "mips32", PROCESSOR_4KC, 32 },
714  { "mips32r2", PROCESSOR_M4K, 33 },
715  { "mips64", PROCESSOR_5KC, 64 },
716  { "mips64r2", PROCESSOR_5KC, 65 },
717
718  /* MIPS I */
719  { "r3000", PROCESSOR_R3000, 1 },
720  { "r2000", PROCESSOR_R3000, 1 }, /* = r3000 */
721  { "r3900", PROCESSOR_R3900, 1 },
722
723  /* MIPS II */
724  { "r6000", PROCESSOR_R6000, 2 },
725
726  /* MIPS III */
727  { "r4000", PROCESSOR_R4000, 3 },
728  { "vr4100", PROCESSOR_R4100, 3 },
729  { "vr4111", PROCESSOR_R4111, 3 },
730  { "vr4120", PROCESSOR_R4120, 3 },
731  { "vr4130", PROCESSOR_R4130, 3 },
732  { "vr4300", PROCESSOR_R4300, 3 },
733  { "r4400", PROCESSOR_R4000, 3 }, /* = r4000 */
734  { "r4600", PROCESSOR_R4600, 3 },
735  { "orion", PROCESSOR_R4600, 3 }, /* = r4600 */
736  { "r4650", PROCESSOR_R4650, 3 },
737
738  /* MIPS IV */
739  { "r8000", PROCESSOR_R8000, 4 },
740  { "vr5000", PROCESSOR_R5000, 4 },
741  { "vr5400", PROCESSOR_R5400, 4 },
742  { "vr5500", PROCESSOR_R5500, 4 },
743  { "rm7000", PROCESSOR_R7000, 4 },
744  { "rm9000", PROCESSOR_R9000, 4 },
745
746  /* MIPS32 */
747  { "4kc", PROCESSOR_4KC, 32 },
748  { "4km", PROCESSOR_4KC, 32 }, /* = 4kc */
749  { "4kp", PROCESSOR_4KP, 32 },
750
751  /* MIPS32 Release 2 */
752  { "m4k", PROCESSOR_M4K, 33 },
753  { "24k", PROCESSOR_24K, 33 },
754  { "24kc", PROCESSOR_24K, 33 },  /* 24K  no FPU */
755  { "24kf", PROCESSOR_24K, 33 },  /* 24K 1:2 FPU */
756  { "24kx", PROCESSOR_24KX, 33 }, /* 24K 1:1 FPU */
757
758  /* MIPS64 */
759  { "5kc", PROCESSOR_5KC, 64 },
760  { "5kf", PROCESSOR_5KF, 64 },
761  { "20kc", PROCESSOR_20KC, 64 },
762  { "sb1", PROCESSOR_SB1, 64 },
763  { "sb1a", PROCESSOR_SB1A, 64 },
764  { "sr71000", PROCESSOR_SR71000, 64 },
765
766  /* MIPS64R2 */
767  { "octeon", PROCESSOR_OCTEON, 65 },
768  { "octeon+", PROCESSOR_OCTEON, 65 },
769
770  /* End marker */
771  { 0, 0, 0 }
772};
773
/* Default costs.  If these are used for a processor, we should look
   up the actual costs.  */
776#define DEFAULT_COSTS COSTS_N_INSNS (6),  /* fp_add */       \
777                      COSTS_N_INSNS (7),  /* fp_mult_sf */   \
778                      COSTS_N_INSNS (8),  /* fp_mult_df */   \
779                      COSTS_N_INSNS (23), /* fp_div_sf */    \
780                      COSTS_N_INSNS (36), /* fp_div_df */    \
781                      COSTS_N_INSNS (10), /* int_mult_si */  \
782                      COSTS_N_INSNS (10), /* int_mult_di */  \
783                      COSTS_N_INSNS (69), /* int_div_si */   \
784                      COSTS_N_INSNS (69), /* int_div_di */   \
785                                       2, /* branch_cost */  \
786                                       4  /* memory_latency */
787
788/* Need to replace these with the costs of calling the appropriate
789   libgcc routine.  */
790#define SOFT_FP_COSTS COSTS_N_INSNS (256), /* fp_add */       \
791                      COSTS_N_INSNS (256), /* fp_mult_sf */   \
792                      COSTS_N_INSNS (256), /* fp_mult_df */   \
793                      COSTS_N_INSNS (256), /* fp_div_sf */    \
794                      COSTS_N_INSNS (256)  /* fp_div_df */
795
796static struct mips_rtx_cost_data const mips_rtx_cost_data[PROCESSOR_MAX] =
797  {
798    { /* R3000 */
799      COSTS_N_INSNS (2),            /* fp_add */
800      COSTS_N_INSNS (4),            /* fp_mult_sf */
801      COSTS_N_INSNS (5),            /* fp_mult_df */
802      COSTS_N_INSNS (12),           /* fp_div_sf */
803      COSTS_N_INSNS (19),           /* fp_div_df */
804      COSTS_N_INSNS (12),           /* int_mult_si */
805      COSTS_N_INSNS (12),           /* int_mult_di */
806      COSTS_N_INSNS (35),           /* int_div_si */
807      COSTS_N_INSNS (35),           /* int_div_di */
808                       1,           /* branch_cost */
809                       4            /* memory_latency */
810
811    },
812    { /* 4KC */
813      SOFT_FP_COSTS,
814      COSTS_N_INSNS (6),            /* int_mult_si */
815      COSTS_N_INSNS (6),            /* int_mult_di */
816      COSTS_N_INSNS (36),           /* int_div_si */
817      COSTS_N_INSNS (36),           /* int_div_di */
818                       1,           /* branch_cost */
819                       4            /* memory_latency */
820    },
821    { /* 4KP */
822      SOFT_FP_COSTS,
823      COSTS_N_INSNS (36),           /* int_mult_si */
824      COSTS_N_INSNS (36),           /* int_mult_di */
825      COSTS_N_INSNS (37),           /* int_div_si */
826      COSTS_N_INSNS (37),           /* int_div_di */
827                       1,           /* branch_cost */
828                       4            /* memory_latency */
829    },
830    { /* 5KC */
831      SOFT_FP_COSTS,
832      COSTS_N_INSNS (4),            /* int_mult_si */
833      COSTS_N_INSNS (11),           /* int_mult_di */
834      COSTS_N_INSNS (36),           /* int_div_si */
835      COSTS_N_INSNS (68),           /* int_div_di */
836                       1,           /* branch_cost */
837                       4            /* memory_latency */
838    },
839    { /* 5KF */
840      COSTS_N_INSNS (4),            /* fp_add */
841      COSTS_N_INSNS (4),            /* fp_mult_sf */
842      COSTS_N_INSNS (5),            /* fp_mult_df */
843      COSTS_N_INSNS (17),           /* fp_div_sf */
844      COSTS_N_INSNS (32),           /* fp_div_df */
845      COSTS_N_INSNS (4),            /* int_mult_si */
846      COSTS_N_INSNS (11),           /* int_mult_di */
847      COSTS_N_INSNS (36),           /* int_div_si */
848      COSTS_N_INSNS (68),           /* int_div_di */
849                       1,           /* branch_cost */
850                       4            /* memory_latency */
851    },
852    { /* 20KC */
853      DEFAULT_COSTS
854    },
855    { /* 24k */
856      COSTS_N_INSNS (8),            /* fp_add */
857      COSTS_N_INSNS (8),            /* fp_mult_sf */
858      COSTS_N_INSNS (10),           /* fp_mult_df */
859      COSTS_N_INSNS (34),           /* fp_div_sf */
860      COSTS_N_INSNS (64),           /* fp_div_df */
861      COSTS_N_INSNS (5),            /* int_mult_si */
862      COSTS_N_INSNS (5),            /* int_mult_di */
863      COSTS_N_INSNS (41),           /* int_div_si */
864      COSTS_N_INSNS (41),           /* int_div_di */
865                       1,           /* branch_cost */
866                       4            /* memory_latency */
867    },
868    { /* 24kx */
869      COSTS_N_INSNS (4),            /* fp_add */
870      COSTS_N_INSNS (4),            /* fp_mult_sf */
871      COSTS_N_INSNS (5),            /* fp_mult_df */
872      COSTS_N_INSNS (17),           /* fp_div_sf */
873      COSTS_N_INSNS (32),           /* fp_div_df */
874      COSTS_N_INSNS (5),            /* int_mult_si */
875      COSTS_N_INSNS (5),            /* int_mult_di */
876      COSTS_N_INSNS (41),           /* int_div_si */
877      COSTS_N_INSNS (41),           /* int_div_di */
878                       1,           /* branch_cost */
879                       4            /* memory_latency */
880    },
881    { /* M4k */
882      DEFAULT_COSTS
883    },
884    { /* R3900 */
885      COSTS_N_INSNS (2),            /* fp_add */
886      COSTS_N_INSNS (4),            /* fp_mult_sf */
887      COSTS_N_INSNS (5),            /* fp_mult_df */
888      COSTS_N_INSNS (12),           /* fp_div_sf */
889      COSTS_N_INSNS (19),           /* fp_div_df */
890      COSTS_N_INSNS (2),            /* int_mult_si */
891      COSTS_N_INSNS (2),            /* int_mult_di */
892      COSTS_N_INSNS (35),           /* int_div_si */
893      COSTS_N_INSNS (35),           /* int_div_di */
894                       1,           /* branch_cost */
895                       4            /* memory_latency */
896    },
897    { /* R6000 */
898      COSTS_N_INSNS (3),            /* fp_add */
899      COSTS_N_INSNS (5),            /* fp_mult_sf */
900      COSTS_N_INSNS (6),            /* fp_mult_df */
901      COSTS_N_INSNS (15),           /* fp_div_sf */
902      COSTS_N_INSNS (16),           /* fp_div_df */
903      COSTS_N_INSNS (17),           /* int_mult_si */
904      COSTS_N_INSNS (17),           /* int_mult_di */
905      COSTS_N_INSNS (38),           /* int_div_si */
906      COSTS_N_INSNS (38),           /* int_div_di */
907                       2,           /* branch_cost */
908                       6            /* memory_latency */
909    },
910    { /* R4000 */
911       COSTS_N_INSNS (6),           /* fp_add */
912       COSTS_N_INSNS (7),           /* fp_mult_sf */
913       COSTS_N_INSNS (8),           /* fp_mult_df */
914       COSTS_N_INSNS (23),          /* fp_div_sf */
915       COSTS_N_INSNS (36),          /* fp_div_df */
916       COSTS_N_INSNS (10),          /* int_mult_si */
917       COSTS_N_INSNS (10),          /* int_mult_di */
918       COSTS_N_INSNS (69),          /* int_div_si */
919       COSTS_N_INSNS (69),          /* int_div_di */
920                        2,          /* branch_cost */
921                        6           /* memory_latency */
922    },
923    { /* R4100 */
924      DEFAULT_COSTS
925    },
926    { /* R4111 */
927      DEFAULT_COSTS
928    },
929    { /* R4120 */
930      DEFAULT_COSTS
931    },
932    { /* R4130 */
      /* The only costs that appear to be updated here are those for
	 integer multiplication.  */
935      SOFT_FP_COSTS,
936      COSTS_N_INSNS (4),            /* int_mult_si */
937      COSTS_N_INSNS (6),            /* int_mult_di */
938      COSTS_N_INSNS (69),           /* int_div_si */
939      COSTS_N_INSNS (69),           /* int_div_di */
940                       1,           /* branch_cost */
941                       4            /* memory_latency */
942    },
943    { /* R4300 */
944      DEFAULT_COSTS
945    },
946    { /* R4600 */
947      DEFAULT_COSTS
948    },
949    { /* R4650 */
950      DEFAULT_COSTS
951    },
952    { /* R5000 */
953      COSTS_N_INSNS (6),            /* fp_add */
954      COSTS_N_INSNS (4),            /* fp_mult_sf */
955      COSTS_N_INSNS (5),            /* fp_mult_df */
956      COSTS_N_INSNS (23),           /* fp_div_sf */
957      COSTS_N_INSNS (36),           /* fp_div_df */
958      COSTS_N_INSNS (5),            /* int_mult_si */
959      COSTS_N_INSNS (5),            /* int_mult_di */
960      COSTS_N_INSNS (36),           /* int_div_si */
961      COSTS_N_INSNS (36),           /* int_div_di */
962                       1,           /* branch_cost */
963                       4            /* memory_latency */
964    },
965    { /* R5400 */
966      COSTS_N_INSNS (6),            /* fp_add */
967      COSTS_N_INSNS (5),            /* fp_mult_sf */
968      COSTS_N_INSNS (6),            /* fp_mult_df */
969      COSTS_N_INSNS (30),           /* fp_div_sf */
970      COSTS_N_INSNS (59),           /* fp_div_df */
971      COSTS_N_INSNS (3),            /* int_mult_si */
972      COSTS_N_INSNS (4),            /* int_mult_di */
973      COSTS_N_INSNS (42),           /* int_div_si */
974      COSTS_N_INSNS (74),           /* int_div_di */
975                       1,           /* branch_cost */
976                       4            /* memory_latency */
977    },
978    { /* R5500 */
979      COSTS_N_INSNS (6),            /* fp_add */
980      COSTS_N_INSNS (5),            /* fp_mult_sf */
981      COSTS_N_INSNS (6),            /* fp_mult_df */
982      COSTS_N_INSNS (30),           /* fp_div_sf */
983      COSTS_N_INSNS (59),           /* fp_div_df */
984      COSTS_N_INSNS (5),            /* int_mult_si */
985      COSTS_N_INSNS (9),            /* int_mult_di */
986      COSTS_N_INSNS (42),           /* int_div_si */
987      COSTS_N_INSNS (74),           /* int_div_di */
988                       1,           /* branch_cost */
989                       4            /* memory_latency */
990    },
991    { /* R7000 */
      /* The only costs that are changed here are those for
	 integer multiplication.  */
994      COSTS_N_INSNS (6),            /* fp_add */
995      COSTS_N_INSNS (7),            /* fp_mult_sf */
996      COSTS_N_INSNS (8),            /* fp_mult_df */
997      COSTS_N_INSNS (23),           /* fp_div_sf */
998      COSTS_N_INSNS (36),           /* fp_div_df */
999      COSTS_N_INSNS (5),            /* int_mult_si */
1000      COSTS_N_INSNS (9),            /* int_mult_di */
1001      COSTS_N_INSNS (69),           /* int_div_si */
1002      COSTS_N_INSNS (69),           /* int_div_di */
1003                       1,           /* branch_cost */
1004                       4            /* memory_latency */
1005    },
1006    { /* R8000 */
1007      DEFAULT_COSTS
1008    },
1009    { /* R9000 */
      /* The only costs that are changed here are those for
	 integer multiplication.  */
1012      COSTS_N_INSNS (6),            /* fp_add */
1013      COSTS_N_INSNS (7),            /* fp_mult_sf */
1014      COSTS_N_INSNS (8),            /* fp_mult_df */
1015      COSTS_N_INSNS (23),           /* fp_div_sf */
1016      COSTS_N_INSNS (36),           /* fp_div_df */
1017      COSTS_N_INSNS (3),            /* int_mult_si */
1018      COSTS_N_INSNS (8),            /* int_mult_di */
1019      COSTS_N_INSNS (69),           /* int_div_si */
1020      COSTS_N_INSNS (69),           /* int_div_di */
1021                       1,           /* branch_cost */
1022                       4            /* memory_latency */
1023    },
1024    { /* SB1 */
1025      /* These costs are the same as the SB-1A below.  */
1026      COSTS_N_INSNS (4),            /* fp_add */
1027      COSTS_N_INSNS (4),            /* fp_mult_sf */
1028      COSTS_N_INSNS (4),            /* fp_mult_df */
1029      COSTS_N_INSNS (24),           /* fp_div_sf */
1030      COSTS_N_INSNS (32),           /* fp_div_df */
1031      COSTS_N_INSNS (3),            /* int_mult_si */
1032      COSTS_N_INSNS (4),            /* int_mult_di */
1033      COSTS_N_INSNS (36),           /* int_div_si */
1034      COSTS_N_INSNS (68),           /* int_div_di */
1035                       1,           /* branch_cost */
1036                       4            /* memory_latency */
1037    },
1038    { /* SB1-A */
1039      /* These costs are the same as the SB-1 above.  */
1040      COSTS_N_INSNS (4),            /* fp_add */
1041      COSTS_N_INSNS (4),            /* fp_mult_sf */
1042      COSTS_N_INSNS (4),            /* fp_mult_df */
1043      COSTS_N_INSNS (24),           /* fp_div_sf */
1044      COSTS_N_INSNS (32),           /* fp_div_df */
1045      COSTS_N_INSNS (3),            /* int_mult_si */
1046      COSTS_N_INSNS (4),            /* int_mult_di */
1047      COSTS_N_INSNS (36),           /* int_div_si */
1048      COSTS_N_INSNS (68),           /* int_div_di */
1049                       1,           /* branch_cost */
1050                       4            /* memory_latency */
1051    },
1052    { /* SR71000 */
1053      DEFAULT_COSTS
1054    },
1055  };
1056
1057
1058/* Nonzero if -march should decide the default value of MASK_SOFT_FLOAT.  */
1059#ifndef MIPS_MARCH_CONTROLS_SOFT_FLOAT
1060#define MIPS_MARCH_CONTROLS_SOFT_FLOAT 0
1061#endif
1062
1063/* Initialize the GCC target structure.  */
1064#undef TARGET_ASM_ALIGNED_HI_OP
1065#define TARGET_ASM_ALIGNED_HI_OP "\t.half\t"
1066#undef TARGET_ASM_ALIGNED_SI_OP
1067#define TARGET_ASM_ALIGNED_SI_OP "\t.word\t"
1068#undef TARGET_ASM_ALIGNED_DI_OP
1069#define TARGET_ASM_ALIGNED_DI_OP "\t.dword\t"
1070
1071#undef TARGET_ASM_FUNCTION_PROLOGUE
1072#define TARGET_ASM_FUNCTION_PROLOGUE mips_output_function_prologue
1073#undef TARGET_ASM_FUNCTION_EPILOGUE
1074#define TARGET_ASM_FUNCTION_EPILOGUE mips_output_function_epilogue
1075#undef TARGET_ASM_SELECT_RTX_SECTION
1076#define TARGET_ASM_SELECT_RTX_SECTION mips_select_rtx_section
1077#undef TARGET_ASM_FUNCTION_RODATA_SECTION
1078#define TARGET_ASM_FUNCTION_RODATA_SECTION mips_function_rodata_section
1079
1080#undef TARGET_SCHED_REORDER
1081#define TARGET_SCHED_REORDER mips_sched_reorder
1082#undef TARGET_SCHED_VARIABLE_ISSUE
1083#define TARGET_SCHED_VARIABLE_ISSUE mips_variable_issue
1084#undef TARGET_SCHED_ADJUST_COST
1085#define TARGET_SCHED_ADJUST_COST mips_adjust_cost
1086#undef TARGET_SCHED_ISSUE_RATE
1087#define TARGET_SCHED_ISSUE_RATE mips_issue_rate
1088#undef TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD
1089#define TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD \
1090  mips_multipass_dfa_lookahead
1091
1092#undef TARGET_DEFAULT_TARGET_FLAGS
1093#define TARGET_DEFAULT_TARGET_FLAGS		\
1094  (TARGET_DEFAULT				\
1095   | TARGET_CPU_DEFAULT				\
1096   | TARGET_ENDIAN_DEFAULT			\
1097   | TARGET_FP_EXCEPTIONS_DEFAULT		\
1098   | MASK_CHECK_ZERO_DIV			\
1099   | MASK_FUSED_MADD)
1100#undef TARGET_HANDLE_OPTION
1101#define TARGET_HANDLE_OPTION mips_handle_option
1102
1103#undef TARGET_FUNCTION_OK_FOR_SIBCALL
1104#define TARGET_FUNCTION_OK_FOR_SIBCALL mips_function_ok_for_sibcall
1105
1106#undef TARGET_VALID_POINTER_MODE
1107#define TARGET_VALID_POINTER_MODE mips_valid_pointer_mode
1108#undef TARGET_RTX_COSTS
1109#define TARGET_RTX_COSTS mips_rtx_costs
1110#undef TARGET_ADDRESS_COST
1111#define TARGET_ADDRESS_COST mips_address_cost
1112
1113#undef TARGET_IN_SMALL_DATA_P
1114#define TARGET_IN_SMALL_DATA_P mips_in_small_data_p
1115
1116#undef TARGET_MACHINE_DEPENDENT_REORG
1117#define TARGET_MACHINE_DEPENDENT_REORG mips_reorg
1118
1119#undef TARGET_ASM_FILE_START
1120#undef TARGET_ASM_FILE_END
1121#define TARGET_ASM_FILE_START mips_file_start
1122#define TARGET_ASM_FILE_END mips_file_end
1123#undef TARGET_ASM_FILE_START_FILE_DIRECTIVE
1124#define TARGET_ASM_FILE_START_FILE_DIRECTIVE true
1125
1126#undef TARGET_INIT_LIBFUNCS
1127#define TARGET_INIT_LIBFUNCS mips_init_libfuncs
1128
1129#undef TARGET_BUILD_BUILTIN_VA_LIST
1130#define TARGET_BUILD_BUILTIN_VA_LIST mips_build_builtin_va_list
1131#undef TARGET_GIMPLIFY_VA_ARG_EXPR
1132#define TARGET_GIMPLIFY_VA_ARG_EXPR mips_gimplify_va_arg_expr
1133
1134#undef TARGET_PROMOTE_FUNCTION_ARGS
1135#define TARGET_PROMOTE_FUNCTION_ARGS hook_bool_tree_true
1136#undef TARGET_PROMOTE_FUNCTION_RETURN
1137#define TARGET_PROMOTE_FUNCTION_RETURN hook_bool_tree_true
1138#undef TARGET_PROMOTE_PROTOTYPES
1139#define TARGET_PROMOTE_PROTOTYPES hook_bool_tree_true
1140
1141#undef TARGET_RETURN_IN_MEMORY
1142#define TARGET_RETURN_IN_MEMORY mips_return_in_memory
1143#undef TARGET_RETURN_IN_MSB
1144#define TARGET_RETURN_IN_MSB mips_return_in_msb
1145
1146#undef TARGET_ASM_OUTPUT_MI_THUNK
1147#define TARGET_ASM_OUTPUT_MI_THUNK mips_output_mi_thunk
1148#undef TARGET_ASM_CAN_OUTPUT_MI_THUNK
1149#define TARGET_ASM_CAN_OUTPUT_MI_THUNK hook_bool_tree_hwi_hwi_tree_true
1150
1151#undef TARGET_SETUP_INCOMING_VARARGS
1152#define TARGET_SETUP_INCOMING_VARARGS mips_setup_incoming_varargs
1153#undef TARGET_STRICT_ARGUMENT_NAMING
1154#define TARGET_STRICT_ARGUMENT_NAMING mips_strict_argument_naming
1155#undef TARGET_MUST_PASS_IN_STACK
1156#define TARGET_MUST_PASS_IN_STACK must_pass_in_stack_var_size
1157#undef TARGET_PASS_BY_REFERENCE
1158#define TARGET_PASS_BY_REFERENCE mips_pass_by_reference
1159#undef TARGET_CALLEE_COPIES
1160#define TARGET_CALLEE_COPIES mips_callee_copies
1161#undef TARGET_ARG_PARTIAL_BYTES
1162#define TARGET_ARG_PARTIAL_BYTES mips_arg_partial_bytes
1163
1164#undef TARGET_MODE_REP_EXTENDED
1165#define TARGET_MODE_REP_EXTENDED mips_mode_rep_extended
1166
1167#undef TARGET_VECTOR_MODE_SUPPORTED_P
1168#define TARGET_VECTOR_MODE_SUPPORTED_P mips_vector_mode_supported_p
1169
1170#undef TARGET_INIT_BUILTINS
1171#define TARGET_INIT_BUILTINS mips_init_builtins
1172#undef TARGET_EXPAND_BUILTIN
1173#define TARGET_EXPAND_BUILTIN mips_expand_builtin
1174
1175#undef TARGET_HAVE_TLS
1176#define TARGET_HAVE_TLS HAVE_AS_TLS
1177
1178#undef TARGET_CANNOT_FORCE_CONST_MEM
1179#define TARGET_CANNOT_FORCE_CONST_MEM mips_cannot_force_const_mem
1180
1181#undef TARGET_ENCODE_SECTION_INFO
1182#define TARGET_ENCODE_SECTION_INFO mips_encode_section_info
1183
1184#undef TARGET_ATTRIBUTE_TABLE
1185#define TARGET_ATTRIBUTE_TABLE mips_attribute_table
1186
1187#undef TARGET_EXTRA_LIVE_ON_ENTRY
1188#define TARGET_EXTRA_LIVE_ON_ENTRY mips_extra_live_on_entry
1189
1190#undef TARGET_MIN_ANCHOR_OFFSET
1191#define TARGET_MIN_ANCHOR_OFFSET -32768
1192#undef TARGET_MAX_ANCHOR_OFFSET
1193#define TARGET_MAX_ANCHOR_OFFSET 32767
1194#undef TARGET_USE_BLOCKS_FOR_CONSTANT_P
1195#define TARGET_USE_BLOCKS_FOR_CONSTANT_P mips_use_blocks_for_constant_p
1196#undef TARGET_USE_ANCHORS_FOR_SYMBOL_P
1197#define TARGET_USE_ANCHORS_FOR_SYMBOL_P mips_use_anchors_for_symbol_p
1198
1199struct gcc_target targetm = TARGET_INITIALIZER;
1200
1201/* Classify symbol X, which must be a SYMBOL_REF or a LABEL_REF.  */
1202
1203static enum mips_symbol_type
1204mips_classify_symbol (rtx x)
1205{
1206  if (GET_CODE (x) == LABEL_REF)
1207    {
1208      if (TARGET_MIPS16)
1209	return SYMBOL_CONSTANT_POOL;
1210      if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
1211	return SYMBOL_GOT_LOCAL;
1212      return SYMBOL_GENERAL;
1213    }
1214
1215  gcc_assert (GET_CODE (x) == SYMBOL_REF);
1216
1217  if (SYMBOL_REF_TLS_MODEL (x))
1218    return SYMBOL_TLS;
1219
1220  if (CONSTANT_POOL_ADDRESS_P (x))
1221    {
1222      if (TARGET_MIPS16)
1223	return SYMBOL_CONSTANT_POOL;
1224
1225      if (GET_MODE_SIZE (get_pool_mode (x)) <= mips_section_threshold)
1226	return SYMBOL_SMALL_DATA;
1227    }
1228
1229  /* Do not use small-data accesses for weak symbols; they may end up
1230     being zero.  */
1231  if (SYMBOL_REF_SMALL_P (x)
1232      && !SYMBOL_REF_WEAK (x))
1233    return SYMBOL_SMALL_DATA;
1234
1235  if (TARGET_ABICALLS)
1236    {
1237      if (SYMBOL_REF_DECL (x) == 0)
1238	{
1239	  if (!SYMBOL_REF_LOCAL_P (x))
1240	    return SYMBOL_GOT_GLOBAL;
1241	}
1242      else
1243	{
1244	  /* Don't use GOT accesses for locally-binding symbols if
1245	     TARGET_ABSOLUTE_ABICALLS.  Otherwise, there are three
1246	     cases to consider:
1247
1248		- o32 PIC (either with or without explicit relocs)
1249		- n32/n64 PIC without explicit relocs
1250		- n32/n64 PIC with explicit relocs
1251
1252	     In the first case, both local and global accesses will use an
1253	     R_MIPS_GOT16 relocation.  We must correctly predict which of
1254	     the two semantics (local or global) the assembler and linker
1255	     will apply.  The choice doesn't depend on the symbol's
1256	     visibility, so we deliberately ignore decl_visibility and
1257	     binds_local_p here.
1258
1259	     In the second case, the assembler will not use R_MIPS_GOT16
1260	     relocations, but it chooses between local and global accesses
1261	     in the same way as for o32 PIC.
1262
1263	     In the third case we have more freedom since both forms of
1264	     access will work for any kind of symbol.  However, there seems
1265	     little point in doing things differently.  */
1266	  if (DECL_P (SYMBOL_REF_DECL (x))
1267	      && TREE_PUBLIC (SYMBOL_REF_DECL (x))
1268	      && !(TARGET_ABSOLUTE_ABICALLS
1269		   && targetm.binds_local_p (SYMBOL_REF_DECL (x))))
1270	    return SYMBOL_GOT_GLOBAL;
1271	}
1272
1273      if (!TARGET_ABSOLUTE_ABICALLS)
1274	return SYMBOL_GOT_LOCAL;
1275    }
1276
1277  return SYMBOL_GENERAL;
1278}
1279
1280
1281/* Split X into a base and a constant offset, storing them in *BASE
1282   and *OFFSET respectively.  */
1283
1284static void
1285mips_split_const (rtx x, rtx *base, HOST_WIDE_INT *offset)
1286{
1287  *offset = 0;
1288
1289  if (GET_CODE (x) == CONST)
1290    {
1291      x = XEXP (x, 0);
1292      if (GET_CODE (x) == PLUS && GET_CODE (XEXP (x, 1)) == CONST_INT)
1293	{
1294	  *offset += INTVAL (XEXP (x, 1));
1295	  x = XEXP (x, 0);
1296	}
1297    }
1298  *base = x;
1299}
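
/* A minimal sketch (kept under #if 0, not compiled) of what
   mips_split_const produces: for (const (plus (symbol_ref "x")
   (const_int 12))) it stores the SYMBOL_REF in *BASE and 12 in *OFFSET,
   while a bare SYMBOL_REF or LABEL_REF comes back with an offset of 0.
   The function name is an illustrative assumption.  */
#if 0
static rtx
mips_split_const_sketch (rtx x)
{
  rtx base;
  HOST_WIDE_INT offset;

  mips_split_const (x, &base, &offset);
  /* BASE is now the symbolic part of X and OFFSET its constant part.  */
  return base;
}
#endif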
1300
/* Classify the symbolic expression X, looking through any constant
   offset or UNSPEC address wrapper.  */
1303
1304static enum mips_symbol_type
1305mips_classify_symbolic_expression (rtx x)
1306{
1307  HOST_WIDE_INT offset;
1308
1309  mips_split_const (x, &x, &offset);
1310  if (UNSPEC_ADDRESS_P (x))
1311    return UNSPEC_ADDRESS_TYPE (x);
1312
1313  return mips_classify_symbol (x);
1314}
1315
1316/* Return true if SYMBOL is a SYMBOL_REF and OFFSET + SYMBOL points
1317   to the same object as SYMBOL, or to the same object_block.  */
1318
1319static bool
1320mips_offset_within_object_p (rtx symbol, HOST_WIDE_INT offset)
1321{
1322  if (GET_CODE (symbol) != SYMBOL_REF)
1323    return false;
1324
1325  if (CONSTANT_POOL_ADDRESS_P (symbol)
1326      && offset >= 0
1327      && offset < (int) GET_MODE_SIZE (get_pool_mode (symbol)))
1328    return true;
1329
1330  if (SYMBOL_REF_DECL (symbol) != 0
1331      && offset >= 0
1332      && offset < int_size_in_bytes (TREE_TYPE (SYMBOL_REF_DECL (symbol))))
1333    return true;
1334
1335  if (SYMBOL_REF_HAS_BLOCK_INFO_P (symbol)
1336      && SYMBOL_REF_BLOCK (symbol)
1337      && SYMBOL_REF_BLOCK_OFFSET (symbol) >= 0
1338      && ((unsigned HOST_WIDE_INT) offset + SYMBOL_REF_BLOCK_OFFSET (symbol)
1339	  < (unsigned HOST_WIDE_INT) SYMBOL_REF_BLOCK (symbol)->size))
1340    return true;
1341
1342  return false;
1343}
1344
1345
1346/* Return true if X is a symbolic constant that can be calculated in
1347   the same way as a bare symbol.  If it is, store the type of the
1348   symbol in *SYMBOL_TYPE.  */
1349
1350bool
1351mips_symbolic_constant_p (rtx x, enum mips_symbol_type *symbol_type)
1352{
1353  HOST_WIDE_INT offset;
1354
1355  mips_split_const (x, &x, &offset);
1356  if (UNSPEC_ADDRESS_P (x))
1357    *symbol_type = UNSPEC_ADDRESS_TYPE (x);
1358  else if (GET_CODE (x) == SYMBOL_REF || GET_CODE (x) == LABEL_REF)
1359    {
1360      *symbol_type = mips_classify_symbol (x);
1361      if (*symbol_type == SYMBOL_TLS)
1362	return false;
1363    }
1364  else
1365    return false;
1366
1367  if (offset == 0)
1368    return true;
1369
1370  /* Check whether a nonzero offset is valid for the underlying
1371     relocations.  */
1372  switch (*symbol_type)
1373    {
1374    case SYMBOL_GENERAL:
1375    case SYMBOL_64_HIGH:
1376    case SYMBOL_64_MID:
1377    case SYMBOL_64_LOW:
1378      /* If the target has 64-bit pointers and the object file only
1379	 supports 32-bit symbols, the values of those symbols will be
1380	 sign-extended.  In this case we can't allow an arbitrary offset
1381	 in case the 32-bit value X + OFFSET has a different sign from X.  */
1382      if (Pmode == DImode && !ABI_HAS_64BIT_SYMBOLS)
1383	return mips_offset_within_object_p (x, offset);
1384
1385      /* In other cases the relocations can handle any offset.  */
1386      return true;
1387
1388    case SYMBOL_CONSTANT_POOL:
1389      /* Allow constant pool references to be converted to LABEL+CONSTANT.
1390	 In this case, we no longer have access to the underlying constant,
1391	 but the original symbol-based access was known to be valid.  */
1392      if (GET_CODE (x) == LABEL_REF)
1393	return true;
1394
1395      /* Fall through.  */
1396
1397    case SYMBOL_SMALL_DATA:
1398      /* Make sure that the offset refers to something within the
1399	 underlying object.  This should guarantee that the final
1400	 PC- or GP-relative offset is within the 16-bit limit.  */
1401      return mips_offset_within_object_p (x, offset);
1402
1403    case SYMBOL_GOT_LOCAL:
1404    case SYMBOL_GOTOFF_PAGE:
1405      /* The linker should provide enough local GOT entries for a
1406	 16-bit offset.  Larger offsets may lead to GOT overflow.  */
1407      return SMALL_OPERAND (offset);
1408
1409    case SYMBOL_GOT_GLOBAL:
1410    case SYMBOL_GOTOFF_GLOBAL:
1411    case SYMBOL_GOTOFF_CALL:
1412    case SYMBOL_GOTOFF_LOADGP:
1413    case SYMBOL_TLSGD:
1414    case SYMBOL_TLSLDM:
1415    case SYMBOL_DTPREL:
1416    case SYMBOL_TPREL:
1417    case SYMBOL_GOTTPREL:
1418    case SYMBOL_TLS:
1419      return false;
1420    }
1421  gcc_unreachable ();
1422}
1423
1424
1425/* This function is used to implement REG_MODE_OK_FOR_BASE_P.  */
1426
1427int
1428mips_regno_mode_ok_for_base_p (int regno, enum machine_mode mode, int strict)
1429{
1430  if (regno >= FIRST_PSEUDO_REGISTER)
1431    {
1432      if (!strict)
1433	return true;
1434      regno = reg_renumber[regno];
1435    }
1436
1437  /* These fake registers will be eliminated to either the stack or
1438     hard frame pointer, both of which are usually valid base registers.
1439     Reload deals with the cases where the eliminated form isn't valid.  */
1440  if (regno == ARG_POINTER_REGNUM || regno == FRAME_POINTER_REGNUM)
1441    return true;
1442
1443  /* In mips16 mode, the stack pointer can only address word and doubleword
1444     values, nothing smaller.  There are two problems here:
1445
1446       (a) Instantiating virtual registers can introduce new uses of the
1447	   stack pointer.  If these virtual registers are valid addresses,
1448	   the stack pointer should be too.
1449
1450       (b) Most uses of the stack pointer are not made explicit until
1451	   FRAME_POINTER_REGNUM and ARG_POINTER_REGNUM have been eliminated.
1452	   We don't know until that stage whether we'll be eliminating to the
1453	   stack pointer (which needs the restriction) or the hard frame
1454	   pointer (which doesn't).
1455
1456     All in all, it seems more consistent to only enforce this restriction
1457     during and after reload.  */
1458  if (TARGET_MIPS16 && regno == STACK_POINTER_REGNUM)
1459    return !strict || GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1460
1461  return TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
1462}
1463
1464
1465/* Return true if X is a valid base register for the given mode.
1466   Allow only hard registers if STRICT.  */
1467
1468static bool
1469mips_valid_base_register_p (rtx x, enum machine_mode mode, int strict)
1470{
1471  if (!strict && GET_CODE (x) == SUBREG)
1472    x = SUBREG_REG (x);
1473
1474  return (REG_P (x)
1475	  && mips_regno_mode_ok_for_base_p (REGNO (x), mode, strict));
1476}
1477
1478
1479/* Return true if symbols of type SYMBOL_TYPE can directly address a value
1480   with mode MODE.  This is used for both symbolic and LO_SUM addresses.  */
1481
1482static bool
1483mips_symbolic_address_p (enum mips_symbol_type symbol_type,
1484			 enum machine_mode mode)
1485{
1486  switch (symbol_type)
1487    {
1488    case SYMBOL_GENERAL:
1489      return !TARGET_MIPS16;
1490
1491    case SYMBOL_SMALL_DATA:
1492      return true;
1493
1494    case SYMBOL_CONSTANT_POOL:
1495      /* PC-relative addressing is only available for lw and ld.  */
1496      return GET_MODE_SIZE (mode) == 4 || GET_MODE_SIZE (mode) == 8;
1497
1498    case SYMBOL_GOT_LOCAL:
1499      return true;
1500
1501    case SYMBOL_GOT_GLOBAL:
1502      /* The address will have to be loaded from the GOT first.  */
1503      return false;
1504
1505    case SYMBOL_GOTOFF_PAGE:
1506    case SYMBOL_GOTOFF_GLOBAL:
1507    case SYMBOL_GOTOFF_CALL:
1508    case SYMBOL_GOTOFF_LOADGP:
1509    case SYMBOL_TLS:
1510    case SYMBOL_TLSGD:
1511    case SYMBOL_TLSLDM:
1512    case SYMBOL_DTPREL:
1513    case SYMBOL_GOTTPREL:
1514    case SYMBOL_TPREL:
1515    case SYMBOL_64_HIGH:
1516    case SYMBOL_64_MID:
1517    case SYMBOL_64_LOW:
1518      return true;
1519    }
1520  gcc_unreachable ();
1521}
1522
1523
1524/* Return true if X is a valid address for machine mode MODE.  If it is,
1525   fill in INFO appropriately.  STRICT is true if we should only accept
1526   hard base registers.  */
1527
1528static bool
1529mips_classify_address (struct mips_address_info *info, rtx x,
1530		       enum machine_mode mode, int strict)
1531{
1532  switch (GET_CODE (x))
1533    {
1534    case REG:
1535    case SUBREG:
1536      info->type = ADDRESS_REG;
1537      info->reg = x;
1538      info->offset = const0_rtx;
1539      return mips_valid_base_register_p (info->reg, mode, strict);
1540
1541    case PLUS:
1542      info->type = ADDRESS_REG;
1543      info->reg = XEXP (x, 0);
1544      info->offset = XEXP (x, 1);
1545      return (mips_valid_base_register_p (info->reg, mode, strict)
1546	      && const_arith_operand (info->offset, VOIDmode));
1547
1548    case LO_SUM:
1549      info->type = ADDRESS_LO_SUM;
1550      info->reg = XEXP (x, 0);
1551      info->offset = XEXP (x, 1);
1552      /* We have to trust the creator of the LO_SUM to do something vaguely
1553	 sane.  Target-independent code that creates a LO_SUM should also
1554	 create and verify the matching HIGH.  Target-independent code that
1555	 adds an offset to a LO_SUM must prove that the offset will not
1556	 induce a carry.  Failure to do either of these things would be
1557	 a bug, and we are not required to check for it here.  The MIPS
1558	 backend itself should only create LO_SUMs for valid symbolic
1559	 constants, with the high part being either a HIGH or a copy
1560	 of _gp. */
1561      info->symbol_type = mips_classify_symbolic_expression (info->offset);
1562      return (mips_valid_base_register_p (info->reg, mode, strict)
1563	      && mips_symbolic_address_p (info->symbol_type, mode)
1564	      && mips_lo_relocs[info->symbol_type] != 0);
1565
1566    case CONST_INT:
1567      /* Small-integer addresses don't occur very often, but they
1568	 are legitimate if $0 is a valid base register.  */
1569      info->type = ADDRESS_CONST_INT;
1570      return !TARGET_MIPS16 && SMALL_INT (x);
1571
1572    case CONST:
1573    case LABEL_REF:
1574    case SYMBOL_REF:
1575      info->type = ADDRESS_SYMBOLIC;
1576      return (mips_symbolic_constant_p (x, &info->symbol_type)
1577	      && mips_symbolic_address_p (info->symbol_type, mode)
1578	      && !mips_split_p[info->symbol_type]);
1579
1580    default:
1581      return false;
1582    }
1583}
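
/* Some examples of addresses accepted above (register numbers and
   symbol names are purely illustrative):

	(reg $sp)				ADDRESS_REG, offset 0
	(plus (reg $16) (const_int 64))		ADDRESS_REG
	(lo_sum (reg $2) (symbol_ref "foo"))	ADDRESS_LO_SUM
	(symbol_ref "bar")			ADDRESS_SYMBOLIC

   The last form is only accepted when "bar" can be used directly as
   an address (a small-data symbol, for example) and does not need to
   be split into a high/low pair first.  */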
1584
1585/* Return true if X is a thread-local symbol.  */
1586
1587static bool
1588mips_tls_operand_p (rtx x)
1589{
1590  return GET_CODE (x) == SYMBOL_REF && SYMBOL_REF_TLS_MODEL (x) != 0;
1591}
1592
1593/* A for_each_rtx callback: return true if *X is a thread-local symbol.  */
1594
1595static int
1596mips_tls_symbol_ref_1 (rtx *x, void *data ATTRIBUTE_UNUSED)
1597{
1598  return mips_tls_operand_p (*x);
1599}
1600
1601/* Return true if X can not be forced into a constant pool.  */
1602
1603static bool
1604mips_cannot_force_const_mem (rtx x)
1605{
1606  rtx base;
1607  HOST_WIDE_INT offset;
1608
1609  if (!TARGET_MIPS16)
1610    {
1611      /* As an optimization, reject constants that mips_legitimize_move
1612	 can expand inline.
1613
1614	 Suppose we have a multi-instruction sequence that loads constant C
1615	 into register R.  If R does not get allocated a hard register, and
1616	 R is used in an operand that allows both registers and memory
1617	 references, reload will consider forcing C into memory and using
1618	 one of the instruction's memory alternatives.  Returning false
1619	 here will force it to use an input reload instead.  */
1620      if (GET_CODE (x) == CONST_INT)
1621	return true;
1622
1623      mips_split_const (x, &base, &offset);
1624      if (symbolic_operand (base, VOIDmode) && SMALL_OPERAND (offset))
1625	return true;
1626    }
1627
1628  if (TARGET_HAVE_TLS && for_each_rtx (&x, &mips_tls_symbol_ref_1, 0))
1629    return true;
1630
1631  return false;
1632}
1633
1634/* Implement TARGET_USE_BLOCKS_FOR_CONSTANT_P.  MIPS16 uses per-function
1635   constant pools, but normal-mode code doesn't need to.  */
1636
1637static bool
1638mips_use_blocks_for_constant_p (enum machine_mode mode ATTRIBUTE_UNUSED,
1639				rtx x ATTRIBUTE_UNUSED)
1640{
1641  return !TARGET_MIPS16;
1642}
1643
1644/* Return the number of instructions needed to load a symbol of the
1645   given type into a register.  If valid in an address, the same number
1646   of instructions are needed for loads and stores.  Treat extended
1647   mips16 instructions as two instructions.  */
1648
1649static int
1650mips_symbol_insns (enum mips_symbol_type type)
1651{
1652  switch (type)
1653    {
1654    case SYMBOL_GENERAL:
1655      /* In mips16 code, general symbols must be fetched from the
1656	 constant pool.  */
1657      if (TARGET_MIPS16)
1658	return 0;
1659
1660      /* When using 64-bit symbols, we need 5 preparatory instructions,
1661	 such as:
1662
1663	     lui     $at,%highest(symbol)
1664	     daddiu  $at,$at,%higher(symbol)
1665	     dsll    $at,$at,16
1666	     daddiu  $at,$at,%hi(symbol)
1667	     dsll    $at,$at,16
1668
1669	 The final address is then $at + %lo(symbol).  With 32-bit
1670	 symbols we just need a preparatory lui.  */
1671      return (ABI_HAS_64BIT_SYMBOLS ? 6 : 2);
1672
1673    case SYMBOL_SMALL_DATA:
1674      return 1;
1675
1676    case SYMBOL_CONSTANT_POOL:
1677      /* This case is for mips16 only.  Assume we'll need an
1678	 extended instruction.  */
1679      return 2;
1680
1681    case SYMBOL_GOT_LOCAL:
1682    case SYMBOL_GOT_GLOBAL:
1683      /* Unless -funit-at-a-time is in effect, we can't be sure whether
1684	 the local/global classification is accurate.  See override_options
1685	 for details.
1686
1687	 The worst cases are:
1688
1689	 (1) For local symbols when generating o32 or o64 code.  The assembler
1690	     will use:
1691
1692		 lw	      $at,%got(symbol)
1693		 nop
1694
1695	     ...and the final address will be $at + %lo(symbol).
1696
1697	 (2) For global symbols when -mxgot.  The assembler will use:
1698
1699	         lui     $at,%got_hi(symbol)
1700	         (d)addu $at,$at,$gp
1701
1702	     ...and the final address will be $at + %got_lo(symbol).  */
1703      return 3;
1704
1705    case SYMBOL_GOTOFF_PAGE:
1706    case SYMBOL_GOTOFF_GLOBAL:
1707    case SYMBOL_GOTOFF_CALL:
1708    case SYMBOL_GOTOFF_LOADGP:
1709    case SYMBOL_64_HIGH:
1710    case SYMBOL_64_MID:
1711    case SYMBOL_64_LOW:
1712    case SYMBOL_TLSGD:
1713    case SYMBOL_TLSLDM:
1714    case SYMBOL_DTPREL:
1715    case SYMBOL_GOTTPREL:
1716    case SYMBOL_TPREL:
1717      /* Check whether the offset is a 16- or 32-bit value.  */
1718      return mips_split_p[type] ? 2 : 1;
1719
1720    case SYMBOL_TLS:
1721      /* We don't treat a bare TLS symbol as a constant.  */
1722      return 0;
1723    }
1724  gcc_unreachable ();
1725}
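
/* For example, with 32-bit symbols, taking the address of a
   SYMBOL_GENERAL symbol costs the two instructions counted above:

	lui	$2,%hi(foo)
	addiu	$2,$2,%lo(foo)

   (register and symbol names are illustrative).  A SYMBOL_SMALL_DATA
   reference needs just one GP-relative instruction, hence its count
   of 1.  */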
1726
1727/* Return true if X is a legitimate $sp-based address for mode MODE.  */
1728
1729bool
1730mips_stack_address_p (rtx x, enum machine_mode mode)
1731{
1732  struct mips_address_info addr;
1733
1734  return (mips_classify_address (&addr, x, mode, false)
1735	  && addr.type == ADDRESS_REG
1736	  && addr.reg == stack_pointer_rtx);
1737}
1738
1739/* Return true if a value at OFFSET bytes from BASE can be accessed
1740   using an unextended mips16 instruction.  MODE is the mode of the
1741   value.
1742
1743   Usually the offset in an unextended instruction is a 5-bit field.
1744   The offset is unsigned and shifted left once for HIs, twice
1745   for SIs, and so on.  An exception is SImode accesses off the
1746   stack pointer, which have an 8-bit immediate field.  */
1747
1748static bool
1749mips16_unextended_reference_p (enum machine_mode mode, rtx base, rtx offset)
1750{
1751  if (TARGET_MIPS16
1752      && GET_CODE (offset) == CONST_INT
1753      && INTVAL (offset) >= 0
1754      && (INTVAL (offset) & (GET_MODE_SIZE (mode) - 1)) == 0)
1755    {
1756      if (GET_MODE_SIZE (mode) == 4 && base == stack_pointer_rtx)
1757	return INTVAL (offset) < 256 * GET_MODE_SIZE (mode);
1758      return INTVAL (offset) < 32 * GET_MODE_SIZE (mode);
1759    }
1760  return false;
1761}
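
/* Concretely, the test above accepts offsets that are nonnegative
   multiples of the access size, up to 31 for QImode, 62 for HImode
   and 124 for SImode, with the limit raised to 1020 for SImode
   accesses off the stack pointer.  Anything larger requires an
   extended mips16 instruction.  */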
1762
1763
1764/* Return the number of instructions needed to load or store a value
1765   of mode MODE at X.  Return 0 if X isn't valid for MODE.
1766
1767   For mips16 code, count extended instructions as two instructions.  */
1768
1769int
1770mips_address_insns (rtx x, enum machine_mode mode)
1771{
1772  struct mips_address_info addr;
1773  int factor;
1774
1775  if (mode == BLKmode)
1776    /* BLKmode is used for single unaligned loads and stores.  */
1777    factor = 1;
1778  else
1779    /* Each word of a multi-word value will be accessed individually.  */
1780    factor = (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
1781
1782  if (mips_classify_address (&addr, x, mode, false))
1783    switch (addr.type)
1784      {
1785      case ADDRESS_REG:
1786	if (TARGET_MIPS16
1787	    && !mips16_unextended_reference_p (mode, addr.reg, addr.offset))
1788	  return factor * 2;
1789	return factor;
1790
1791      case ADDRESS_LO_SUM:
1792	return (TARGET_MIPS16 ? factor * 2 : factor);
1793
1794      case ADDRESS_CONST_INT:
1795	return factor;
1796
1797      case ADDRESS_SYMBOLIC:
1798	return factor * mips_symbol_insns (addr.symbol_type);
1799      }
1800  return 0;
1801}
1802
1803
1804/* Likewise for constant X.  */
1805
1806int
1807mips_const_insns (rtx x)
1808{
1809  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
1810  enum mips_symbol_type symbol_type;
1811  HOST_WIDE_INT offset;
1812
1813  switch (GET_CODE (x))
1814    {
1815    case HIGH:
1816      if (TARGET_MIPS16
1817	  || !mips_symbolic_constant_p (XEXP (x, 0), &symbol_type)
1818	  || !mips_split_p[symbol_type])
1819	return 0;
1820
1821      return 1;
1822
1823    case CONST_INT:
1824      if (TARGET_MIPS16)
1825	/* Unsigned 8-bit constants can be loaded using an unextended
1826	   LI instruction.  Unsigned 16-bit constants can be loaded
1827	   using an extended LI.  Negative constants must be loaded
1828	   using LI and then negated.  */
1829	return (INTVAL (x) >= 0 && INTVAL (x) < 256 ? 1
1830		: SMALL_OPERAND_UNSIGNED (INTVAL (x)) ? 2
1831		: INTVAL (x) > -256 && INTVAL (x) < 0 ? 2
1832		: SMALL_OPERAND_UNSIGNED (-INTVAL (x)) ? 3
1833		: 0);
1834
1835      return mips_build_integer (codes, INTVAL (x));
1836
1837    case CONST_DOUBLE:
1838    case CONST_VECTOR:
1839      return (!TARGET_MIPS16 && x == CONST0_RTX (GET_MODE (x)) ? 1 : 0);
1840
1841    case CONST:
1842      if (CONST_GP_P (x))
1843	return 1;
1844
1845      /* See if we can refer to X directly.  */
1846      if (mips_symbolic_constant_p (x, &symbol_type))
1847	return mips_symbol_insns (symbol_type);
1848
1849      /* Otherwise try splitting the constant into a base and offset.
1850	 16-bit offsets can be added using an extra addiu.  Larger offsets
1851	 must be calculated separately and then added to the base.  */
1852      mips_split_const (x, &x, &offset);
1853      if (offset != 0)
1854	{
1855	  int n = mips_const_insns (x);
1856	  if (n != 0)
1857	    {
1858	      if (SMALL_OPERAND (offset))
1859		return n + 1;
1860	      else
1861		return n + 1 + mips_build_integer (codes, offset);
1862	    }
1863	}
1864      return 0;
1865
1866    case SYMBOL_REF:
1867    case LABEL_REF:
1868      return mips_symbol_insns (mips_classify_symbol (x));
1869
1870    default:
1871      return 0;
1872    }
1873}
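
/* A few illustrative mips16 CONST_INT costs from the code above:
   200 costs 1 (an unextended LI), 40000 costs 2 (an extended LI),
   -5 costs 2 (LI followed by a negation) and -40000 costs 3 (an
   extended LI followed by a negation).  */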
1874
1875
1876/* Return the number of instructions needed for memory reference X.
1877   Count extended mips16 instructions as two instructions.  */
1878
1879int
1880mips_fetch_insns (rtx x)
1881{
1882  gcc_assert (MEM_P (x));
1883  return mips_address_insns (XEXP (x, 0), GET_MODE (x));
1884}
1885
1886
1887/* Return the number of instructions needed for an integer division.  */
1888
1889int
1890mips_idiv_insns (void)
1891{
1892  int count;
1893
1894  count = 1;
1895  if (TARGET_CHECK_ZERO_DIV)
1896    {
1897      if (GENERATE_DIVIDE_TRAPS)
1898        count++;
1899      else
1900        count += 2;
1901    }
1902
1903  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
1904    count++;
1905  return count;
1906}
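
/* As a rough illustration, an SImode division with zero-division
   checking on a target that has trap instructions counts as 2,
   corresponding to something like:

	div	$0,$4,$5
	teq	$5,$0,7

   Targets without traps use a branch and a break instead, giving a
   count of 3.  (Register numbers and the trap code are only meant
   as an example.)  */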
1907
1908/* This function is used to implement GO_IF_LEGITIMATE_ADDRESS.  It
1909   returns a nonzero value if X is a legitimate address for a memory
1910   operand of the indicated MODE.  STRICT is nonzero if this function
1911   is called during reload.  */
1912
1913bool
1914mips_legitimate_address_p (enum machine_mode mode, rtx x, int strict)
1915{
1916  struct mips_address_info addr;
1917
1918  return mips_classify_address (&addr, x, mode, strict);
1919}
1920
1921
1922/* Copy VALUE to a register and return that register.  If new pseudos
1923   are allowed, copy it into a new register, otherwise use DEST.  */
1924
1925static rtx
1926mips_force_temporary (rtx dest, rtx value)
1927{
1928  if (!no_new_pseudos)
1929    return force_reg (Pmode, value);
1930  else
1931    {
1932      emit_move_insn (copy_rtx (dest), value);
1933      return dest;
1934    }
1935}
1936
1937
1938/* Return a LO_SUM expression for ADDR.  TEMP is as for mips_force_temporary
1939   and is used to load the high part into a register.  */
1940
1941rtx
1942mips_split_symbol (rtx temp, rtx addr)
1943{
1944  rtx high;
1945
1946  if (TARGET_MIPS16)
1947    high = mips16_gp_pseudo_reg ();
1948  else
1949    high = mips_force_temporary (temp, gen_rtx_HIGH (Pmode, copy_rtx (addr)));
1950  return gen_rtx_LO_SUM (Pmode, high, addr);
1951}
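
/* For example, with explicit relocation operators, a non-mips16 load
   through the LO_SUM returned above typically assembles to:

	lui	$2,%hi(foo)
	lw	$2,%lo(foo)($2)

   (register and symbol names are illustrative).  In mips16 code the
   high part is instead the cached $gp value provided by
   mips16_gp_pseudo_reg.  */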
1952
1953
1954/* Return an UNSPEC address with underlying address ADDRESS and symbol
1955   type SYMBOL_TYPE.  */
1956
1957rtx
1958mips_unspec_address (rtx address, enum mips_symbol_type symbol_type)
1959{
1960  rtx base;
1961  HOST_WIDE_INT offset;
1962
1963  mips_split_const (address, &base, &offset);
1964  base = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, base),
1965			 UNSPEC_ADDRESS_FIRST + symbol_type);
1966  return plus_constant (gen_rtx_CONST (Pmode, base), offset);
1967}
1968
1969
1970/* If mips_unspec_address (ADDR, SYMBOL_TYPE) is a 32-bit value, add the
1971   high part to BASE and return the result.  Just return BASE otherwise.
1972   TEMP is available as a temporary register if needed.
1973
1974   The returned expression can be used as the first operand to a LO_SUM.  */
1975
1976static rtx
1977mips_unspec_offset_high (rtx temp, rtx base, rtx addr,
1978			 enum mips_symbol_type symbol_type)
1979{
1980  if (mips_split_p[symbol_type])
1981    {
1982      addr = gen_rtx_HIGH (Pmode, mips_unspec_address (addr, symbol_type));
1983      addr = mips_force_temporary (temp, addr);
1984      return mips_force_temporary (temp, gen_rtx_PLUS (Pmode, addr, base));
1985    }
1986  return base;
1987}
1988
1989
1990/* Return a legitimate address for REG + OFFSET.  TEMP is as for
1991   mips_force_temporary; it is only needed when OFFSET is not a
1992   SMALL_OPERAND.  */
1993
1994static rtx
1995mips_add_offset (rtx temp, rtx reg, HOST_WIDE_INT offset)
1996{
1997  if (!SMALL_OPERAND (offset))
1998    {
1999      rtx high;
2000      if (TARGET_MIPS16)
2001	{
2002	  /* Load the full offset into a register so that we can use
2003	     an unextended instruction for the address itself.  */
2004	  high = GEN_INT (offset);
2005	  offset = 0;
2006	}
2007      else
2008	{
2009	  /* Leave OFFSET as a 16-bit offset and put the excess in HIGH.  */
2010	  high = GEN_INT (CONST_HIGH_PART (offset));
2011	  offset = CONST_LOW_PART (offset);
2012	}
2013      high = mips_force_temporary (temp, high);
2014      reg = mips_force_temporary (temp, gen_rtx_PLUS (Pmode, high, reg));
2015    }
2016  return plus_constant (reg, offset);
2017}
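
/* As an example, with REG = $16 and OFFSET = 0x12348 (not a
   SMALL_OPERAND), the non-mips16 path above emits something like:

	lui	$1,0x1
	addu	$1,$1,$16

   and returns the equivalent of 0x2348($1), leaving a residual
   offset that fits in 16 bits.  Register numbers are illustrative
   only.  */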
2018
2019/* Emit a call to __tls_get_addr.  SYM is the TLS symbol we are
2020   referencing, and TYPE is the symbol type to use (either global
2021   dynamic or local dynamic).  V0 is an RTX for the return value
2022   location.  The entire insn sequence is returned.  */
2023
2024static GTY(()) rtx mips_tls_symbol;
2025
2026static rtx
2027mips_call_tls_get_addr (rtx sym, enum mips_symbol_type type, rtx v0)
2028{
2029  rtx insn, loc, tga, a0;
2030
2031  a0 = gen_rtx_REG (Pmode, GP_ARG_FIRST);
2032
2033  if (!mips_tls_symbol)
2034    mips_tls_symbol = init_one_libfunc ("__tls_get_addr");
2035
2036  loc = mips_unspec_address (sym, type);
2037
2038  start_sequence ();
2039
2040  emit_insn (gen_rtx_SET (Pmode, a0,
2041			  gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, loc)));
2042  tga = gen_rtx_MEM (Pmode, mips_tls_symbol);
2043  insn = emit_call_insn (gen_call_value (v0, tga, const0_rtx, const0_rtx));
2044  CONST_OR_PURE_CALL_P (insn) = 1;
2045  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), v0);
2046  use_reg (&CALL_INSN_FUNCTION_USAGE (insn), a0);
2047  insn = get_insns ();
2048
2049  end_sequence ();
2050
2051  return insn;
2052}
2053
2054/* Generate the code to access LOC, a thread-local SYMBOL_REF.  The
2055   return value will be a valid address and move_operand (either a REG
2056   or a LO_SUM).  */
2057
2058static rtx
2059mips_legitimize_tls_address (rtx loc)
2060{
2061  rtx dest, insn, v0, v1, tmp1, tmp2, eqv;
2062  enum tls_model model;
2063
2064  v0 = gen_rtx_REG (Pmode, GP_RETURN);
2065  v1 = gen_rtx_REG (Pmode, GP_RETURN + 1);
2066
2067  model = SYMBOL_REF_TLS_MODEL (loc);
2068  /* Only TARGET_ABICALLS code can have more than one module; other
2069     code must be static and should not use a GOT.  All TLS models
2070     reduce to local exec in this situation.  */
2071  if (!TARGET_ABICALLS)
2072    model = TLS_MODEL_LOCAL_EXEC;
2073
2074  switch (model)
2075    {
2076    case TLS_MODEL_GLOBAL_DYNAMIC:
2077      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSGD, v0);
2078      dest = gen_reg_rtx (Pmode);
2079      emit_libcall_block (insn, dest, v0, loc);
2080      break;
2081
2082    case TLS_MODEL_LOCAL_DYNAMIC:
2083      insn = mips_call_tls_get_addr (loc, SYMBOL_TLSLDM, v0);
2084      tmp1 = gen_reg_rtx (Pmode);
2085
2086      /* Attach a unique REG_EQUIV, to allow the RTL optimizers to
2087	 share the LDM result with other LD model accesses.  */
2088      eqv = gen_rtx_UNSPEC (Pmode, gen_rtvec (1, const0_rtx),
2089			    UNSPEC_TLS_LDM);
2090      emit_libcall_block (insn, tmp1, v0, eqv);
2091
2092      tmp2 = mips_unspec_offset_high (NULL, tmp1, loc, SYMBOL_DTPREL);
2093      dest = gen_rtx_LO_SUM (Pmode, tmp2,
2094			     mips_unspec_address (loc, SYMBOL_DTPREL));
2095      break;
2096
2097    case TLS_MODEL_INITIAL_EXEC:
2098      tmp1 = gen_reg_rtx (Pmode);
2099      tmp2 = mips_unspec_address (loc, SYMBOL_GOTTPREL);
2100      if (Pmode == DImode)
2101	{
2102	  emit_insn (gen_tls_get_tp_di (v1));
2103	  emit_insn (gen_load_gotdi (tmp1, pic_offset_table_rtx, tmp2));
2104	}
2105      else
2106	{
2107	  emit_insn (gen_tls_get_tp_si (v1));
2108	  emit_insn (gen_load_gotsi (tmp1, pic_offset_table_rtx, tmp2));
2109	}
2110      dest = gen_reg_rtx (Pmode);
2111      emit_insn (gen_add3_insn (dest, tmp1, v1));
2112      break;
2113
2114    case TLS_MODEL_LOCAL_EXEC:
2115      if (Pmode == DImode)
2116	emit_insn (gen_tls_get_tp_di (v1));
2117      else
2118	emit_insn (gen_tls_get_tp_si (v1));
2119
2120      tmp1 = mips_unspec_offset_high (NULL, v1, loc, SYMBOL_TPREL);
2121      dest = gen_rtx_LO_SUM (Pmode, tmp1,
2122			     mips_unspec_address (loc, SYMBOL_TPREL));
2123      break;
2124
2125    default:
2126      gcc_unreachable ();
2127    }
2128
2129  return dest;
2130}
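
/* As a sketch (exact registers depend on allocation), the local-exec
   model above typically expands to:

	rdhwr	$3,$29			# read the thread pointer
	lui	$2,%tprel_hi(x)
	addu	$2,$2,$3
	addiu	$2,$2,%tprel_lo(x)

   with the final addition usually folded into the load or store that
   uses the address.  */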
2131
2132/* This function is used to implement LEGITIMIZE_ADDRESS.  If *XLOC can
2133   be legitimized in a way that the generic machinery might not expect,
2134   put the new address in *XLOC and return true.  MODE is the mode of
2135   the memory being accessed.  */
2136
2137bool
2138mips_legitimize_address (rtx *xloc, enum machine_mode mode)
2139{
2140  enum mips_symbol_type symbol_type;
2141
2142  if (mips_tls_operand_p (*xloc))
2143    {
2144      *xloc = mips_legitimize_tls_address (*xloc);
2145      return true;
2146    }
2147
2148  /* See if the address can be split into a high part and a LO_SUM.  */
2149  if (mips_symbolic_constant_p (*xloc, &symbol_type)
2150      && mips_symbolic_address_p (symbol_type, mode)
2151      && mips_split_p[symbol_type])
2152    {
2153      *xloc = mips_split_symbol (0, *xloc);
2154      return true;
2155    }
2156
2157  if (GET_CODE (*xloc) == PLUS && GET_CODE (XEXP (*xloc, 1)) == CONST_INT)
2158    {
2159      /* Handle REG + CONSTANT using mips_add_offset.  */
2160      rtx reg;
2161
2162      reg = XEXP (*xloc, 0);
2163      if (!mips_valid_base_register_p (reg, mode, 0))
2164	reg = copy_to_mode_reg (Pmode, reg);
2165      *xloc = mips_add_offset (0, reg, INTVAL (XEXP (*xloc, 1)));
2166      return true;
2167    }
2168
2169  return false;
2170}
2171
2172
2173/* Subroutine of mips_build_integer (with the same interface).
2174   Assume that the final action in the sequence should be a left shift.  */
2175
2176static unsigned int
2177mips_build_shift (struct mips_integer_op *codes, HOST_WIDE_INT value)
2178{
2179  unsigned int i, shift;
2180
2181  /* Shift VALUE right until its lowest bit is set.  Shift arithmetically
2182     since signed numbers are easier to load than unsigned ones.  */
2183  shift = 0;
2184  while ((value & 1) == 0)
2185    value /= 2, shift++;
2186
2187  i = mips_build_integer (codes, value);
2188  codes[i].code = ASHIFT;
2189  codes[i].value = shift;
2190  return i + 1;
2191}
2192
2193
2194/* As for mips_build_shift, but assume that the final action will be
2195   an IOR or PLUS operation.  */
2196
2197static unsigned int
2198mips_build_lower (struct mips_integer_op *codes, unsigned HOST_WIDE_INT value)
2199{
2200  unsigned HOST_WIDE_INT high;
2201  unsigned int i;
2202
2203  high = value & ~(unsigned HOST_WIDE_INT) 0xffff;
2204  if (!LUI_OPERAND (high) && (value & 0x18000) == 0x18000)
2205    {
2206      /* The constant is too complex to load with a simple lui/ori pair
2207	 so our goal is to create as many trailing zeros as possible.
2208	 In this case, we know bit 16 is set and that the low 16 bits
2209	 form a negative number.  If we subtract that number from VALUE,
2210	 we will clear at least the lowest 17 bits, maybe more.  */
2211      i = mips_build_integer (codes, CONST_HIGH_PART (value));
2212      codes[i].code = PLUS;
2213      codes[i].value = CONST_LOW_PART (value);
2214    }
2215  else
2216    {
2217      i = mips_build_integer (codes, high);
2218      codes[i].code = IOR;
2219      codes[i].value = value & 0xffff;
2220    }
2221  return i + 1;
2222}
2223
2224
2225/* Fill CODES with a sequence of rtl operations to load VALUE.
2226   Return the number of operations needed.  */
2227
2228static unsigned int
2229mips_build_integer (struct mips_integer_op *codes,
2230		    unsigned HOST_WIDE_INT value)
2231{
2232  if (SMALL_OPERAND (value)
2233      || SMALL_OPERAND_UNSIGNED (value)
2234      || LUI_OPERAND (value))
2235    {
2236      /* The value can be loaded with a single instruction.  */
2237      codes[0].code = UNKNOWN;
2238      codes[0].value = value;
2239      return 1;
2240    }
2241  else if ((value & 1) != 0 || LUI_OPERAND (CONST_HIGH_PART (value)))
2242    {
2243      /* Either the constant is a simple LUI/ORI combination or its
2244	 lowest bit is set.  We don't want to shift in this case.  */
2245      return mips_build_lower (codes, value);
2246    }
2247  else if ((value & 0xffff) == 0)
2248    {
2249      /* The constant will need at least three actions.  The lowest
2250	 16 bits are clear, so the final action will be a shift.  */
2251      return mips_build_shift (codes, value);
2252    }
2253  else
2254    {
2255      /* The final action could be a shift, add or inclusive OR.
2256	 Rather than use a complex condition to select the best
2257	 approach, try both mips_build_shift and mips_build_lower
2258	 and pick the one that gives the shortest sequence.
2259	 Note that this case is only used once per constant.  */
2260      struct mips_integer_op alt_codes[MIPS_MAX_INTEGER_OPS];
2261      unsigned int cost, alt_cost;
2262
2263      cost = mips_build_shift (codes, value);
2264      alt_cost = mips_build_lower (alt_codes, value);
2265      if (alt_cost < cost)
2266	{
2267	  memcpy (codes, alt_codes, alt_cost * sizeof (codes[0]));
2268	  cost = alt_cost;
2269	}
2270      return cost;
2271    }
2272}
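
/* For example, the 32-bit constant 0x12345678 goes through
   mips_build_lower and needs two operations:

	lui	$2,0x1234
	ori	$2,$2,0x5678

   (the register is arbitrary).  A constant whose low 16 bits are
   clear but which is too wide for a single lui (only possible with
   64-bit values) goes through mips_build_shift instead: the
   shifted-down value is built first and then shifted back left.  */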
2273
2274
2275/* Load VALUE into DEST, using TEMP as a temporary register if need be.  */
2276
2277void
2278mips_move_integer (rtx dest, rtx temp, unsigned HOST_WIDE_INT value)
2279{
2280  struct mips_integer_op codes[MIPS_MAX_INTEGER_OPS];
2281  enum machine_mode mode;
2282  unsigned int i, cost;
2283  rtx x;
2284
2285  mode = GET_MODE (dest);
2286  cost = mips_build_integer (codes, value);
2287
2288  /* Apply each binary operation to X.  Invariant: X is a legitimate
2289     source operand for a SET pattern.  */
2290  x = GEN_INT (codes[0].value);
2291  for (i = 1; i < cost; i++)
2292    {
2293      if (no_new_pseudos)
2294	{
2295	  emit_insn (gen_rtx_SET (VOIDmode, temp, x));
2296	  x = temp;
2297	}
2298      else
2299	x = force_reg (mode, x);
2300      x = gen_rtx_fmt_ee (codes[i].code, mode, x, GEN_INT (codes[i].value));
2301    }
2302
2303  emit_insn (gen_rtx_SET (VOIDmode, dest, x));
2304}
2305
2306
2307/* Subroutine of mips_legitimize_move.  Move constant SRC into register
2308   DEST given that SRC satisfies immediate_operand but doesn't satisfy
2309   move_operand.  */
2310
2311static void
2312mips_legitimize_const_move (enum machine_mode mode, rtx dest, rtx src)
2313{
2314  rtx base;
2315  HOST_WIDE_INT offset;
2316
2317  /* Split moves of big integers into smaller pieces.  */
2318  if (splittable_const_int_operand (src, mode))
2319    {
2320      mips_move_integer (dest, dest, INTVAL (src));
2321      return;
2322    }
2323
2324  /* Split moves of symbolic constants into high/low pairs.  */
2325  if (splittable_symbolic_operand (src, mode))
2326    {
2327      emit_insn (gen_rtx_SET (VOIDmode, dest, mips_split_symbol (dest, src)));
2328      return;
2329    }
2330
2331  if (mips_tls_operand_p (src))
2332    {
2333      emit_move_insn (dest, mips_legitimize_tls_address (src));
2334      return;
2335    }
2336
2337  /* If we have (const (plus symbol offset)), load the symbol first
2338     and then add in the offset.  This is usually better than forcing
2339     the constant into memory, at least in non-mips16 code.  */
2340  mips_split_const (src, &base, &offset);
2341  if (!TARGET_MIPS16
2342      && offset != 0
2343      && (!no_new_pseudos || SMALL_OPERAND (offset)))
2344    {
2345      base = mips_force_temporary (dest, base);
2346      emit_move_insn (dest, mips_add_offset (0, base, offset));
2347      return;
2348    }
2349
2350  src = force_const_mem (mode, src);
2351
2352  /* When using explicit relocs, constant pool references are sometimes
2353     not legitimate addresses.  */
2354  if (!memory_operand (src, VOIDmode))
2355    src = replace_equiv_address (src, mips_split_symbol (dest, XEXP (src, 0)));
2356  emit_move_insn (dest, src);
2357}
2358
2359
2360/* If (set DEST SRC) is not a valid instruction, emit an equivalent
2361   sequence that is valid.  */
2362
2363bool
2364mips_legitimize_move (enum machine_mode mode, rtx dest, rtx src)
2365{
2366  if (!register_operand (dest, mode) && !reg_or_0_operand (src, mode))
2367    {
2368      emit_move_insn (dest, force_reg (mode, src));
2369      return true;
2370    }
2371
2372  /* Check for individual, fully-reloaded mflo and mfhi instructions.  */
2373  if (GET_MODE_SIZE (mode) <= UNITS_PER_WORD
2374      && REG_P (src) && MD_REG_P (REGNO (src))
2375      && REG_P (dest) && GP_REG_P (REGNO (dest)))
2376    {
2377      int other_regno = REGNO (src) == HI_REGNUM ? LO_REGNUM : HI_REGNUM;
2378      if (GET_MODE_SIZE (mode) <= 4)
2379	emit_insn (gen_mfhilo_si (gen_rtx_REG (SImode, REGNO (dest)),
2380				  gen_rtx_REG (SImode, REGNO (src)),
2381				  gen_rtx_REG (SImode, other_regno)));
2382      else
2383	emit_insn (gen_mfhilo_di (gen_rtx_REG (DImode, REGNO (dest)),
2384				  gen_rtx_REG (DImode, REGNO (src)),
2385				  gen_rtx_REG (DImode, other_regno)));
2386      return true;
2387    }
2388
2389  /* We need to deal with constants that would be legitimate
2390     immediate_operands but not legitimate move_operands.  */
2391  if (CONSTANT_P (src) && !move_operand (src, mode))
2392    {
2393      mips_legitimize_const_move (mode, dest, src);
2394      set_unique_reg_note (get_last_insn (), REG_EQUAL, copy_rtx (src));
2395      return true;
2396    }
2397  return false;
2398}
2399
2400/* We need a lot of little routines to check constant values on the
2401   mips16.  These are used to figure out how long the instruction will
2402   be.  It would be much better to do this using constraints, but
2403   there aren't nearly enough letters available.  */
2404
2405static int
2406m16_check_op (rtx op, int low, int high, int mask)
2407{
2408  return (GET_CODE (op) == CONST_INT
2409	  && INTVAL (op) >= low
2410	  && INTVAL (op) <= high
2411	  && (INTVAL (op) & mask) == 0);
2412}
2413
2414int
2415m16_uimm3_b (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2416{
2417  return m16_check_op (op, 0x1, 0x8, 0);
2418}
2419
2420int
2421m16_simm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2422{
2423  return m16_check_op (op, - 0x8, 0x7, 0);
2424}
2425
2426int
2427m16_nsimm4_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2428{
2429  return m16_check_op (op, - 0x7, 0x8, 0);
2430}
2431
2432int
2433m16_simm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2434{
2435  return m16_check_op (op, - 0x10, 0xf, 0);
2436}
2437
2438int
2439m16_nsimm5_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2440{
2441  return m16_check_op (op, - 0xf, 0x10, 0);
2442}
2443
2444int
2445m16_uimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2446{
2447  return m16_check_op (op, (- 0x10) << 2, 0xf << 2, 3);
2448}
2449
2450int
2451m16_nuimm5_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2452{
2453  return m16_check_op (op, (- 0xf) << 2, 0x10 << 2, 3);
2454}
2455
2456int
2457m16_simm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2458{
2459  return m16_check_op (op, - 0x80, 0x7f, 0);
2460}
2461
2462int
2463m16_nsimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2464{
2465  return m16_check_op (op, - 0x7f, 0x80, 0);
2466}
2467
2468int
2469m16_uimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2470{
2471  return m16_check_op (op, 0x0, 0xff, 0);
2472}
2473
2474int
2475m16_nuimm8_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2476{
2477  return m16_check_op (op, - 0xff, 0x0, 0);
2478}
2479
2480int
2481m16_uimm8_m1_1 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2482{
2483  return m16_check_op (op, - 0x1, 0xfe, 0);
2484}
2485
2486int
2487m16_uimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2488{
2489  return m16_check_op (op, 0x0, 0xff << 2, 3);
2490}
2491
2492int
2493m16_nuimm8_4 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2494{
2495  return m16_check_op (op, (- 0xff) << 2, 0x0, 3);
2496}
2497
2498int
2499m16_simm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2500{
2501  return m16_check_op (op, (- 0x80) << 3, 0x7f << 3, 7);
2502}
2503
2504int
2505m16_nsimm8_8 (rtx op, enum machine_mode mode ATTRIBUTE_UNUSED)
2506{
2507  return m16_check_op (op, (- 0x7f) << 3, 0x80 << 3, 7);
2508}
2509
2510static bool
2511mips_rtx_costs (rtx x, int code, int outer_code, int *total)
2512{
2513  enum machine_mode mode = GET_MODE (x);
2514  bool float_mode_p = FLOAT_MODE_P (mode);
2515
2516  switch (code)
2517    {
2518    case CONST_INT:
2519      if (TARGET_MIPS16)
2520        {
2521	  /* A number between 1 and 8 inclusive is efficient for a shift.
2522	     Otherwise, we will need an extended instruction.  */
2523	  if ((outer_code) == ASHIFT || (outer_code) == ASHIFTRT
2524	      || (outer_code) == LSHIFTRT)
2525	    {
2526	      if (INTVAL (x) >= 1 && INTVAL (x) <= 8)
2527		*total = 0;
2528	      else
2529		*total = COSTS_N_INSNS (1);
2530	      return true;
2531	    }
2532
2533	  /* We can use cmpi for an xor with an unsigned 16 bit value.  */
2534	  if ((outer_code) == XOR
2535	      && INTVAL (x) >= 0 && INTVAL (x) < 0x10000)
2536	    {
2537	      *total = 0;
2538	      return true;
2539	    }
2540
2541	  /* We may be able to use slt or sltu for a comparison with a
2542	     signed 16 bit value.  (The boundary conditions aren't quite
2543	     right, but this is just a heuristic anyhow.)  */
2544	  if (((outer_code) == LT || (outer_code) == LE
2545	       || (outer_code) == GE || (outer_code) == GT
2546	       || (outer_code) == LTU || (outer_code) == LEU
2547	       || (outer_code) == GEU || (outer_code) == GTU)
2548	      && INTVAL (x) >= -0x8000 && INTVAL (x) < 0x8000)
2549	    {
2550	      *total = 0;
2551	      return true;
2552	    }
2553
2554	  /* Equality comparisons with 0 are cheap.  */
2555	  if (((outer_code) == EQ || (outer_code) == NE)
2556	      && INTVAL (x) == 0)
2557	    {
2558	      *total = 0;
2559	      return true;
2560	    }
2561
2562	  /* Constants in the range 0...255 can be loaded with an unextended
2563	     instruction.  They are therefore as cheap as a register move.
2564
2565	     Given the choice between "li R1,0...255" and "move R1,R2"
2566	     (where R2 is a known constant), it is usually better to use "li",
2567	     since we do not want to unnecessarily extend the lifetime
2568	     of R2.  */
2569	  if (outer_code == SET
2570	      && INTVAL (x) >= 0
2571	      && INTVAL (x) < 256)
2572	    {
2573	      *total = 0;
2574	      return true;
2575	    }
2576	}
2577      else
2578	{
2579	  /* These can be used anywhere. */
2580	  *total = 0;
2581	  return true;
2582	}
2583
2584      /* Otherwise fall through to the handling below because
2585	 we'll need to construct the constant.  */
2586
2587    case CONST:
2588    case SYMBOL_REF:
2589    case LABEL_REF:
2590    case CONST_DOUBLE:
2591      if (LEGITIMATE_CONSTANT_P (x))
2592	{
2593	  *total = COSTS_N_INSNS (1);
2594	  return true;
2595	}
2596      else
2597	{
2598	  /* The value will need to be fetched from the constant pool.  */
2599	  *total = CONSTANT_POOL_COST;
2600	  return true;
2601	}
2602
2603    case MEM:
2604      {
2605	/* If the address is legitimate, return the number of
2606	   instructions it needs, otherwise use the default handling.  */
2607	int n = mips_address_insns (XEXP (x, 0), GET_MODE (x));
2608	if (n > 0)
2609	  {
2610	    *total = COSTS_N_INSNS (n + 1);
2611	    return true;
2612	  }
2613	return false;
2614      }
2615
2616    case FFS:
2617      *total = COSTS_N_INSNS (6);
2618      return true;
2619
2620    case NOT:
2621      *total = COSTS_N_INSNS ((mode == DImode && !TARGET_64BIT) ? 2 : 1);
2622      return true;
2623
2624    case AND:
2625    case IOR:
2626    case XOR:
2627      if (mode == DImode && !TARGET_64BIT)
2628        {
2629          *total = COSTS_N_INSNS (2);
2630          return true;
2631        }
2632      return false;
2633
2634    case ASHIFT:
2635    case ASHIFTRT:
2636    case LSHIFTRT:
2637      if (mode == DImode && !TARGET_64BIT)
2638        {
2639          *total = COSTS_N_INSNS ((GET_CODE (XEXP (x, 1)) == CONST_INT)
2640                                  ? 4 : 12);
2641          return true;
2642        }
2643      return false;
2644
2645    case ABS:
2646      if (float_mode_p)
2647        *total = COSTS_N_INSNS (1);
2648      else
2649        *total = COSTS_N_INSNS (4);
2650      return true;
2651
2652    case LO_SUM:
2653      *total = COSTS_N_INSNS (1);
2654      return true;
2655
2656    case PLUS:
2657    case MINUS:
2658      if (float_mode_p)
2659	{
2660	  *total = mips_cost->fp_add;
2661	  return true;
2662	}
2663
2664      else if (mode == DImode && !TARGET_64BIT)
2665        {
2666          *total = COSTS_N_INSNS (4);
2667          return true;
2668        }
2669      return false;
2670
2671    case NEG:
2672      if (mode == DImode && !TARGET_64BIT)
2673        {
2674          *total = COSTS_N_INSNS (4);
2675          return true;
2676        }
2677      return false;
2678
2679    case MULT:
2680      if (mode == SFmode)
2681	*total = mips_cost->fp_mult_sf;
2682
2683      else if (mode == DFmode)
2684	*total = mips_cost->fp_mult_df;
2685
2686      else if (mode == SImode)
2687	*total = mips_cost->int_mult_si;
2688
2689      else
2690	*total = mips_cost->int_mult_di;
2691
2692      return true;
2693
2694    case DIV:
2695    case MOD:
2696      if (float_mode_p)
2697	{
2698	  if (mode == SFmode)
2699	    *total = mips_cost->fp_div_sf;
2700	  else
2701	    *total = mips_cost->fp_div_df;
2702
2703	  return true;
2704	}
2705      /* Fall through.  */
2706
2707    case UDIV:
2708    case UMOD:
2709      if (mode == DImode)
2710        *total = mips_cost->int_div_di;
2711      else
2712	*total = mips_cost->int_div_si;
2713
2714      return true;
2715
2716    case SIGN_EXTEND:
2717      /* A sign extend from SImode to DImode in 64 bit mode is often
2718         zero instructions, because the result can often be used
2719         directly by another instruction; we'll call it one.  */
2720      if (TARGET_64BIT && mode == DImode
2721          && GET_MODE (XEXP (x, 0)) == SImode)
2722        *total = COSTS_N_INSNS (1);
2723      else
2724        *total = COSTS_N_INSNS (2);
2725      return true;
2726
2727    case ZERO_EXTEND:
2728      if (TARGET_64BIT && mode == DImode
2729          && GET_MODE (XEXP (x, 0)) == SImode)
2730        *total = COSTS_N_INSNS (2);
2731      else
2732        *total = COSTS_N_INSNS (1);
2733      return true;
2734
2735    case FLOAT:
2736    case UNSIGNED_FLOAT:
2737    case FIX:
2738    case FLOAT_EXTEND:
2739    case FLOAT_TRUNCATE:
2740    case SQRT:
2741      *total = mips_cost->fp_add;
2742      return true;
2743
2744    default:
2745      return false;
2746    }
2747}
2748
2749/* Provide the costs of an addressing mode that contains ADDR.
2750   If ADDR is not a valid address, its cost is irrelevant.  */
2751
2752static int
2753mips_address_cost (rtx addr)
2754{
2755  return mips_address_insns (addr, SImode);
2756}
2757
2758/* Return one word of double-word value OP, taking into account the fixed
2759   endianness of certain registers.  HIGH_P is true to select the high part,
2760   false to select the low part.  */
2761
2762rtx
2763mips_subword (rtx op, int high_p)
2764{
2765  unsigned int byte;
2766  enum machine_mode mode;
2767
2768  mode = GET_MODE (op);
2769  if (mode == VOIDmode)
2770    mode = DImode;
2771
2772  if (TARGET_BIG_ENDIAN ? !high_p : high_p)
2773    byte = UNITS_PER_WORD;
2774  else
2775    byte = 0;
2776
2777  if (REG_P (op))
2778    {
2779      if (FP_REG_P (REGNO (op)))
2780	return gen_rtx_REG (word_mode, high_p ? REGNO (op) + 1 : REGNO (op));
2781      if (ACC_HI_REG_P (REGNO (op)))
2782	return gen_rtx_REG (word_mode, high_p ? REGNO (op) : REGNO (op) + 1);
2783    }
2784
2785  if (MEM_P (op))
2786    return mips_rewrite_small_data (adjust_address (op, word_mode, byte));
2787
2788  return simplify_gen_subreg (word_mode, op, mode, byte);
2789}
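
/* For example, on a 32-bit big-endian target the high word of a
   DImode value in memory is at byte offset 0 and the low word at
   byte offset 4; on a little-endian target the offsets are swapped.
   Paired FPRs and HI/LO are handled specially above because their
   word order does not depend on memory endianness.  */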
2790
2791
2792/* Return true if a 64-bit move from SRC to DEST should be split into two.  */
2793
2794bool
2795mips_split_64bit_move_p (rtx dest, rtx src)
2796{
2797  if (TARGET_64BIT)
2798    return false;
2799
2800  /* FP->FP moves can be done in a single instruction.  */
2801  if (FP_REG_RTX_P (src) && FP_REG_RTX_P (dest))
2802    return false;
2803
2804  /* Check for floating-point loads and stores.  They can be done using
2805     ldc1 and sdc1 on MIPS II and above.  */
2806  if (mips_isa > 1)
2807    {
2808      if (FP_REG_RTX_P (dest) && MEM_P (src))
2809	return false;
2810      if (FP_REG_RTX_P (src) && MEM_P (dest))
2811	return false;
2812    }
2813  return true;
2814}
2815
2816
2817/* Split a 64-bit move from SRC to DEST assuming that
2818   mips_split_64bit_move_p holds.
2819
2820   Moves into and out of FPRs cause some difficulty here.  Such moves
2821   will always be DFmode, since paired FPRs are not allowed to store
2822   DImode values.  The most natural representation would be two separate
2823   32-bit moves, such as:
2824
2825	(set (reg:SI $f0) (mem:SI ...))
2826	(set (reg:SI $f1) (mem:SI ...))
2827
2828   However, the second insn is invalid because odd-numbered FPRs are
2829   not allowed to store independent values.  Use the patterns load_df_low,
2830   load_df_high and store_df_high instead.  */
2831
2832void
2833mips_split_64bit_move (rtx dest, rtx src)
2834{
2835  if (FP_REG_RTX_P (dest))
2836    {
2837      /* Loading an FPR from memory or from GPRs.  */
2838      emit_insn (gen_load_df_low (copy_rtx (dest), mips_subword (src, 0)));
2839      emit_insn (gen_load_df_high (dest, mips_subword (src, 1),
2840				   copy_rtx (dest)));
2841    }
2842  else if (FP_REG_RTX_P (src))
2843    {
2844      /* Storing an FPR into memory or GPRs.  */
2845      emit_move_insn (mips_subword (dest, 0), mips_subword (src, 0));
2846      emit_insn (gen_store_df_high (mips_subword (dest, 1), src));
2847    }
2848  else
2849    {
2850      /* The operation can be split into two normal moves.  Decide in
2851	 which order to do them.  */
2852      rtx low_dest;
2853
2854      low_dest = mips_subword (dest, 0);
2855      if (REG_P (low_dest)
2856	  && reg_overlap_mentioned_p (low_dest, src))
2857	{
2858	  emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2859	  emit_move_insn (low_dest, mips_subword (src, 0));
2860	}
2861      else
2862	{
2863	  emit_move_insn (low_dest, mips_subword (src, 0));
2864	  emit_move_insn (mips_subword (dest, 1), mips_subword (src, 1));
2865	}
2866    }
2867}
2868
2869/* Return the appropriate instructions to move SRC into DEST.  Assume
2870   that SRC is operand 1 and DEST is operand 0.  */
2871
2872const char *
2873mips_output_move (rtx dest, rtx src)
2874{
2875  enum rtx_code dest_code, src_code;
2876  bool dbl_p;
2877
2878  dest_code = GET_CODE (dest);
2879  src_code = GET_CODE (src);
2880  dbl_p = (GET_MODE_SIZE (GET_MODE (dest)) == 8);
2881
2882  if (dbl_p && mips_split_64bit_move_p (dest, src))
2883    return "#";
2884
2885  if ((src_code == REG && GP_REG_P (REGNO (src)))
2886      || (!TARGET_MIPS16 && src == CONST0_RTX (GET_MODE (dest))))
2887    {
2888      if (dest_code == REG)
2889	{
2890	  if (GP_REG_P (REGNO (dest)))
2891	    return "move\t%0,%z1";
2892
2893	  if (MD_REG_P (REGNO (dest)))
2894	    return "mt%0\t%z1";
2895
2896	  if (DSP_ACC_REG_P (REGNO (dest)))
2897	    {
2898	      static char retval[] = "mt__\t%z1,%q0";
2899	      retval[2] = reg_names[REGNO (dest)][4];
2900	      retval[3] = reg_names[REGNO (dest)][5];
2901	      return retval;
2902	    }
2903
2904	  if (FP_REG_P (REGNO (dest)))
2905	    return (dbl_p ? "dmtc1\t%z1,%0" : "mtc1\t%z1,%0");
2906
2907	  if (ALL_COP_REG_P (REGNO (dest)))
2908	    {
2909	      static char retval[] = "dmtc_\t%z1,%0";
2910
2911	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2912	      return (dbl_p ? retval : retval + 1);
2913	    }
2914	}
2915      if (dest_code == MEM)
2916	return (dbl_p ? "sd\t%z1,%0" : "sw\t%z1,%0");
2917    }
2918  if (dest_code == REG && GP_REG_P (REGNO (dest)))
2919    {
2920      if (src_code == REG)
2921	{
2922	  if (DSP_ACC_REG_P (REGNO (src)))
2923	    {
2924	      static char retval[] = "mf__\t%0,%q1";
2925	      retval[2] = reg_names[REGNO (src)][4];
2926	      retval[3] = reg_names[REGNO (src)][5];
2927	      return retval;
2928	    }
2929
2930	  if (ST_REG_P (REGNO (src)) && ISA_HAS_8CC)
2931	    return "lui\t%0,0x3f80\n\tmovf\t%0,%.,%1";
2932
2933	  if (FP_REG_P (REGNO (src)))
2934	    return (dbl_p ? "dmfc1\t%0,%1" : "mfc1\t%0,%1");
2935
2936	  if (ALL_COP_REG_P (REGNO (src)))
2937	    {
2938	      static char retval[] = "dmfc_\t%0,%1";
2939
2940	      retval[4] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
2941	      return (dbl_p ? retval : retval + 1);
2942	    }
2943	}
2944
2945      if (src_code == MEM)
2946	return (dbl_p ? "ld\t%0,%1" : "lw\t%0,%1");
2947
2948      if (src_code == CONST_INT)
2949	{
2950	  /* Don't use the X format, because that will give out of
2951	     range numbers for 64 bit hosts and 32 bit targets.  */
2952	  if (!TARGET_MIPS16)
2953	    return "li\t%0,%1\t\t\t# %X1";
2954
2955	  if (INTVAL (src) >= 0 && INTVAL (src) <= 0xffff)
2956	    return "li\t%0,%1";
2957
2958	  if (INTVAL (src) < 0 && INTVAL (src) >= -0xffff)
2959	    return "#";
2960	}
2961
2962      if (src_code == HIGH)
2963	return "lui\t%0,%h1";
2964
2965      if (CONST_GP_P (src))
2966	return "move\t%0,%1";
2967
2968      if (symbolic_operand (src, VOIDmode))
2969	return (dbl_p ? "dla\t%0,%1" : "la\t%0,%1");
2970    }
2971  if (src_code == REG && FP_REG_P (REGNO (src)))
2972    {
2973      if (dest_code == REG && FP_REG_P (REGNO (dest)))
2974	{
2975	  if (GET_MODE (dest) == V2SFmode)
2976	    return "mov.ps\t%0,%1";
2977	  else
2978	    return (dbl_p ? "mov.d\t%0,%1" : "mov.s\t%0,%1");
2979	}
2980
2981      if (dest_code == MEM)
2982	return (dbl_p ? "sdc1\t%1,%0" : "swc1\t%1,%0");
2983    }
2984  if (dest_code == REG && FP_REG_P (REGNO (dest)))
2985    {
2986      if (src_code == MEM)
2987	return (dbl_p ? "ldc1\t%0,%1" : "lwc1\t%0,%1");
2988    }
2989  if (dest_code == REG && ALL_COP_REG_P (REGNO (dest)) && src_code == MEM)
2990    {
2991      static char retval[] = "l_c_\t%0,%1";
2992
2993      retval[1] = (dbl_p ? 'd' : 'w');
2994      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (dest));
2995      return retval;
2996    }
2997  if (dest_code == MEM && src_code == REG && ALL_COP_REG_P (REGNO (src)))
2998    {
2999      static char retval[] = "s_c_\t%1,%0";
3000
3001      retval[1] = (dbl_p ? 'd' : 'w');
3002      retval[3] = COPNUM_AS_CHAR_FROM_REGNUM (REGNO (src));
3003      return retval;
3004    }
3005  gcc_unreachable ();
3006}
3007
3008/* Restore $gp from its save slot.  Valid only when using o32 or
3009   o64 abicalls.  */
3010
3011void
3012mips_restore_gp (void)
3013{
3014  rtx address, slot;
3015
3016  gcc_assert (TARGET_ABICALLS && TARGET_OLDABI);
3017
3018  address = mips_add_offset (pic_offset_table_rtx,
3019			     frame_pointer_needed
3020			     ? hard_frame_pointer_rtx
3021			     : stack_pointer_rtx,
3022			     current_function_outgoing_args_size);
3023  slot = gen_rtx_MEM (Pmode, address);
3024
3025  emit_move_insn (pic_offset_table_rtx, slot);
3026  if (!TARGET_EXPLICIT_RELOCS)
3027    emit_insn (gen_blockage ());
3028}
3029
3030/* Emit an instruction of the form (set TARGET (CODE OP0 OP1)).  */
3031
3032static void
3033mips_emit_binary (enum rtx_code code, rtx target, rtx op0, rtx op1)
3034{
3035  emit_insn (gen_rtx_SET (VOIDmode, target,
3036			  gen_rtx_fmt_ee (code, GET_MODE (target), op0, op1)));
3037}
3038
3039/* Return true if CMP1 is a suitable second operand for relational
3040   operator CODE.  See also the *sCC patterns in mips.md.  */
3041
3042static bool
3043mips_relational_operand_ok_p (enum rtx_code code, rtx cmp1)
3044{
3045  switch (code)
3046    {
3047    case GT:
3048    case GTU:
3049      return reg_or_0_operand (cmp1, VOIDmode);
3050
3051    case GE:
3052    case GEU:
3053      return !TARGET_MIPS16 && cmp1 == const1_rtx;
3054
3055    case LT:
3056    case LTU:
3057      return arith_operand (cmp1, VOIDmode);
3058
3059    case LE:
3060      return sle_operand (cmp1, VOIDmode);
3061
3062    case LEU:
3063      return sleu_operand (cmp1, VOIDmode);
3064
3065    default:
3066      gcc_unreachable ();
3067    }
3068}
3069
3070/* Canonicalize LE or LEU comparisons into LT or LTU comparisons when
3071   possible to avoid extra instructions or inverting the
3072   comparison.  */
3073
3074static bool
3075mips_canonicalize_comparison (enum rtx_code *code, rtx *cmp1,
3076			      enum machine_mode mode)
3077{
3078  HOST_WIDE_INT original, plus_one;
3079
3080  if (GET_CODE (*cmp1) != CONST_INT)
3081    return false;
3082
3083  original = INTVAL (*cmp1);
3084  plus_one = trunc_int_for_mode ((unsigned HOST_WIDE_INT) original + 1, mode);
3085
3086  switch (*code)
3087    {
3088    case LE:
3089      if (original < plus_one)
3090	{
3091	  *code = LT;
3092	  *cmp1 = force_reg (mode, GEN_INT (plus_one));
3093	  return true;
3094	}
3095      break;
3096
3097    case LEU:
3098      if (plus_one != 0)
3099	{
3100	  *code = LTU;
3101	  *cmp1 = force_reg (mode, GEN_INT (plus_one));
3102	  return true;
3103	}
3104      break;
3105
3106    default:
3107      return false;
3108    }
3109
3110  return false;
3111
3112}
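
/* For instance, "x <= 7" is rewritten above as "x < 8", which maps
   onto a single slt, and "x <=u 0xffff" becomes "x <u 0x10000".
   An LEU comparison against the all-ones value cannot be converted,
   since adding one would wrap to zero; hence the PLUS_ONE != 0
   check.  */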
3113
3114/* Compare CMP0 and CMP1 using relational operator CODE and store the
3115   result in TARGET.  CMP0 and TARGET are register_operands that have
3116   the same integer mode.  If INVERT_PTR is nonnull, it's OK to set
3117   TARGET to the inverse of the result and flip *INVERT_PTR instead.  */
3118
3119static void
3120mips_emit_int_relational (enum rtx_code code, bool *invert_ptr,
3121			  rtx target, rtx cmp0, rtx cmp1)
3122{
3123  /* First see if there is a MIPS instruction that can do this operation
3124     with CMP1 in its current form. If not, try to canonicalize the
3125     comparison to LT. If that fails, try doing the same for the
3126     inverse operation.  If that also fails, force CMP1 into a register
3127     and try again.  */
3128  if (mips_relational_operand_ok_p (code, cmp1))
3129    mips_emit_binary (code, target, cmp0, cmp1);
3130  else if (mips_canonicalize_comparison (&code, &cmp1, GET_MODE (target)))
3131    mips_emit_binary (code, target, cmp0, cmp1);
3132  else
3133    {
3134      enum rtx_code inv_code = reverse_condition (code);
3135      if (!mips_relational_operand_ok_p (inv_code, cmp1))
3136	{
3137	  cmp1 = force_reg (GET_MODE (cmp0), cmp1);
3138	  mips_emit_int_relational (code, invert_ptr, target, cmp0, cmp1);
3139	}
3140      else if (invert_ptr == 0)
3141	{
3142	  rtx inv_target = gen_reg_rtx (GET_MODE (target));
3143	  mips_emit_binary (inv_code, inv_target, cmp0, cmp1);
3144	  mips_emit_binary (XOR, target, inv_target, const1_rtx);
3145	}
3146      else
3147	{
3148	  *invert_ptr = !*invert_ptr;
3149	  mips_emit_binary (inv_code, target, cmp0, cmp1);
3150	}
3151    }
3152}
3153
3154/* Return a register that is zero iff CMP0 and CMP1 are equal.
3155   The register will have the same mode as CMP0.  */
3156
3157static rtx
3158mips_zero_if_equal (rtx cmp0, rtx cmp1)
3159{
3160  if (cmp1 == const0_rtx)
3161    return cmp0;
3162
3163  if (uns_arith_operand (cmp1, VOIDmode))
3164    return expand_binop (GET_MODE (cmp0), xor_optab,
3165			 cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3166
3167  return expand_binop (GET_MODE (cmp0), sub_optab,
3168		       cmp0, cmp1, 0, 0, OPTAB_DIRECT);
3169}
3170
3171/* Convert *CODE into a code that can be used in a floating-point
3172   scc instruction (c.<cond>.<fmt>).  Return true if the values of
3173   the condition code registers will be inverted, with 0 indicating
3174   that the condition holds.  */
3175
3176static bool
3177mips_reverse_fp_cond_p (enum rtx_code *code)
3178{
3179  switch (*code)
3180    {
3181    case NE:
3182    case LTGT:
3183    case ORDERED:
3184      *code = reverse_condition_maybe_unordered (*code);
3185      return true;
3186
3187    default:
3188      return false;
3189    }
3190}
3191
3192/* Convert a comparison into something that can be used in a branch or
3193   conditional move.  cmp_operands[0] and cmp_operands[1] are the values
3194   being compared and *CODE is the code used to compare them.
3195
3196   Update *CODE, *OP0 and *OP1 so that they describe the final comparison.
3197   If NEED_EQ_NE_P, then only EQ/NE comparisons against zero are possible,
3198   otherwise any standard branch condition can be used.  The standard branch
3199   conditions are:
3200
3201      - EQ/NE between two registers.
3202      - any comparison between a register and zero.  */
3203
3204static void
3205mips_emit_compare (enum rtx_code *code, rtx *op0, rtx *op1, bool need_eq_ne_p)
3206{
3207  if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) == MODE_INT)
3208    {
3209      if (!need_eq_ne_p && cmp_operands[1] == const0_rtx)
3210	{
3211	  *op0 = cmp_operands[0];
3212	  *op1 = cmp_operands[1];
3213	}
3214      else if (*code == EQ || *code == NE)
3215	{
3216	  if (need_eq_ne_p)
3217	    {
3218	      *op0 = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3219	      *op1 = const0_rtx;
3220	    }
3221	  else
3222	    {
3223	      *op0 = cmp_operands[0];
3224	      *op1 = force_reg (GET_MODE (*op0), cmp_operands[1]);
3225	    }
3226	}
3227      else
3228	{
3229	  /* The comparison needs a separate scc instruction.  Store the
3230	     result of the scc in *OP0 and compare it against zero.  */
3231	  bool invert = false;
3232	  *op0 = gen_reg_rtx (GET_MODE (cmp_operands[0]));
3233	  *op1 = const0_rtx;
3234	  mips_emit_int_relational (*code, &invert, *op0,
3235				    cmp_operands[0], cmp_operands[1]);
3236	  *code = (invert ? EQ : NE);
3237	}
3238    }
3239  else
3240    {
3241      enum rtx_code cmp_code;
3242
3243      /* Floating-point tests use a separate c.cond.fmt comparison to
3244	 set a condition code register.  The branch or conditional move
3245	 will then compare that register against zero.
3246
3247	 Set CMP_CODE to the code of the comparison instruction and
3248	 *CODE to the code that the branch or move should use.  */
3249      cmp_code = *code;
3250      *code = mips_reverse_fp_cond_p (&cmp_code) ? EQ : NE;
3251      *op0 = (ISA_HAS_8CC
3252	      ? gen_reg_rtx (CCmode)
3253	      : gen_rtx_REG (CCmode, FPSW_REGNUM));
3254      *op1 = const0_rtx;
3255      mips_emit_binary (cmp_code, *op0, cmp_operands[0], cmp_operands[1]);
3256    }
3257}
3258
3259/* Try comparing cmp_operands[0] and cmp_operands[1] using rtl code CODE.
3260   Store the result in TARGET and return true if successful.
3261
3262   On 64-bit targets, TARGET may be wider than cmp_operands[0].  */
3263
3264bool
3265mips_emit_scc (enum rtx_code code, rtx target)
3266{
3267  if (GET_MODE_CLASS (GET_MODE (cmp_operands[0])) != MODE_INT)
3268    return false;
3269
3270  target = gen_lowpart (GET_MODE (cmp_operands[0]), target);
3271  if (code == EQ || code == NE)
3272    {
3273      rtx zie = mips_zero_if_equal (cmp_operands[0], cmp_operands[1]);
3274      mips_emit_binary (code, target, zie, const0_rtx);
3275    }
3276  else
3277    mips_emit_int_relational (code, 0, target,
3278			      cmp_operands[0], cmp_operands[1]);
3279  return true;
3280}
3281
3282/* Emit the common code for doing conditional branches.
3283   operand[0] is the label to jump to.
3284   The comparison operands are saved away by cmp{si,di,sf,df}.  */
3285
3286void
3287gen_conditional_branch (rtx *operands, enum rtx_code code)
3288{
3289  rtx op0, op1, condition;
3290
3291  mips_emit_compare (&code, &op0, &op1, TARGET_MIPS16);
3292  condition = gen_rtx_fmt_ee (code, VOIDmode, op0, op1);
3293  emit_jump_insn (gen_condjump (condition, operands[0]));
3294}
3295
3296/* Implement:
3297
3298   (set temp (COND:CCV2 CMP_OP0 CMP_OP1))
3299   (set DEST (unspec [TRUE_SRC FALSE_SRC temp] UNSPEC_MOVE_TF_PS))  */
3300
3301void
3302mips_expand_vcondv2sf (rtx dest, rtx true_src, rtx false_src,
3303		       enum rtx_code cond, rtx cmp_op0, rtx cmp_op1)
3304{
3305  rtx cmp_result;
3306  bool reversed_p;
3307
3308  reversed_p = mips_reverse_fp_cond_p (&cond);
3309  cmp_result = gen_reg_rtx (CCV2mode);
3310  emit_insn (gen_scc_ps (cmp_result,
3311			 gen_rtx_fmt_ee (cond, VOIDmode, cmp_op0, cmp_op1)));
3312  if (reversed_p)
3313    emit_insn (gen_mips_cond_move_tf_ps (dest, false_src, true_src,
3314					 cmp_result));
3315  else
3316    emit_insn (gen_mips_cond_move_tf_ps (dest, true_src, false_src,
3317					 cmp_result));
3318}
3319
3320/* Emit the common code for conditional moves.  OPERANDS is the array
3321   of operands passed to the conditional move define_expand.  */
3322
3323void
3324gen_conditional_move (rtx *operands)
3325{
3326  enum rtx_code code;
3327  rtx op0, op1;
3328
3329  code = GET_CODE (operands[1]);
3330  mips_emit_compare (&code, &op0, &op1, true);
3331  emit_insn (gen_rtx_SET (VOIDmode, operands[0],
3332			  gen_rtx_IF_THEN_ELSE (GET_MODE (operands[0]),
3333						gen_rtx_fmt_ee (code,
3334								GET_MODE (op0),
3335								op0, op1),
3336						operands[2], operands[3])));
3337}
3338
3339/* Emit a conditional trap.  OPERANDS is the array of operands passed to
3340   the conditional_trap expander.  */
3341
3342void
3343mips_gen_conditional_trap (rtx *operands)
3344{
3345  rtx op0, op1;
3346  enum rtx_code cmp_code = GET_CODE (operands[0]);
3347  enum machine_mode mode = GET_MODE (cmp_operands[0]);
3348
3349  /* MIPS conditional trap machine instructions don't have GT or LE
3350     flavors, so we must invert the comparison and convert to LT and
3351     GE, respectively.  */
3352  switch (cmp_code)
3353    {
3354    case GT: cmp_code = LT; break;
3355    case LE: cmp_code = GE; break;
3356    case GTU: cmp_code = LTU; break;
3357    case LEU: cmp_code = GEU; break;
3358    default: break;
3359    }
3360  if (cmp_code == GET_CODE (operands[0]))
3361    {
3362      op0 = cmp_operands[0];
3363      op1 = cmp_operands[1];
3364    }
3365  else
3366    {
3367      op0 = cmp_operands[1];
3368      op1 = cmp_operands[0];
3369    }
3370  op0 = force_reg (mode, op0);
3371  if (!arith_operand (op1, mode))
3372    op1 = force_reg (mode, op1);
3373
3374  emit_insn (gen_rtx_TRAP_IF (VOIDmode,
3375			      gen_rtx_fmt_ee (cmp_code, mode, op0, op1),
3376			      operands[1]));
3377}
3378
3379/* Load function address ADDR into register DEST.  SIBCALL_P is true
3380   if the address is needed for a sibling call.  */
3381
3382static void
3383mips_load_call_address (rtx dest, rtx addr, int sibcall_p)
3384{
3385  /* If we're generating PIC, and this call is to a global function,
3386     try to allow its address to be resolved lazily.  This isn't
3387     possible for NewABI sibcalls since the value of $gp on entry
3388     to the stub would be our caller's gp, not ours.  */
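
  /* Roughly speaking, the sequence below loads the address from the
     function's GOT call slot (a %call16 access, or %call_hi/%call_lo
     for xgot), which the linker is free to point at a lazy-binding
     stub.  */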
3389  if (TARGET_EXPLICIT_RELOCS
3390      && !(sibcall_p && TARGET_NEWABI)
3391      && global_got_operand (addr, VOIDmode))
3392    {
3393      rtx high, lo_sum_symbol;
3394
3395      high = mips_unspec_offset_high (dest, pic_offset_table_rtx,
3396				      addr, SYMBOL_GOTOFF_CALL);
3397      lo_sum_symbol = mips_unspec_address (addr, SYMBOL_GOTOFF_CALL);
3398      if (Pmode == SImode)
3399	emit_insn (gen_load_callsi (dest, high, lo_sum_symbol));
3400      else
3401	emit_insn (gen_load_calldi (dest, high, lo_sum_symbol));
3402    }
3403  else
3404    emit_move_insn (dest, addr);
3405}
3406
3407
3408/* Expand a call or call_value instruction.  RESULT is where the
3409   result will go (null for calls), ADDR is the address of the
3410   function, ARGS_SIZE is the size of the arguments and AUX is
   the value passed to us by function_arg.  SIBCALL_P is true
3412   if we are expanding a sibling call, false if we're expanding
3413   a normal call.  */
3414
3415void
3416mips_expand_call (rtx result, rtx addr, rtx args_size, rtx aux, int sibcall_p)
3417{
3418  rtx orig_addr, pattern, insn;
3419
3420  orig_addr = addr;
3421  if (!call_insn_operand (addr, VOIDmode))
3422    {
3423      addr = gen_reg_rtx (Pmode);
3424      mips_load_call_address (addr, orig_addr, sibcall_p);
3425    }
3426
3427  if (TARGET_MIPS16
3428      && mips16_hard_float
3429      && build_mips16_call_stub (result, addr, args_size,
3430				 aux == 0 ? 0 : (int) GET_MODE (aux)))
3431    return;
3432
3433  if (result == 0)
3434    pattern = (sibcall_p
3435	       ? gen_sibcall_internal (addr, args_size)
3436	       : gen_call_internal (addr, args_size));
3437  else if (GET_CODE (result) == PARALLEL && XVECLEN (result, 0) == 2)
3438    {
3439      rtx reg1, reg2;
3440
3441      reg1 = XEXP (XVECEXP (result, 0, 0), 0);
3442      reg2 = XEXP (XVECEXP (result, 0, 1), 0);
3443      pattern =
3444	(sibcall_p
3445	 ? gen_sibcall_value_multiple_internal (reg1, addr, args_size, reg2)
3446	 : gen_call_value_multiple_internal (reg1, addr, args_size, reg2));
3447    }
3448  else
3449    pattern = (sibcall_p
3450	       ? gen_sibcall_value_internal (result, addr, args_size)
3451	       : gen_call_value_internal (result, addr, args_size));
3452
3453  insn = emit_call_insn (pattern);
3454
3455  /* Lazy-binding stubs require $gp to be valid on entry.  */
3456  if (global_got_operand (orig_addr, VOIDmode))
3457    use_reg (&CALL_INSN_FUNCTION_USAGE (insn), pic_offset_table_rtx);
3458}
3459
3460
3461/* We can handle any sibcall when TARGET_SIBCALLS is true.  */
3462
3463static bool
3464mips_function_ok_for_sibcall (tree decl ATTRIBUTE_UNUSED,
3465			      tree exp ATTRIBUTE_UNUSED)
3466{
3467  return TARGET_SIBCALLS;
3468}
3469
3470/* Emit code to move general operand SRC into condition-code
3471   register DEST.  SCRATCH is a scratch TFmode float register.
3472   The sequence is:
3473
3474	FP1 = SRC
3475	FP2 = 0.0f
3476	DEST = FP2 < FP1
3477
3478   where FP1 and FP2 are single-precision float registers
3479   taken from SCRATCH.  */
3480
3481void
3482mips_emit_fcc_reload (rtx dest, rtx src, rtx scratch)
3483{
3484  rtx fp1, fp2;
3485
3486  /* Change the source to SFmode.  */
3487  if (MEM_P (src))
3488    src = adjust_address (src, SFmode, 0);
3489  else if (REG_P (src) || GET_CODE (src) == SUBREG)
3490    src = gen_rtx_REG (SFmode, true_regnum (src));
3491
3492  fp1 = gen_rtx_REG (SFmode, REGNO (scratch));
3493  fp2 = gen_rtx_REG (SFmode, REGNO (scratch) + FP_INC);
3494
3495  emit_move_insn (copy_rtx (fp1), src);
3496  emit_move_insn (copy_rtx (fp2), CONST0_RTX (SFmode));
3497  emit_insn (gen_slt_sf (dest, fp2, fp1));
3498}
3499
3500/* Emit code to change the current function's return address to
3501   ADDRESS.  SCRATCH is available as a scratch register, if needed.
3502   ADDRESS and SCRATCH are both word-mode GPRs.  */
3503
3504void
3505mips_set_return_address (rtx address, rtx scratch)
3506{
3507  rtx slot_address;
3508
3509  compute_frame_size (get_frame_size ());
3510  gcc_assert ((cfun->machine->frame.mask >> 31) & 1);
3511  slot_address = mips_add_offset (scratch, stack_pointer_rtx,
3512				  cfun->machine->frame.gp_sp_offset);
3513
3514  emit_move_insn (gen_rtx_MEM (GET_MODE (address), slot_address), address);
3515}
3516
3517/* Emit straight-line code to move LENGTH bytes from SRC to DEST.
3518   Assume that the areas do not overlap.  */
3519
3520static void
3521mips_block_move_straight (rtx dest, rtx src, HOST_WIDE_INT length)
3522{
3523  HOST_WIDE_INT offset, delta;
3524  unsigned HOST_WIDE_INT bits;
3525  int i;
3526  enum machine_mode mode;
3527  rtx *regs;
3528
3529  /* Work out how many bits to move at a time.  If both operands have
3530     half-word alignment, it is usually better to move in half words.
3531     For instance, lh/lh/sh/sh is usually better than lwl/lwr/swl/swr
3532     and lw/lw/sw/sw is usually better than ldl/ldr/sdl/sdr.
3533     Otherwise move word-sized chunks.  */
3534  if (MEM_ALIGN (src) == BITS_PER_WORD / 2
3535      && MEM_ALIGN (dest) == BITS_PER_WORD / 2)
3536    bits = BITS_PER_WORD / 2;
3537  else
3538    bits = BITS_PER_WORD;
3539
3540  mode = mode_for_size (bits, MODE_INT, 0);
3541  delta = bits / BITS_PER_UNIT;
3542
3543  /* Allocate a buffer for the temporary registers.  */
3544  regs = alloca (sizeof (rtx) * length / delta);
3545
3546  /* Load as many BITS-sized chunks as possible.  Use a normal load if
3547     the source has enough alignment, otherwise use left/right pairs.  */
3548  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3549    {
3550      regs[i] = gen_reg_rtx (mode);
3551      if (MEM_ALIGN (src) >= bits)
3552	emit_move_insn (regs[i], adjust_address (src, mode, offset));
3553      else
3554	{
3555	  rtx part = adjust_address (src, BLKmode, offset);
3556	  if (!mips_expand_unaligned_load (regs[i], part, bits, 0))
3557	    gcc_unreachable ();
3558	}
3559    }
3560
3561  /* Copy the chunks to the destination.  */
3562  for (offset = 0, i = 0; offset + delta <= length; offset += delta, i++)
3563    if (MEM_ALIGN (dest) >= bits)
3564      emit_move_insn (adjust_address (dest, mode, offset), regs[i]);
3565    else
3566      {
3567	rtx part = adjust_address (dest, BLKmode, offset);
3568	if (!mips_expand_unaligned_store (part, regs[i], bits, 0))
3569	  gcc_unreachable ();
3570      }
3571
3572  /* Mop up any left-over bytes.  */
3573  if (offset < length)
3574    {
3575      src = adjust_address (src, BLKmode, offset);
3576      dest = adjust_address (dest, BLKmode, offset);
3577      move_by_pieces (dest, src, length - offset,
3578		      MIN (MEM_ALIGN (src), MEM_ALIGN (dest)), 0);
3579    }
3580}
3581
3582#define MAX_MOVE_REGS 4
3583#define MAX_MOVE_BYTES (MAX_MOVE_REGS * UNITS_PER_WORD)
3584
3585
3586/* Helper function for doing a loop-based block operation on memory
3587   reference MEM.  Each iteration of the loop will operate on LENGTH
3588   bytes of MEM.
3589
3590   Create a new base register for use within the loop and point it to
3591   the start of MEM.  Create a new memory reference that uses this
3592   register.  Store them in *LOOP_REG and *LOOP_MEM respectively.  */
3593
3594static void
3595mips_adjust_block_mem (rtx mem, HOST_WIDE_INT length,
3596		       rtx *loop_reg, rtx *loop_mem)
3597{
3598  *loop_reg = copy_addr_to_reg (XEXP (mem, 0));
3599
3600  /* Although the new mem does not refer to a known location,
3601     it does keep up to LENGTH bytes of alignment.  */
3602  *loop_mem = change_address (mem, BLKmode, *loop_reg);
3603  set_mem_align (*loop_mem, MIN (MEM_ALIGN (mem), length * BITS_PER_UNIT));
3604}
3605
3606
3607/* Move LENGTH bytes from SRC to DEST using a loop that moves MAX_MOVE_BYTES
3608   per iteration.  LENGTH must be at least MAX_MOVE_BYTES.  Assume that the
3609   memory regions do not overlap.  */
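
/* For example, with 32-bit registers MAX_MOVE_BYTES is 16, so a 70-byte
   copy becomes a four-iteration loop followed by a 6-byte straight-line
   tail.  */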
3610
3611static void
3612mips_block_move_loop (rtx dest, rtx src, HOST_WIDE_INT length)
3613{
3614  rtx label, src_reg, dest_reg, final_src;
3615  HOST_WIDE_INT leftover;
3616
3617  leftover = length % MAX_MOVE_BYTES;
3618  length -= leftover;
3619
3620  /* Create registers and memory references for use within the loop.  */
3621  mips_adjust_block_mem (src, MAX_MOVE_BYTES, &src_reg, &src);
3622  mips_adjust_block_mem (dest, MAX_MOVE_BYTES, &dest_reg, &dest);
3623
3624  /* Calculate the value that SRC_REG should have after the last iteration
3625     of the loop.  */
3626  final_src = expand_simple_binop (Pmode, PLUS, src_reg, GEN_INT (length),
3627				   0, 0, OPTAB_WIDEN);
3628
3629  /* Emit the start of the loop.  */
3630  label = gen_label_rtx ();
3631  emit_label (label);
3632
3633  /* Emit the loop body.  */
3634  mips_block_move_straight (dest, src, MAX_MOVE_BYTES);
3635
3636  /* Move on to the next block.  */
3637  emit_move_insn (src_reg, plus_constant (src_reg, MAX_MOVE_BYTES));
3638  emit_move_insn (dest_reg, plus_constant (dest_reg, MAX_MOVE_BYTES));
3639
3640  /* Emit the loop condition.  */
3641  if (Pmode == DImode)
3642    emit_insn (gen_cmpdi (src_reg, final_src));
3643  else
3644    emit_insn (gen_cmpsi (src_reg, final_src));
3645  emit_jump_insn (gen_bne (label));
3646
3647  /* Mop up any left-over bytes.  */
3648  if (leftover)
3649    mips_block_move_straight (dest, src, leftover);
3650}
3651
3652/* Expand a movmemsi instruction.  */
3653
3654bool
3655mips_expand_block_move (rtx dest, rtx src, rtx length)
3656{
3657  if (GET_CODE (length) == CONST_INT)
3658    {
3659      if (INTVAL (length) <= 2 * MAX_MOVE_BYTES)
3660	{
3661	  mips_block_move_straight (dest, src, INTVAL (length));
3662	  return true;
3663	}
3664      else if (optimize)
3665	{
3666	  mips_block_move_loop (dest, src, INTVAL (length));
3667	  return true;
3668	}
3669    }
3670  return false;
3671}
3672
3673/* Argument support functions.  */
3674
3675/* Initialize CUMULATIVE_ARGS for a function.  */
3676
3677void
3678init_cumulative_args (CUMULATIVE_ARGS *cum, tree fntype,
3679		      rtx libname ATTRIBUTE_UNUSED)
3680{
3681  static CUMULATIVE_ARGS zero_cum;
3682  tree param, next_param;
3683
3684  *cum = zero_cum;
3685  cum->prototype = (fntype && TYPE_ARG_TYPES (fntype));
3686
3687  /* Determine if this function has variable arguments.  This is
     indicated by the last argument being 'void_type_node' if there
3689     are no variable arguments.  The standard MIPS calling sequence
3690     passes all arguments in the general purpose registers in this case.  */
3691
3692  for (param = fntype ? TYPE_ARG_TYPES (fntype) : 0;
3693       param != 0; param = next_param)
3694    {
3695      next_param = TREE_CHAIN (param);
3696      if (next_param == 0 && TREE_VALUE (param) != void_type_node)
3697	cum->gp_reg_found = 1;
3698    }
3699}
3700
3701
3702/* Fill INFO with information about a single argument.  CUM is the
3703   cumulative state for earlier arguments.  MODE is the mode of this
3704   argument and TYPE is its type (if known).  NAMED is true if this
3705   is a named (fixed) argument rather than a variable one.  */
3706
3707static void
3708mips_arg_info (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3709	       tree type, int named, struct mips_arg_info *info)
3710{
3711  bool doubleword_aligned_p;
3712  unsigned int num_bytes, num_words, max_regs;
3713
3714  /* Work out the size of the argument.  */
3715  num_bytes = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
3716  num_words = (num_bytes + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
3717
3718  /* Decide whether it should go in a floating-point register, assuming
3719     one is free.  Later code checks for availability.
3720
3721     The checks against UNITS_PER_FPVALUE handle the soft-float and
3722     single-float cases.  */
3723  switch (mips_abi)
3724    {
3725    case ABI_EABI:
3726      /* The EABI conventions have traditionally been defined in terms
3727	 of TYPE_MODE, regardless of the actual type.  */
3728      info->fpr_p = ((GET_MODE_CLASS (mode) == MODE_FLOAT
3729		      || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3730		     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3731      break;
3732
3733    case ABI_32:
3734    case ABI_O64:
3735      /* Only leading floating-point scalars are passed in
3736	 floating-point registers.  We also handle vector floats the same
	 way, which is OK because they are not covered by the standard ABI.  */
3738      info->fpr_p = (!cum->gp_reg_found
3739		     && cum->arg_number < 2
3740		     && (type == 0 || SCALAR_FLOAT_TYPE_P (type)
3741			 || VECTOR_FLOAT_TYPE_P (type))
3742		     && (GET_MODE_CLASS (mode) == MODE_FLOAT
3743			 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3744		     && GET_MODE_SIZE (mode) <= UNITS_PER_FPVALUE);
3745      break;
3746
3747    case ABI_N32:
3748    case ABI_64:
3749      /* Scalar and complex floating-point types are passed in
3750	 floating-point registers.  */
3751      info->fpr_p = (named
3752		     && (type == 0 || FLOAT_TYPE_P (type))
3753		     && (GET_MODE_CLASS (mode) == MODE_FLOAT
3754			 || GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3755			 || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
3756		     && GET_MODE_UNIT_SIZE (mode) <= UNITS_PER_FPVALUE);
3757
3758      /* ??? According to the ABI documentation, the real and imaginary
3759	 parts of complex floats should be passed in individual registers.
3760	 The real and imaginary parts of stack arguments are supposed
3761	 to be contiguous and there should be an extra word of padding
3762	 at the end.
3763
3764	 This has two problems.  First, it makes it impossible to use a
3765	 single "void *" va_list type, since register and stack arguments
3766	 are passed differently.  (At the time of writing, MIPSpro cannot
3767	 handle complex float varargs correctly.)  Second, it's unclear
3768	 what should happen when there is only one register free.
3769
3770	 For now, we assume that named complex floats should go into FPRs
3771	 if there are two FPRs free, otherwise they should be passed in the
3772	 same way as a struct containing two floats.  */
3773      if (info->fpr_p
3774	  && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
3775	  && GET_MODE_UNIT_SIZE (mode) < UNITS_PER_FPVALUE)
3776	{
3777	  if (cum->num_gprs >= MAX_ARGS_IN_REGISTERS - 1)
3778	    info->fpr_p = false;
3779	  else
3780	    num_words = 2;
3781	}
3782      break;
3783
3784    default:
3785      gcc_unreachable ();
3786    }
3787
3788  /* See whether the argument has doubleword alignment.  */
3789  doubleword_aligned_p = FUNCTION_ARG_BOUNDARY (mode, type) > BITS_PER_WORD;
3790
3791  /* Set REG_OFFSET to the register count we're interested in.
3792     The EABI allocates the floating-point registers separately,
3793     but the other ABIs allocate them like integer registers.  */
3794  info->reg_offset = (mips_abi == ABI_EABI && info->fpr_p
3795		      ? cum->num_fprs
3796		      : cum->num_gprs);
3797
3798  /* Advance to an even register if the argument is doubleword-aligned.  */
3799  if (doubleword_aligned_p)
3800    info->reg_offset += info->reg_offset & 1;
3801
3802  /* Work out the offset of a stack argument.  */
3803  info->stack_offset = cum->stack_words;
3804  if (doubleword_aligned_p)
3805    info->stack_offset += info->stack_offset & 1;
3806
3807  max_regs = MAX_ARGS_IN_REGISTERS - info->reg_offset;
3808
3809  /* Partition the argument between registers and stack.  */
3810  info->reg_words = MIN (num_words, max_regs);
3811  info->stack_words = num_words - info->reg_words;
3812}
3813
3814
3815/* Implement FUNCTION_ARG_ADVANCE.  */
3816
3817void
3818function_arg_advance (CUMULATIVE_ARGS *cum, enum machine_mode mode,
3819		      tree type, int named)
3820{
3821  struct mips_arg_info info;
3822
3823  mips_arg_info (cum, mode, type, named, &info);
3824
3825  if (!info.fpr_p)
3826    cum->gp_reg_found = true;
3827
3828  /* See the comment above the cumulative args structure in mips.h
3829     for an explanation of what this code does.  It assumes the O32
3830     ABI, which passes at most 2 arguments in float registers.  */
3831  if (cum->arg_number < 2 && info.fpr_p)
    cum->fp_code += (mode == SFmode ? 1 : 2) << (cum->arg_number * 2);
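
  /* For example, assuming the two-bits-per-argument encoding described
     in mips.h, a (float, double) argument list gives an fp_code of
     1 + (2 << 2) == 9.  */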
3833
3834  if (mips_abi != ABI_EABI || !info.fpr_p)
3835    cum->num_gprs = info.reg_offset + info.reg_words;
3836  else if (info.reg_words > 0)
3837    cum->num_fprs += FP_INC;
3838
3839  if (info.stack_words > 0)
3840    cum->stack_words = info.stack_offset + info.stack_words;
3841
3842  cum->arg_number++;
3843}
3844
3845/* Implement FUNCTION_ARG.  */
3846
3847struct rtx_def *
3848function_arg (const CUMULATIVE_ARGS *cum, enum machine_mode mode,
3849	      tree type, int named)
3850{
3851  struct mips_arg_info info;
3852
3853  /* We will be called with a mode of VOIDmode after the last argument
3854     has been seen.  Whatever we return will be passed to the call
3855     insn.  If we need a mips16 fp_code, return a REG with the code
3856     stored as the mode.  */
3857  if (mode == VOIDmode)
3858    {
3859      if (TARGET_MIPS16 && cum->fp_code != 0)
3860	return gen_rtx_REG ((enum machine_mode) cum->fp_code, 0);
3861
3862      else
3863	return 0;
3864    }
3865
3866  mips_arg_info (cum, mode, type, named, &info);
3867
3868  /* Return straight away if the whole argument is passed on the stack.  */
3869  if (info.reg_offset == MAX_ARGS_IN_REGISTERS)
3870    return 0;
3871
3872  if (type != 0
3873      && TREE_CODE (type) == RECORD_TYPE
3874      && TARGET_NEWABI
3875      && TYPE_SIZE_UNIT (type)
3876      && host_integerp (TYPE_SIZE_UNIT (type), 1)
3877      && named)
3878    {
3879      /* The Irix 6 n32/n64 ABIs say that if any 64 bit chunk of the
3880	 structure contains a double in its entirety, then that 64 bit
3881	 chunk is passed in a floating point register.  */
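
      /* For example, for a named argument of type
	 "struct { double d; int n; }", the chunk holding D is passed
	 in a floating-point register and the chunk holding N in a
	 general-purpose register.  */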
3882      tree field;
3883
3884      /* First check to see if there is any such field.  */
3885      for (field = TYPE_FIELDS (type); field; field = TREE_CHAIN (field))
3886	if (TREE_CODE (field) == FIELD_DECL
3887	    && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3888	    && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD
3889	    && host_integerp (bit_position (field), 0)
3890	    && int_bit_position (field) % BITS_PER_WORD == 0)
3891	  break;
3892
3893      if (field != 0)
3894	{
3895	  /* Now handle the special case by returning a PARALLEL
3896	     indicating where each 64 bit chunk goes.  INFO.REG_WORDS
3897	     chunks are passed in registers.  */
3898	  unsigned int i;
3899	  HOST_WIDE_INT bitpos;
3900	  rtx ret;
3901
3902	  /* assign_parms checks the mode of ENTRY_PARM, so we must
3903	     use the actual mode here.  */
3904	  ret = gen_rtx_PARALLEL (mode, rtvec_alloc (info.reg_words));
3905
3906	  bitpos = 0;
3907	  field = TYPE_FIELDS (type);
3908	  for (i = 0; i < info.reg_words; i++)
3909	    {
3910	      rtx reg;
3911
3912	      for (; field; field = TREE_CHAIN (field))
3913		if (TREE_CODE (field) == FIELD_DECL
3914		    && int_bit_position (field) >= bitpos)
3915		  break;
3916
3917	      if (field
3918		  && int_bit_position (field) == bitpos
3919		  && TREE_CODE (TREE_TYPE (field)) == REAL_TYPE
3920		  && !TARGET_SOFT_FLOAT
3921		  && TYPE_PRECISION (TREE_TYPE (field)) == BITS_PER_WORD)
3922		reg = gen_rtx_REG (DFmode, FP_ARG_FIRST + info.reg_offset + i);
3923	      else
3924		reg = gen_rtx_REG (DImode, GP_ARG_FIRST + info.reg_offset + i);
3925
3926	      XVECEXP (ret, 0, i)
3927		= gen_rtx_EXPR_LIST (VOIDmode, reg,
3928				     GEN_INT (bitpos / BITS_PER_UNIT));
3929
3930	      bitpos += BITS_PER_WORD;
3931	    }
3932	  return ret;
3933	}
3934    }
3935
3936  /* Handle the n32/n64 conventions for passing complex floating-point
3937     arguments in FPR pairs.  The real part goes in the lower register
3938     and the imaginary part goes in the upper register.  */
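
  /* For instance, a _Complex double passed as the first floating-point
     argument under n32/n64 has its real part in $f12 and its imaginary
     part in $f13.  */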
3939  if (TARGET_NEWABI
3940      && info.fpr_p
3941      && GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT)
3942    {
3943      rtx real, imag;
3944      enum machine_mode inner;
3945      int reg;
3946
3947      inner = GET_MODE_INNER (mode);
3948      reg = FP_ARG_FIRST + info.reg_offset;
3949      if (info.reg_words * UNITS_PER_WORD == GET_MODE_SIZE (inner))
3950	{
3951	  /* Real part in registers, imaginary part on stack.  */
3952	  gcc_assert (info.stack_words == info.reg_words);
3953	  return gen_rtx_REG (inner, reg);
3954	}
3955      else
3956	{
3957	  gcc_assert (info.stack_words == 0);
3958	  real = gen_rtx_EXPR_LIST (VOIDmode,
3959				    gen_rtx_REG (inner, reg),
3960				    const0_rtx);
3961	  imag = gen_rtx_EXPR_LIST (VOIDmode,
3962				    gen_rtx_REG (inner,
3963						 reg + info.reg_words / 2),
3964				    GEN_INT (GET_MODE_SIZE (inner)));
3965	  return gen_rtx_PARALLEL (mode, gen_rtvec (2, real, imag));
3966	}
3967    }
3968
3969  if (!info.fpr_p)
3970    return gen_rtx_REG (mode, GP_ARG_FIRST + info.reg_offset);
3971  else if (info.reg_offset == 1)
3972    /* This code handles the special o32 case in which the second word
3973       of the argument structure is passed in floating-point registers.  */
3974    return gen_rtx_REG (mode, FP_ARG_FIRST + FP_INC);
3975  else
3976    return gen_rtx_REG (mode, FP_ARG_FIRST + info.reg_offset);
3977}
3978
3979
3980/* Implement TARGET_ARG_PARTIAL_BYTES.  */
3981
3982static int
3983mips_arg_partial_bytes (CUMULATIVE_ARGS *cum,
3984			enum machine_mode mode, tree type, bool named)
3985{
3986  struct mips_arg_info info;
3987
3988  mips_arg_info (cum, mode, type, named, &info);
3989  return info.stack_words > 0 ? info.reg_words * UNITS_PER_WORD : 0;
3990}
3991
3992
3993/* Implement FUNCTION_ARG_BOUNDARY.  Every parameter gets at least
3994   PARM_BOUNDARY bits of alignment, but will be given anything up
3995   to STACK_BOUNDARY bits if the type requires it.  */
3996
3997int
3998function_arg_boundary (enum machine_mode mode, tree type)
3999{
4000  unsigned int alignment;
4001
4002  alignment = type ? TYPE_ALIGN (type) : GET_MODE_ALIGNMENT (mode);
4003  if (alignment < PARM_BOUNDARY)
4004    alignment = PARM_BOUNDARY;
4005  if (alignment > STACK_BOUNDARY)
4006    alignment = STACK_BOUNDARY;
4007  return alignment;
4008}
4009
4010/* Return true if FUNCTION_ARG_PADDING (MODE, TYPE) should return
4011   upward rather than downward.  In other words, return true if the
4012   first byte of the stack slot has useful data, false if the last
4013   byte does.  */
4014
4015bool
4016mips_pad_arg_upward (enum machine_mode mode, tree type)
4017{
4018  /* On little-endian targets, the first byte of every stack argument
4019     is passed in the first byte of the stack slot.  */
4020  if (!BYTES_BIG_ENDIAN)
4021    return true;
4022
4023  /* Otherwise, integral types are padded downward: the last byte of a
4024     stack argument is passed in the last byte of the stack slot.  */
4025  if (type != 0
4026      ? INTEGRAL_TYPE_P (type) || POINTER_TYPE_P (type)
4027      : GET_MODE_CLASS (mode) == MODE_INT)
4028    return false;
4029
4030  /* Big-endian o64 pads floating-point arguments downward.  */
4031  if (mips_abi == ABI_O64)
4032    if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4033      return false;
4034
4035  /* Other types are padded upward for o32, o64, n32 and n64.  */
4036  if (mips_abi != ABI_EABI)
4037    return true;
4038
4039  /* Arguments smaller than a stack slot are padded downward.  */
4040  if (mode != BLKmode)
4041    return (GET_MODE_BITSIZE (mode) >= PARM_BOUNDARY);
4042  else
4043    return (int_size_in_bytes (type) >= (PARM_BOUNDARY / BITS_PER_UNIT));
4044}
4045
4046
4047/* Likewise BLOCK_REG_PADDING (MODE, TYPE, ...).  Return !BYTES_BIG_ENDIAN
4048   if the least significant byte of the register has useful data.  Return
4049   the opposite if the most significant byte does.  */
4050
4051bool
4052mips_pad_reg_upward (enum machine_mode mode, tree type)
4053{
4054  /* No shifting is required for floating-point arguments.  */
4055  if (type != 0 ? FLOAT_TYPE_P (type) : GET_MODE_CLASS (mode) == MODE_FLOAT)
4056    return !BYTES_BIG_ENDIAN;
4057
4058  /* Otherwise, apply the same padding to register arguments as we do
4059     to stack arguments.  */
4060  return mips_pad_arg_upward (mode, type);
4061}
4062
4063static void
4064mips_setup_incoming_varargs (CUMULATIVE_ARGS *cum, enum machine_mode mode,
4065			     tree type, int *pretend_size ATTRIBUTE_UNUSED,
4066			     int no_rtl)
4067{
4068  CUMULATIVE_ARGS local_cum;
4069  int gp_saved, fp_saved;
4070
4071  /* The caller has advanced CUM up to, but not beyond, the last named
4072     argument.  Advance a local copy of CUM past the last "real" named
4073     argument, to find out how many registers are left over.  */
4074
4075  local_cum = *cum;
4076  FUNCTION_ARG_ADVANCE (local_cum, mode, type, 1);
4077
  /* Find out how many registers we need to save.  */
4079  gp_saved = MAX_ARGS_IN_REGISTERS - local_cum.num_gprs;
4080  fp_saved = (EABI_FLOAT_VARARGS_P
4081	      ? MAX_ARGS_IN_REGISTERS - local_cum.num_fprs
4082	      : 0);
4083
4084  if (!no_rtl)
4085    {
4086      if (gp_saved > 0)
4087	{
4088	  rtx ptr, mem;
4089
4090	  ptr = plus_constant (virtual_incoming_args_rtx,
4091			       REG_PARM_STACK_SPACE (cfun->decl)
4092			       - gp_saved * UNITS_PER_WORD);
4093	  mem = gen_rtx_MEM (BLKmode, ptr);
4094	  set_mem_alias_set (mem, get_varargs_alias_set ());
4095
4096	  move_block_from_reg (local_cum.num_gprs + GP_ARG_FIRST,
4097			       mem, gp_saved);
4098	}
4099      if (fp_saved > 0)
4100	{
4101	  /* We can't use move_block_from_reg, because it will use
4102	     the wrong mode.  */
4103	  enum machine_mode mode;
4104	  int off, i;
4105
4106	  /* Set OFF to the offset from virtual_incoming_args_rtx of
4107	     the first float register.  The FP save area lies below
4108	     the integer one, and is aligned to UNITS_PER_FPVALUE bytes.  */
4109	  off = -gp_saved * UNITS_PER_WORD;
4110	  off &= ~(UNITS_PER_FPVALUE - 1);
4111	  off -= fp_saved * UNITS_PER_FPREG;
4112
4113	  mode = TARGET_SINGLE_FLOAT ? SFmode : DFmode;
4114
4115	  for (i = local_cum.num_fprs; i < MAX_ARGS_IN_REGISTERS; i += FP_INC)
4116	    {
4117	      rtx ptr, mem;
4118
4119	      ptr = plus_constant (virtual_incoming_args_rtx, off);
4120	      mem = gen_rtx_MEM (mode, ptr);
4121	      set_mem_alias_set (mem, get_varargs_alias_set ());
4122	      emit_move_insn (mem, gen_rtx_REG (mode, FP_ARG_FIRST + i));
4123	      off += UNITS_PER_HWFPVALUE;
4124	    }
4125	}
4126    }
4127  if (REG_PARM_STACK_SPACE (cfun->decl) == 0)
4128    cfun->machine->varargs_size = (gp_saved * UNITS_PER_WORD
4129				   + fp_saved * UNITS_PER_FPREG);
4130}
4131
4132/* Create the va_list data type.
4133   We keep 3 pointers, and two offsets.
4134   Two pointers are to the overflow area, which starts at the CFA.
4135     One of these is constant, for addressing into the GPR save area below it.
4136     The other is advanced up the stack through the overflow region.
4137   The third pointer is to the GPR save area.  Since the FPR save area
4138     is just below it, we can address FPR slots off this pointer.
4139   We also keep two one-byte offsets, which are to be subtracted from the
4140     constant pointers to yield addresses in the GPR and FPR save areas.
4141     These are downcounted as float or non-float arguments are used,
4142     and when they get to zero, the argument must be obtained from the
4143     overflow region.
4144   If !EABI_FLOAT_VARARGS_P, then no FPR save area exists, and a single
4145     pointer is enough.  It's started at the GPR save area, and is
4146     advanced, period.
4147   Note that the GPR save area is not constant size, due to optimization
4148     in the prologue.  Hence, we can't use a design with two pointers
4149     and two offsets, although we could have designed this with two pointers
4150     and three offsets.  */
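
/* In outline, the EABI va_list built below corresponds to:

       struct {
	 void *__overflow_argptr;
	 void *__gpr_top;
	 void *__fpr_top;
	 unsigned char __gpr_offset;
	 unsigned char __fpr_offset;
	 unsigned char __reserved[sizeof (void *) - 2];
       };  */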
4151
4152static tree
4153mips_build_builtin_va_list (void)
4154{
4155  if (EABI_FLOAT_VARARGS_P)
4156    {
4157      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff, f_res, record;
4158      tree array, index;
4159
4160      record = (*lang_hooks.types.make_type) (RECORD_TYPE);
4161
4162      f_ovfl = build_decl (FIELD_DECL, get_identifier ("__overflow_argptr"),
4163			  ptr_type_node);
4164      f_gtop = build_decl (FIELD_DECL, get_identifier ("__gpr_top"),
4165			  ptr_type_node);
4166      f_ftop = build_decl (FIELD_DECL, get_identifier ("__fpr_top"),
4167			  ptr_type_node);
4168      f_goff = build_decl (FIELD_DECL, get_identifier ("__gpr_offset"),
4169			  unsigned_char_type_node);
4170      f_foff = build_decl (FIELD_DECL, get_identifier ("__fpr_offset"),
4171			  unsigned_char_type_node);
4172      /* Explicitly pad to the size of a pointer, so that -Wpadded won't
4173	 warn on every user file.  */
4174      index = build_int_cst (NULL_TREE, GET_MODE_SIZE (ptr_mode) - 2 - 1);
4175      array = build_array_type (unsigned_char_type_node,
4176			        build_index_type (index));
4177      f_res = build_decl (FIELD_DECL, get_identifier ("__reserved"), array);
4178
4179      DECL_FIELD_CONTEXT (f_ovfl) = record;
4180      DECL_FIELD_CONTEXT (f_gtop) = record;
4181      DECL_FIELD_CONTEXT (f_ftop) = record;
4182      DECL_FIELD_CONTEXT (f_goff) = record;
4183      DECL_FIELD_CONTEXT (f_foff) = record;
4184      DECL_FIELD_CONTEXT (f_res) = record;
4185
4186      TYPE_FIELDS (record) = f_ovfl;
4187      TREE_CHAIN (f_ovfl) = f_gtop;
4188      TREE_CHAIN (f_gtop) = f_ftop;
4189      TREE_CHAIN (f_ftop) = f_goff;
4190      TREE_CHAIN (f_goff) = f_foff;
4191      TREE_CHAIN (f_foff) = f_res;
4192
4193      layout_type (record);
4194      return record;
4195    }
4196  else if (TARGET_IRIX && TARGET_IRIX6)
4197    /* On IRIX 6, this type is 'char *'.  */
4198    return build_pointer_type (char_type_node);
4199  else
4200    /* Otherwise, we use 'void *'.  */
4201    return ptr_type_node;
4202}
4203
4204/* Implement va_start.  */
4205
4206void
4207mips_va_start (tree valist, rtx nextarg)
4208{
4209  if (EABI_FLOAT_VARARGS_P)
4210    {
4211      const CUMULATIVE_ARGS *cum;
4212      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4213      tree ovfl, gtop, ftop, goff, foff;
4214      tree t;
4215      int gpr_save_area_size;
4216      int fpr_save_area_size;
4217      int fpr_offset;
4218
4219      cum = &current_function_args_info;
4220      gpr_save_area_size
4221	= (MAX_ARGS_IN_REGISTERS - cum->num_gprs) * UNITS_PER_WORD;
4222      fpr_save_area_size
4223	= (MAX_ARGS_IN_REGISTERS - cum->num_fprs) * UNITS_PER_FPREG;
4224
4225      f_ovfl = TYPE_FIELDS (va_list_type_node);
4226      f_gtop = TREE_CHAIN (f_ovfl);
4227      f_ftop = TREE_CHAIN (f_gtop);
4228      f_goff = TREE_CHAIN (f_ftop);
4229      f_foff = TREE_CHAIN (f_goff);
4230
4231      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4232		     NULL_TREE);
4233      gtop = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4234		     NULL_TREE);
4235      ftop = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4236		     NULL_TREE);
4237      goff = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4238		     NULL_TREE);
4239      foff = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4240		     NULL_TREE);
4241
4242      /* Emit code to initialize OVFL, which points to the next varargs
4243	 stack argument.  CUM->STACK_WORDS gives the number of stack
4244	 words used by named arguments.  */
4245      t = make_tree (TREE_TYPE (ovfl), virtual_incoming_args_rtx);
4246      if (cum->stack_words > 0)
4247	t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), t,
4248		    build_int_cst (NULL_TREE,
4249				   cum->stack_words * UNITS_PER_WORD));
4250      t = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4251      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4252
4253      /* Emit code to initialize GTOP, the top of the GPR save area.  */
4254      t = make_tree (TREE_TYPE (gtop), virtual_incoming_args_rtx);
4255      t = build2 (MODIFY_EXPR, TREE_TYPE (gtop), gtop, t);
4256      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4257
4258      /* Emit code to initialize FTOP, the top of the FPR save area.
4259	 This address is gpr_save_area_bytes below GTOP, rounded
4260	 down to the next fp-aligned boundary.  */
4261      t = make_tree (TREE_TYPE (ftop), virtual_incoming_args_rtx);
4262      fpr_offset = gpr_save_area_size + UNITS_PER_FPVALUE - 1;
4263      fpr_offset &= ~(UNITS_PER_FPVALUE - 1);
4264      if (fpr_offset)
4265	t = build2 (PLUS_EXPR, TREE_TYPE (ftop), t,
4266		    build_int_cst (NULL_TREE, -fpr_offset));
4267      t = build2 (MODIFY_EXPR, TREE_TYPE (ftop), ftop, t);
4268      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4269
4270      /* Emit code to initialize GOFF, the offset from GTOP of the
4271	 next GPR argument.  */
4272      t = build2 (MODIFY_EXPR, TREE_TYPE (goff), goff,
4273		  build_int_cst (NULL_TREE, gpr_save_area_size));
4274      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4275
4276      /* Likewise emit code to initialize FOFF, the offset from FTOP
4277	 of the next FPR argument.  */
4278      t = build2 (MODIFY_EXPR, TREE_TYPE (foff), foff,
4279		  build_int_cst (NULL_TREE, fpr_save_area_size));
4280      expand_expr (t, const0_rtx, VOIDmode, EXPAND_NORMAL);
4281    }
4282  else
4283    {
4284      nextarg = plus_constant (nextarg, -cfun->machine->varargs_size);
4285      std_expand_builtin_va_start (valist, nextarg);
4286    }
4287}
4288
4289/* Implement va_arg.  */
4290
4291static tree
4292mips_gimplify_va_arg_expr (tree valist, tree type, tree *pre_p, tree *post_p)
4293{
4294  HOST_WIDE_INT size, rsize;
4295  tree addr;
4296  bool indirect;
4297
4298  indirect = pass_by_reference (NULL, TYPE_MODE (type), type, 0);
4299
4300  if (indirect)
4301    type = build_pointer_type (type);
4302
4303  size = int_size_in_bytes (type);
4304  rsize = (size + UNITS_PER_WORD - 1) & -UNITS_PER_WORD;
4305
4306  if (mips_abi != ABI_EABI || !EABI_FLOAT_VARARGS_P)
4307    addr = std_gimplify_va_arg_expr (valist, type, pre_p, post_p);
4308  else
4309    {
      /* Not a simple merged stack.  */
4311
4312      tree f_ovfl, f_gtop, f_ftop, f_goff, f_foff;
4313      tree ovfl, top, off, align;
4314      HOST_WIDE_INT osize;
4315      tree t, u;
4316
4317      f_ovfl = TYPE_FIELDS (va_list_type_node);
4318      f_gtop = TREE_CHAIN (f_ovfl);
4319      f_ftop = TREE_CHAIN (f_gtop);
4320      f_goff = TREE_CHAIN (f_ftop);
4321      f_foff = TREE_CHAIN (f_goff);
4322
4323      /* We maintain separate pointers and offsets for floating-point
4324	 and integer arguments, but we need similar code in both cases.
4325	 Let:
4326
4327	 TOP be the top of the register save area;
4328	 OFF be the offset from TOP of the next register;
4329	 ADDR_RTX be the address of the argument;
4330	 RSIZE be the number of bytes used to store the argument
4331	 when it's in the register save area;
4332	 OSIZE be the number of bytes used to store it when it's
4333	 in the stack overflow area; and
4334	 PADDING be (BYTES_BIG_ENDIAN ? OSIZE - RSIZE : 0)
4335
4336	 The code we want is:
4337
4338	 1: off &= -rsize;	  // round down
4339	 2: if (off != 0)
4340	 3:   {
4341	 4:	 addr_rtx = top - off;
4342	 5:	 off -= rsize;
4343	 6:   }
4344	 7: else
4345	 8:   {
4346	 9:	 ovfl += ((intptr_t) ovfl + osize - 1) & -osize;
4347	 10:	 addr_rtx = ovfl + PADDING;
4348	 11:	 ovfl += osize;
4349	 14:   }
4350
4351	 [1] and [9] can sometimes be optimized away.  */
4352
4353      ovfl = build3 (COMPONENT_REF, TREE_TYPE (f_ovfl), valist, f_ovfl,
4354		     NULL_TREE);
4355
4356      if (GET_MODE_CLASS (TYPE_MODE (type)) == MODE_FLOAT
4357	  && GET_MODE_SIZE (TYPE_MODE (type)) <= UNITS_PER_FPVALUE)
4358	{
4359	  top = build3 (COMPONENT_REF, TREE_TYPE (f_ftop), valist, f_ftop,
4360		        NULL_TREE);
4361	  off = build3 (COMPONENT_REF, TREE_TYPE (f_foff), valist, f_foff,
4362		        NULL_TREE);
4363
4364	  /* When floating-point registers are saved to the stack,
4365	     each one will take up UNITS_PER_HWFPVALUE bytes, regardless
4366	     of the float's precision.  */
4367	  rsize = UNITS_PER_HWFPVALUE;
4368
4369	  /* Overflow arguments are padded to UNITS_PER_WORD bytes
4370	     (= PARM_BOUNDARY bits).  This can be different from RSIZE
4371	     in two cases:
4372
4373	     (1) On 32-bit targets when TYPE is a structure such as:
4374
4375	     struct s { float f; };
4376
4377	     Such structures are passed in paired FPRs, so RSIZE
4378	     will be 8 bytes.  However, the structure only takes
4379	     up 4 bytes of memory, so OSIZE will only be 4.
4380
4381	     (2) In combinations such as -mgp64 -msingle-float
4382	     -fshort-double.  Doubles passed in registers
4383	     will then take up 4 (UNITS_PER_HWFPVALUE) bytes,
4384	     but those passed on the stack take up
4385	     UNITS_PER_WORD bytes.  */
4386	  osize = MAX (GET_MODE_SIZE (TYPE_MODE (type)), UNITS_PER_WORD);
4387	}
4388      else
4389	{
4390	  top = build3 (COMPONENT_REF, TREE_TYPE (f_gtop), valist, f_gtop,
4391		        NULL_TREE);
4392	  off = build3 (COMPONENT_REF, TREE_TYPE (f_goff), valist, f_goff,
4393		        NULL_TREE);
4394	  if (rsize > UNITS_PER_WORD)
4395	    {
4396	      /* [1] Emit code for: off &= -rsize.	*/
4397	      t = build2 (BIT_AND_EXPR, TREE_TYPE (off), off,
4398			  build_int_cst (NULL_TREE, -rsize));
4399	      t = build2 (MODIFY_EXPR, TREE_TYPE (off), off, t);
4400	      gimplify_and_add (t, pre_p);
4401	    }
4402	  osize = rsize;
4403	}
4404
4405      /* [2] Emit code to branch if off == 0.  */
4406      t = build2 (NE_EXPR, boolean_type_node, off,
4407		  build_int_cst (TREE_TYPE (off), 0));
4408      addr = build3 (COND_EXPR, ptr_type_node, t, NULL_TREE, NULL_TREE);
4409
4410      /* [5] Emit code for: off -= rsize.  We do this as a form of
	 post-decrement not available to C.  Also widen for the
4412	 coming pointer arithmetic.  */
4413      t = fold_convert (TREE_TYPE (off), build_int_cst (NULL_TREE, rsize));
4414      t = build2 (POSTDECREMENT_EXPR, TREE_TYPE (off), off, t);
4415      t = fold_convert (sizetype, t);
4416      t = fold_convert (TREE_TYPE (top), t);
4417
4418      /* [4] Emit code for: addr_rtx = top - off.  On big endian machines,
4419	 the argument has RSIZE - SIZE bytes of leading padding.  */
4420      t = build2 (MINUS_EXPR, TREE_TYPE (top), top, t);
4421      if (BYTES_BIG_ENDIAN && rsize > size)
4422	{
4423	  u = fold_convert (TREE_TYPE (t), build_int_cst (NULL_TREE,
4424							  rsize - size));
4425	  t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4426	}
4427      COND_EXPR_THEN (addr) = t;
4428
4429      if (osize > UNITS_PER_WORD)
4430	{
	  /* [9] Emit: ovfl = ((intptr_t) ovfl + osize - 1) & -osize.  */
4432	  u = fold_convert (TREE_TYPE (ovfl),
4433			    build_int_cst (NULL_TREE, osize - 1));
4434	  t = build2 (PLUS_EXPR, TREE_TYPE (ovfl), ovfl, u);
4435	  u = fold_convert (TREE_TYPE (ovfl),
4436			    build_int_cst (NULL_TREE, -osize));
4437	  t = build2 (BIT_AND_EXPR, TREE_TYPE (ovfl), t, u);
4438	  align = build2 (MODIFY_EXPR, TREE_TYPE (ovfl), ovfl, t);
4439	}
4440      else
4441	align = NULL;
4442
4443      /* [10, 11].	Emit code to store ovfl in addr_rtx, then
4444	 post-increment ovfl by osize.  On big-endian machines,
4445	 the argument has OSIZE - SIZE bytes of leading padding.  */
4446      u = fold_convert (TREE_TYPE (ovfl),
4447			build_int_cst (NULL_TREE, osize));
4448      t = build2 (POSTINCREMENT_EXPR, TREE_TYPE (ovfl), ovfl, u);
4449      if (BYTES_BIG_ENDIAN && osize > size)
4450	{
4451	  u = fold_convert (TREE_TYPE (t),
4452			    build_int_cst (NULL_TREE, osize - size));
4453	  t = build2 (PLUS_EXPR, TREE_TYPE (t), t, u);
4454	}
4455
4456      /* String [9] and [10,11] together.  */
4457      if (align)
4458	t = build2 (COMPOUND_EXPR, TREE_TYPE (t), align, t);
4459      COND_EXPR_ELSE (addr) = t;
4460
4461      addr = fold_convert (build_pointer_type (type), addr);
4462      addr = build_va_arg_indirect_ref (addr);
4463    }
4464
4465  if (indirect)
4466    addr = build_va_arg_indirect_ref (addr);
4467
4468  return addr;
4469}
4470
4471/* Return true if it is possible to use left/right accesses for a
4472   bitfield of WIDTH bits starting BITPOS bits into *OP.  When
4473   returning true, update *OP, *LEFT and *RIGHT as follows:
4474
4475   *OP is a BLKmode reference to the whole field.
4476
4477   *LEFT is a QImode reference to the first byte if big endian or
4478   the last byte if little endian.  This address can be used in the
4479   left-side instructions (lwl, swl, ldl, sdl).
4480
4481   *RIGHT is a QImode reference to the opposite end of the field and
   can be used in the partnering right-side instruction (lwr, swr,
   ldr, sdr).  */
4483
4484static bool
4485mips_get_unaligned_mem (rtx *op, unsigned int width, int bitpos,
4486			rtx *left, rtx *right)
4487{
4488  rtx first, last;
4489
4490  /* Check that the operand really is a MEM.  Not all the extv and
4491     extzv predicates are checked.  */
4492  if (!MEM_P (*op))
4493    return false;
4494
4495  /* Check that the size is valid.  */
4496  if (width != 32 && (!TARGET_64BIT || width != 64))
4497    return false;
4498
4499  /* We can only access byte-aligned values.  Since we are always passed
4500     a reference to the first byte of the field, it is not necessary to
4501     do anything with BITPOS after this check.  */
4502  if (bitpos % BITS_PER_UNIT != 0)
4503    return false;
4504
4505  /* Reject aligned bitfields: we want to use a normal load or store
4506     instead of a left/right pair.  */
4507  if (MEM_ALIGN (*op) >= width)
4508    return false;
4509
4510  /* Adjust *OP to refer to the whole field.  This also has the effect
4511     of legitimizing *OP's address for BLKmode, possibly simplifying it.  */
4512  *op = adjust_address (*op, BLKmode, 0);
4513  set_mem_size (*op, GEN_INT (width / BITS_PER_UNIT));
4514
4515  /* Get references to both ends of the field.  We deliberately don't
4516     use the original QImode *OP for FIRST since the new BLKmode one
4517     might have a simpler address.  */
4518  first = adjust_address (*op, QImode, 0);
4519  last = adjust_address (*op, QImode, width / BITS_PER_UNIT - 1);
4520
4521  /* Allocate to LEFT and RIGHT according to endianness.  LEFT should
4522     be the upper word and RIGHT the lower word.  */
4523  if (TARGET_BIG_ENDIAN)
4524    *left = first, *right = last;
4525  else
4526    *left = last, *right = first;
4527
4528  return true;
4529}
4530
4531
4532/* Try to emit the equivalent of (set DEST (zero_extract SRC WIDTH BITPOS)).
4533   Return true on success.  We only handle cases where zero_extract is
4534   equivalent to sign_extract.  */
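
/* For a 32-bit load on a big-endian target, the expansion is
   essentially:

	lwl	dest, 0(addr)
	lwr	dest, 3(addr)

   with the 0 and 3 offsets swapped on little-endian targets.  */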
4535
4536bool
4537mips_expand_unaligned_load (rtx dest, rtx src, unsigned int width, int bitpos)
4538{
4539  rtx left, right, temp;
4540
4541  /* If TARGET_64BIT, the destination of a 32-bit load will be a
4542     paradoxical word_mode subreg.  This is the only case in which
4543     we allow the destination to be larger than the source.  */
4544  if (GET_CODE (dest) == SUBREG
4545      && GET_MODE (dest) == DImode
4546      && SUBREG_BYTE (dest) == 0
4547      && GET_MODE (SUBREG_REG (dest)) == SImode)
4548    dest = SUBREG_REG (dest);
4549
4550  /* After the above adjustment, the destination must be the same
4551     width as the source.  */
4552  if (GET_MODE_BITSIZE (GET_MODE (dest)) != width)
4553    return false;
4554
4555  if (!mips_get_unaligned_mem (&src, width, bitpos, &left, &right))
4556    return false;
4557
4558  temp = gen_reg_rtx (GET_MODE (dest));
4559  if (GET_MODE (dest) == DImode)
4560    {
4561      emit_insn (gen_mov_ldl (temp, src, left));
4562      emit_insn (gen_mov_ldr (dest, copy_rtx (src), right, temp));
4563    }
4564  else
4565    {
4566      emit_insn (gen_mov_lwl (temp, src, left));
4567      emit_insn (gen_mov_lwr (dest, copy_rtx (src), right, temp));
4568    }
4569  return true;
4570}
4571
4572
4573/* Try to expand (set (zero_extract DEST WIDTH BITPOS) SRC).  Return
4574   true on success.  */
4575
4576bool
4577mips_expand_unaligned_store (rtx dest, rtx src, unsigned int width, int bitpos)
4578{
4579  rtx left, right;
4580  enum machine_mode mode;
4581
4582  if (!mips_get_unaligned_mem (&dest, width, bitpos, &left, &right))
4583    return false;
4584
4585  mode = mode_for_size (width, MODE_INT, 0);
4586  src = gen_lowpart (mode, src);
4587
4588  if (mode == DImode)
4589    {
4590      emit_insn (gen_mov_sdl (dest, src, left));
4591      emit_insn (gen_mov_sdr (copy_rtx (dest), copy_rtx (src), right));
4592    }
4593  else
4594    {
4595      emit_insn (gen_mov_swl (dest, src, left));
4596      emit_insn (gen_mov_swr (copy_rtx (dest), copy_rtx (src), right));
4597    }
4598  return true;
4599}
4600
4601/* Return true if X is a MEM with the same size as MODE.  */
4602
4603bool
4604mips_mem_fits_mode_p (enum machine_mode mode, rtx x)
4605{
4606  rtx size;
4607
4608  if (!MEM_P (x))
4609    return false;
4610
4611  size = MEM_SIZE (x);
4612  return size && INTVAL (size) == GET_MODE_SIZE (mode);
4613}
4614
4615/* Return true if (zero_extract OP SIZE POSITION) can be used as the
4616   source of an "ext" instruction or the destination of an "ins"
4617   instruction.  OP must be a register operand and the following
4618   conditions must hold:
4619
4620     0 <= POSITION < GET_MODE_BITSIZE (GET_MODE (op))
4621     0 < SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4622     0 < POSITION + SIZE <= GET_MODE_BITSIZE (GET_MODE (op))
4623
4624   Also reject lengths equal to a word as they are better handled
4625   by the move patterns.  */
4626
4627bool
4628mips_use_ins_ext_p (rtx op, rtx size, rtx position)
4629{
4630  HOST_WIDE_INT len, pos;
4631
4632  if (!ISA_HAS_EXT_INS
4633      || !register_operand (op, VOIDmode)
4634      || GET_MODE_BITSIZE (GET_MODE (op)) > BITS_PER_WORD)
4635    return false;
4636
4637  len = INTVAL (size);
4638  pos = INTVAL (position);
4639
4640  if (len <= 0 || len >= GET_MODE_BITSIZE (GET_MODE (op))
4641      || pos < 0 || pos + len > GET_MODE_BITSIZE (GET_MODE (op)))
4642    return false;
4643
4644  return true;
4645}
4646
4647/* Set up globals to generate code for the ISA or processor
4648   described by INFO.  */
4649
4650static void
4651mips_set_architecture (const struct mips_cpu_info *info)
4652{
4653  if (info != 0)
4654    {
4655      mips_arch_info = info;
4656      mips_arch = info->cpu;
4657      mips_isa = info->isa;
4658    }
4659}
4660
4661
4662/* Likewise for tuning.  */
4663
4664static void
4665mips_set_tune (const struct mips_cpu_info *info)
4666{
4667  if (info != 0)
4668    {
4669      mips_tune_info = info;
4670      mips_tune = info->cpu;
4671    }
4672}
4673
4674/* Implement TARGET_HANDLE_OPTION.  */
4675
4676static bool
4677mips_handle_option (size_t code, const char *arg, int value ATTRIBUTE_UNUSED)
4678{
4679  switch (code)
4680    {
4681    case OPT_mabi_:
4682      if (strcmp (arg, "32") == 0)
4683	mips_abi = ABI_32;
4684      else if (strcmp (arg, "o64") == 0)
4685	mips_abi = ABI_O64;
4686      else if (strcmp (arg, "n32") == 0)
4687	mips_abi = ABI_N32;
4688      else if (strcmp (arg, "64") == 0)
4689	mips_abi = ABI_64;
4690      else if (strcmp (arg, "eabi") == 0)
4691	mips_abi = ABI_EABI;
4692      else
4693	return false;
4694      return true;
4695
4696    case OPT_march_:
4697    case OPT_mtune_:
4698      return mips_parse_cpu (arg) != 0;
4699
4700    case OPT_mips:
4701      mips_isa_info = mips_parse_cpu (ACONCAT (("mips", arg, NULL)));
4702      return mips_isa_info != 0;
4703
4704    case OPT_mno_flush_func:
4705      mips_cache_flush_func = NULL;
4706      return true;
4707
4708    default:
4709      return true;
4710    }
4711}
4712
4713/* Set up the threshold for data to go into the small data area, instead
4714   of the normal data area, and detect any conflicts in the switches.  */
4715
4716void
4717override_options (void)
4718{
4719  int i, start, regno;
4720  enum machine_mode mode;
4721
4722  mips_section_threshold = g_switch_set ? g_switch_value : MIPS_DEFAULT_GVALUE;
4723
4724  /* The following code determines the architecture and register size.
4725     Similar code was added to GAS 2.14 (see tc-mips.c:md_after_parse_args()).
4726     The GAS and GCC code should be kept in sync as much as possible.  */
4727
4728  if (mips_arch_string != 0)
4729    mips_set_architecture (mips_parse_cpu (mips_arch_string));
4730
4731  if (mips_isa_info != 0)
4732    {
4733      if (mips_arch_info == 0)
4734	mips_set_architecture (mips_isa_info);
4735      else if (mips_arch_info->isa != mips_isa_info->isa)
4736	error ("-%s conflicts with the other architecture options, "
4737	       "which specify a %s processor",
4738	       mips_isa_info->name,
4739	       mips_cpu_info_from_isa (mips_arch_info->isa)->name);
4740    }
4741
4742  if (mips_arch_info == 0)
4743    {
4744#ifdef MIPS_CPU_STRING_DEFAULT
4745      mips_set_architecture (mips_parse_cpu (MIPS_CPU_STRING_DEFAULT));
4746#else
4747      mips_set_architecture (mips_cpu_info_from_isa (MIPS_ISA_DEFAULT));
4748#endif
4749    }
4750
4751  if (ABI_NEEDS_64BIT_REGS && !ISA_HAS_64BIT_REGS)
4752    error ("-march=%s is not compatible with the selected ABI",
4753	   mips_arch_info->name);
4754
4755  /* Optimize for mips_arch, unless -mtune selects a different processor.  */
4756  if (mips_tune_string != 0)
4757    mips_set_tune (mips_parse_cpu (mips_tune_string));
4758
4759  if (mips_tune_info == 0)
4760    mips_set_tune (mips_arch_info);
4761
4762  /* Set cost structure for the processor.  */
4763  mips_cost = &mips_rtx_cost_data[mips_tune];
4764
4765  if ((target_flags_explicit & MASK_64BIT) != 0)
4766    {
4767      /* The user specified the size of the integer registers.  Make sure
4768	 it agrees with the ABI and ISA.  */
4769      if (TARGET_64BIT && !ISA_HAS_64BIT_REGS)
4770	error ("-mgp64 used with a 32-bit processor");
4771      else if (!TARGET_64BIT && ABI_NEEDS_64BIT_REGS)
4772	error ("-mgp32 used with a 64-bit ABI");
4773      else if (TARGET_64BIT && ABI_NEEDS_32BIT_REGS)
4774	error ("-mgp64 used with a 32-bit ABI");
4775    }
4776  else
4777    {
4778      /* Infer the integer register size from the ABI and processor.
4779	 Restrict ourselves to 32-bit registers if that's all the
4780	 processor has, or if the ABI cannot handle 64-bit registers.  */
4781      if (ABI_NEEDS_32BIT_REGS || !ISA_HAS_64BIT_REGS)
4782	target_flags &= ~MASK_64BIT;
4783      else
4784	target_flags |= MASK_64BIT;
4785    }
4786
4787  if ((target_flags_explicit & MASK_FLOAT64) != 0)
4788    {
4789      /* Really, -mfp32 and -mfp64 are ornamental options.  There's
4790	 only one right answer here.  */
4791      if (TARGET_64BIT && TARGET_DOUBLE_FLOAT && !TARGET_FLOAT64)
4792	error ("unsupported combination: %s", "-mgp64 -mfp32 -mdouble-float");
4793      else if (!TARGET_64BIT && TARGET_FLOAT64)
4794	error ("unsupported combination: %s", "-mgp32 -mfp64");
4795      else if (TARGET_SINGLE_FLOAT && TARGET_FLOAT64)
4796	error ("unsupported combination: %s", "-mfp64 -msingle-float");
4797    }
4798  else
4799    {
4800      /* -msingle-float selects 32-bit float registers.  Otherwise the
4801	 float registers should be the same size as the integer ones.  */
4802      if (TARGET_64BIT && TARGET_DOUBLE_FLOAT)
4803	target_flags |= MASK_FLOAT64;
4804      else
4805	target_flags &= ~MASK_FLOAT64;
4806    }
4807
4808  /* End of code shared with GAS.  */
4809
4810  if ((target_flags_explicit & MASK_LONG64) == 0)
4811    {
4812      if ((mips_abi == ABI_EABI && TARGET_64BIT) || mips_abi == ABI_64)
4813	target_flags |= MASK_LONG64;
4814      else
4815	target_flags &= ~MASK_LONG64;
4816    }
4817
4818  if (MIPS_MARCH_CONTROLS_SOFT_FLOAT
4819      && (target_flags_explicit & MASK_SOFT_FLOAT) == 0)
4820    {
4821      /* For some configurations, it is useful to have -march control
4822	 the default setting of MASK_SOFT_FLOAT.  */
4823      switch ((int) mips_arch)
4824	{
4825	case PROCESSOR_R4100:
4826	case PROCESSOR_R4111:
4827	case PROCESSOR_R4120:
4828	case PROCESSOR_R4130:
4829	  target_flags |= MASK_SOFT_FLOAT;
4830	  break;
4831
4832	default:
4833	  target_flags &= ~MASK_SOFT_FLOAT;
4834	  break;
4835	}
4836    }
4837
4838  if (!TARGET_OLDABI)
4839    flag_pcc_struct_return = 0;
4840
4841  if ((target_flags_explicit & MASK_BRANCHLIKELY) == 0)
4842    {
4843      /* If neither -mbranch-likely nor -mno-branch-likely was given
4844	 on the command line, set MASK_BRANCHLIKELY based on the target
4845	 architecture.
4846
4847	 By default, we enable use of Branch Likely instructions on
4848	 all architectures which support them with the following
4849	 exceptions: when creating MIPS32 or MIPS64 code, and when
4850	 tuning for architectures where their use tends to hurt
4851	 performance.
4852
4853	 The MIPS32 and MIPS64 architecture specifications say "Software
4854	 is strongly encouraged to avoid use of Branch Likely
4855	 instructions, as they will be removed from a future revision
4856	 of the [MIPS32 and MIPS64] architecture."  Therefore, we do not
4857	 issue those instructions unless instructed to do so by
4858	 -mbranch-likely.  */
4859      if (ISA_HAS_BRANCHLIKELY
4860	  && !(ISA_MIPS32 || ISA_MIPS32R2 || ISA_MIPS64 || ISA_MIPS64R2)
4861	  && !(TUNE_MIPS5500 || TUNE_SB1))
4862	target_flags |= MASK_BRANCHLIKELY;
4863      else
4864	target_flags &= ~MASK_BRANCHLIKELY;
4865    }
4866  if (TARGET_BRANCHLIKELY && !ISA_HAS_BRANCHLIKELY)
4867    warning (0, "generation of Branch Likely instructions enabled, but not supported by architecture");
4868
4869  /* The effect of -mabicalls isn't defined for the EABI.  */
4870  if (mips_abi == ABI_EABI && TARGET_ABICALLS)
4871    {
4872      error ("unsupported combination: %s", "-mabicalls -mabi=eabi");
4873      target_flags &= ~MASK_ABICALLS;
4874    }
4875
4876  if (TARGET_ABICALLS)
4877    {
4878      /* We need to set flag_pic for executables as well as DSOs
4879	 because we may reference symbols that are not defined in
4880	 the final executable.  (MIPS does not use things like
4881	 copy relocs, for example.)
4882
4883	 Also, there is a body of code that uses __PIC__ to distinguish
4884	 between -mabicalls and -mno-abicalls code.  */
4885      flag_pic = 1;
4886      if (mips_section_threshold > 0)
4887	warning (0, "%<-G%> is incompatible with %<-mabicalls%>");
4888    }
4889
4890  /* mips_split_addresses is a half-way house between explicit
4891     relocations and the traditional assembler macros.  It can
4892     split absolute 32-bit symbolic constants into a high/lo_sum
4893     pair but uses macros for other sorts of access.
4894
4895     Like explicit relocation support for REL targets, it relies
4896     on GNU extensions in the assembler and the linker.
4897
4898     Although this code should work for -O0, it has traditionally
4899     been treated as an optimization.  */
4900  if (!TARGET_MIPS16 && TARGET_SPLIT_ADDRESSES
4901      && optimize && !flag_pic
4902      && !ABI_HAS_64BIT_SYMBOLS)
4903    mips_split_addresses = 1;
4904  else
4905    mips_split_addresses = 0;
4906
4907  /* -mvr4130-align is a "speed over size" optimization: it usually produces
4908     faster code, but at the expense of more nops.  Enable it at -O3 and
4909     above.  */
4910  if (optimize > 2 && (target_flags_explicit & MASK_VR4130_ALIGN) == 0)
4911    target_flags |= MASK_VR4130_ALIGN;
4912
4913  /* When compiling for the mips16, we cannot use floating point.  We
4914     record the original hard float value in mips16_hard_float.  */
4915  if (TARGET_MIPS16)
4916    {
4917      if (TARGET_SOFT_FLOAT)
4918	mips16_hard_float = 0;
4919      else
4920	mips16_hard_float = 1;
4921      target_flags |= MASK_SOFT_FLOAT;
4922
4923      /* Don't run the scheduler before reload, since it tends to
4924         increase register pressure.  */
4925      flag_schedule_insns = 0;
4926
4927      /* Don't do hot/cold partitioning.  The constant layout code expects
4928	 the whole function to be in a single section.  */
4929      flag_reorder_blocks_and_partition = 0;
4930
4931      /* Silently disable -mexplicit-relocs since it doesn't apply
	 to mips16 code.  Even so, it would be overly pedantic to warn
4933	 about "-mips16 -mexplicit-relocs", especially given that
4934	 we use a %gprel() operator.  */
4935      target_flags &= ~MASK_EXPLICIT_RELOCS;
4936    }
4937
4938  /* When using explicit relocs, we call dbr_schedule from within
4939     mips_reorg.  */
4940  if (TARGET_EXPLICIT_RELOCS)
4941    {
4942      mips_flag_delayed_branch = flag_delayed_branch;
4943      flag_delayed_branch = 0;
4944    }
4945
4946#ifdef MIPS_TFMODE_FORMAT
4947  REAL_MODE_FORMAT (TFmode) = &MIPS_TFMODE_FORMAT;
4948#endif
4949
4950  /* Make sure that the user didn't turn off paired single support when
4951     MIPS-3D support is requested.  */
4952  if (TARGET_MIPS3D && (target_flags_explicit & MASK_PAIRED_SINGLE_FLOAT)
4953      && !TARGET_PAIRED_SINGLE_FLOAT)
4954    error ("-mips3d requires -mpaired-single");
4955
4956  /* If TARGET_MIPS3D, enable MASK_PAIRED_SINGLE_FLOAT.  */
4957  if (TARGET_MIPS3D)
4958    target_flags |= MASK_PAIRED_SINGLE_FLOAT;
4959
4960  /* Make sure that when TARGET_PAIRED_SINGLE_FLOAT is true, TARGET_FLOAT64
4961     and TARGET_HARD_FLOAT are both true.  */
4962  if (TARGET_PAIRED_SINGLE_FLOAT && !(TARGET_FLOAT64 && TARGET_HARD_FLOAT))
4963    error ("-mips3d/-mpaired-single must be used with -mfp64 -mhard-float");
4964
4965  /* Make sure that the ISA supports TARGET_PAIRED_SINGLE_FLOAT when it is
4966     enabled.  */
4967  if (TARGET_PAIRED_SINGLE_FLOAT && !ISA_MIPS64)
4968    error ("-mips3d/-mpaired-single must be used with -mips64");
4969
4970  if (TARGET_MIPS16 && TARGET_DSP)
4971    error ("-mips16 and -mdsp cannot be used together");
4972
4973  mips_print_operand_punct['?'] = 1;
4974  mips_print_operand_punct['#'] = 1;
4975  mips_print_operand_punct['/'] = 1;
4976  mips_print_operand_punct['&'] = 1;
4977  mips_print_operand_punct['!'] = 1;
4978  mips_print_operand_punct['*'] = 1;
4979  mips_print_operand_punct['@'] = 1;
4980  mips_print_operand_punct['.'] = 1;
4981  mips_print_operand_punct['('] = 1;
4982  mips_print_operand_punct[')'] = 1;
4983  mips_print_operand_punct['['] = 1;
4984  mips_print_operand_punct[']'] = 1;
4985  mips_print_operand_punct['<'] = 1;
4986  mips_print_operand_punct['>'] = 1;
4987  mips_print_operand_punct['{'] = 1;
4988  mips_print_operand_punct['}'] = 1;
4989  mips_print_operand_punct['^'] = 1;
4990  mips_print_operand_punct['$'] = 1;
4991  mips_print_operand_punct['+'] = 1;
4992  mips_print_operand_punct['~'] = 1;
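  /* The meaning of each of these punctuation characters is documented
     in the comment above print_operand, later in this file.  */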
4993
  /* Set up the array that maps GCC register numbers to debug register
     numbers.  Ignore the special-purpose registers.  */
4996
4997  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
4998    mips_dbx_regno[i] = -1;
4999
5000  start = GP_DBX_FIRST - GP_REG_FIRST;
5001  for (i = GP_REG_FIRST; i <= GP_REG_LAST; i++)
5002    mips_dbx_regno[i] = i + start;
5003
5004  start = FP_DBX_FIRST - FP_REG_FIRST;
5005  for (i = FP_REG_FIRST; i <= FP_REG_LAST; i++)
5006    mips_dbx_regno[i] = i + start;
5007
5008  mips_dbx_regno[HI_REGNUM] = MD_DBX_FIRST + 0;
5009  mips_dbx_regno[LO_REGNUM] = MD_DBX_FIRST + 1;
5010
  /* Set up the array that records whether a given register can hold a
     given mode.  */
5012
5013  for (mode = VOIDmode;
5014       mode != MAX_MACHINE_MODE;
5015       mode = (enum machine_mode) ((int)mode + 1))
5016    {
5017      register int size		     = GET_MODE_SIZE (mode);
5018      register enum mode_class class = GET_MODE_CLASS (mode);
5019
5020      for (regno = 0; regno < FIRST_PSEUDO_REGISTER; regno++)
5021	{
5022	  register int temp;
5023
5024	  if (mode == CCV2mode)
5025	    temp = (ISA_HAS_8CC
5026		    && ST_REG_P (regno)
5027		    && (regno - ST_REG_FIRST) % 2 == 0);
5028
5029	  else if (mode == CCV4mode)
5030	    temp = (ISA_HAS_8CC
5031		    && ST_REG_P (regno)
5032		    && (regno - ST_REG_FIRST) % 4 == 0);
5033
5034	  else if (mode == CCmode)
5035	    {
5036	      if (! ISA_HAS_8CC)
5037		temp = (regno == FPSW_REGNUM);
5038	      else
5039		temp = (ST_REG_P (regno) || GP_REG_P (regno)
5040			|| FP_REG_P (regno));
5041	    }
5042
5043	  else if (GP_REG_P (regno))
5044	    temp = ((regno & 1) == 0 || size <= UNITS_PER_WORD);
5045
5046	  else if (FP_REG_P (regno))
5047	    temp = ((regno % FP_INC) == 0)
5048		    && (((class == MODE_FLOAT || class == MODE_COMPLEX_FLOAT
5049			  || class == MODE_VECTOR_FLOAT)
5050			 && size <= UNITS_PER_FPVALUE)
5051			/* Allow integer modes that fit into a single
5052			   register.  We need to put integers into FPRs
5053			   when using instructions like cvt and trunc.
			   We can't allow sizes smaller than a word,
			   because the FPU has no appropriate load/store
			   instructions for those sizes.  */
5057			|| (class == MODE_INT
5058			    && size >= MIN_UNITS_PER_WORD
5059			    && size <= UNITS_PER_FPREG)
5060			/* Allow TFmode for CCmode reloads.  */
5061			|| (ISA_HAS_8CC && mode == TFmode));
5062
5063          else if (ACC_REG_P (regno))
5064	    temp = (INTEGRAL_MODE_P (mode)
5065		    && (size <= UNITS_PER_WORD
5066			|| (ACC_HI_REG_P (regno)
5067			    && size == 2 * UNITS_PER_WORD)));
5068
5069	  else if (ALL_COP_REG_P (regno))
5070	    temp = (class == MODE_INT && size <= UNITS_PER_WORD);
5071	  else
5072	    temp = 0;
5073
5074	  mips_hard_regno_mode_ok[(int)mode][regno] = temp;
5075	}
5076    }
5077
  /* Save GPRs in word_mode-sized hunks.  word_mode itself hasn't been
     initialized yet, so we can't use it here.  */
5080  gpr_mode = TARGET_64BIT ? DImode : SImode;
5081
5082  /* Provide default values for align_* for 64-bit targets.  */
5083  if (TARGET_64BIT && !TARGET_MIPS16)
5084    {
5085      if (align_loops == 0)
5086	align_loops = 8;
5087      if (align_jumps == 0)
5088	align_jumps = 8;
5089      if (align_functions == 0)
5090	align_functions = 8;
5091    }
5092
5093  /* Function to allocate machine-dependent function status.  */
5094  init_machine_status = &mips_init_machine_status;
5095
5096  if (ABI_HAS_64BIT_SYMBOLS)
5097    {
5098      if (TARGET_EXPLICIT_RELOCS)
5099	{
5100	  mips_split_p[SYMBOL_64_HIGH] = true;
5101	  mips_hi_relocs[SYMBOL_64_HIGH] = "%highest(";
5102	  mips_lo_relocs[SYMBOL_64_HIGH] = "%higher(";
5103
5104	  mips_split_p[SYMBOL_64_MID] = true;
5105	  mips_hi_relocs[SYMBOL_64_MID] = "%higher(";
5106	  mips_lo_relocs[SYMBOL_64_MID] = "%hi(";
5107
5108	  mips_split_p[SYMBOL_64_LOW] = true;
5109	  mips_hi_relocs[SYMBOL_64_LOW] = "%hi(";
5110	  mips_lo_relocs[SYMBOL_64_LOW] = "%lo(";
5111
5112	  mips_split_p[SYMBOL_GENERAL] = true;
5113	  mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5114	}
5115    }
5116  else
5117    {
5118      if (TARGET_EXPLICIT_RELOCS || mips_split_addresses)
5119	{
5120	  mips_split_p[SYMBOL_GENERAL] = true;
5121	  mips_hi_relocs[SYMBOL_GENERAL] = "%hi(";
5122	  mips_lo_relocs[SYMBOL_GENERAL] = "%lo(";
5123	}
5124    }
5125
5126  if (TARGET_MIPS16)
5127    {
5128      /* The high part is provided by a pseudo copy of $gp.  */
5129      mips_split_p[SYMBOL_SMALL_DATA] = true;
5130      mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gprel(";
5131    }
5132
5133  if (TARGET_EXPLICIT_RELOCS)
5134    {
5135      /* Small data constants are kept whole until after reload,
5136	 then lowered by mips_rewrite_small_data.  */
5137      mips_lo_relocs[SYMBOL_SMALL_DATA] = "%gp_rel(";
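      /* Once rewritten, such an access ends up looking something like
	 "lw $reg,%gp_rel(x)($28)", with the global pointer in $28
	 (illustrative only).  */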
5138
5139      mips_split_p[SYMBOL_GOT_LOCAL] = true;
5140      if (TARGET_NEWABI)
5141	{
5142	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got_page(";
5143	  mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%got_ofst(";
5144	}
5145      else
5146	{
5147	  mips_lo_relocs[SYMBOL_GOTOFF_PAGE] = "%got(";
5148	  mips_lo_relocs[SYMBOL_GOT_LOCAL] = "%lo(";
5149	}
5150
5151      if (TARGET_XGOT)
5152	{
5153	  /* The HIGH and LO_SUM are matched by special .md patterns.  */
5154	  mips_split_p[SYMBOL_GOT_GLOBAL] = true;
5155
5156	  mips_split_p[SYMBOL_GOTOFF_GLOBAL] = true;
5157	  mips_hi_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_hi(";
5158	  mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_lo(";
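	  /* With -mxgot, a 16-bit GOT offset is not assumed to be enough,
	     so a global access becomes (roughly) a three-instruction
	     sequence such as:

		lui	$reg,%got_hi(sym)
		addu	$reg,$reg,$gp
		lw	$reg,%got_lo(sym)($reg)  */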
5159
5160	  mips_split_p[SYMBOL_GOTOFF_CALL] = true;
5161	  mips_hi_relocs[SYMBOL_GOTOFF_CALL] = "%call_hi(";
5162	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call_lo(";
5163	}
5164      else
5165	{
5166	  if (TARGET_NEWABI)
5167	    mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got_disp(";
5168	  else
5169	    mips_lo_relocs[SYMBOL_GOTOFF_GLOBAL] = "%got(";
5170	  mips_lo_relocs[SYMBOL_GOTOFF_CALL] = "%call16(";
5171	}
5172    }
5173
5174  if (TARGET_NEWABI)
5175    {
5176      mips_split_p[SYMBOL_GOTOFF_LOADGP] = true;
5177      mips_hi_relocs[SYMBOL_GOTOFF_LOADGP] = "%hi(%neg(%gp_rel(";
5178      mips_lo_relocs[SYMBOL_GOTOFF_LOADGP] = "%lo(%neg(%gp_rel(";
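      /* These compose to "%hi(%neg(%gp_rel(sym)))" and
	 "%lo(%neg(%gp_rel(sym)))", which the n32/n64 prologue combines
	 with the incoming address in $25 to compute $gp; see
	 mips_emit_loadgp below.  */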
5179    }
5180
5181  /* Thread-local relocation operators.  */
5182  mips_lo_relocs[SYMBOL_TLSGD] = "%tlsgd(";
5183  mips_lo_relocs[SYMBOL_TLSLDM] = "%tlsldm(";
5184  mips_split_p[SYMBOL_DTPREL] = 1;
5185  mips_hi_relocs[SYMBOL_DTPREL] = "%dtprel_hi(";
5186  mips_lo_relocs[SYMBOL_DTPREL] = "%dtprel_lo(";
5187  mips_lo_relocs[SYMBOL_GOTTPREL] = "%gottprel(";
5188  mips_split_p[SYMBOL_TPREL] = 1;
5189  mips_hi_relocs[SYMBOL_TPREL] = "%tprel_hi(";
5190  mips_lo_relocs[SYMBOL_TPREL] = "%tprel_lo(";
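  /* As an illustration of how these are used: the local-exec model
     splits a variable's TP-relative offset into %tprel_hi/%tprel_lo
     halves, %gottprel names the GOT slot used by initial-exec, and
     %tlsgd/%tlsldm name the slots used by the general-dynamic and
     local-dynamic models.  */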
5191
  /* MIPS16 has no instruction for accessing the thread pointer and
     no appropriate TLS relocations.  */
5194  if (TARGET_MIPS16)
5195    targetm.have_tls = false;
5196
5197  /* Default to working around R4000 errata only if the processor
5198     was selected explicitly.  */
5199  if ((target_flags_explicit & MASK_FIX_R4000) == 0
5200      && mips_matching_cpu_name_p (mips_arch_info->name, "r4000"))
5201    target_flags |= MASK_FIX_R4000;
5202
5203  /* Default to working around R4400 errata only if the processor
5204     was selected explicitly.  */
5205  if ((target_flags_explicit & MASK_FIX_R4400) == 0
5206      && mips_matching_cpu_name_p (mips_arch_info->name, "r4400"))
5207    target_flags |= MASK_FIX_R4400;
5208}
5209
5210/* Implement CONDITIONAL_REGISTER_USAGE.  */
5211
5212void
5213mips_conditional_register_usage (void)
5214{
5215  if (!TARGET_DSP)
5216    {
5217      int regno;
5218
5219      for (regno = DSP_ACC_REG_FIRST; regno <= DSP_ACC_REG_LAST; regno++)
5220	fixed_regs[regno] = call_used_regs[regno] = 1;
5221    }
5222  if (!TARGET_HARD_FLOAT)
5223    {
5224      int regno;
5225
5226      for (regno = FP_REG_FIRST; regno <= FP_REG_LAST; regno++)
5227	fixed_regs[regno] = call_used_regs[regno] = 1;
5228      for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5229	fixed_regs[regno] = call_used_regs[regno] = 1;
5230    }
5231  else if (! ISA_HAS_8CC)
5232    {
5233      int regno;
5234
5235      /* We only have a single condition code register.  We
5236	 implement this by hiding all the condition code registers,
5237	 and generating RTL that refers directly to ST_REG_FIRST.  */
5238      for (regno = ST_REG_FIRST; regno <= ST_REG_LAST; regno++)
5239	fixed_regs[regno] = call_used_regs[regno] = 1;
5240    }
  /* In mips16 mode, we permit the $t temporary registers to be used
     for reload.  We prohibit the unused $s registers, since they
     are call-saved, and saving them via a mips16-accessible register
     would probably waste more time than just reloading the value.  */
5245  if (TARGET_MIPS16)
5246    {
5247      fixed_regs[18] = call_used_regs[18] = 1;
5248      fixed_regs[19] = call_used_regs[19] = 1;
5249      fixed_regs[20] = call_used_regs[20] = 1;
5250      fixed_regs[21] = call_used_regs[21] = 1;
5251      fixed_regs[22] = call_used_regs[22] = 1;
5252      fixed_regs[23] = call_used_regs[23] = 1;
5253      fixed_regs[26] = call_used_regs[26] = 1;
5254      fixed_regs[27] = call_used_regs[27] = 1;
5255      fixed_regs[30] = call_used_regs[30] = 1;
5256    }
  /* Under the 64-bit ABI, $f20-$f23 are caller-saved.  */
5258  if (mips_abi == ABI_64)
5259    {
5260      int regno;
5261      for (regno = FP_REG_FIRST + 20; regno < FP_REG_FIRST + 24; regno++)
5262	call_really_used_regs[regno] = call_used_regs[regno] = 1;
5263    }
  /* Under the n32 ABI, the odd-numbered registers $f21-$f31 are caller-saved.  */
5265  if (mips_abi == ABI_N32)
5266    {
5267      int regno;
5268      for (regno = FP_REG_FIRST + 21; regno <= FP_REG_FIRST + 31; regno+=2)
5269	call_really_used_regs[regno] = call_used_regs[regno] = 1;
5270    }
5271}
5272
5273/* Allocate a chunk of memory for per-function machine-dependent data.  */
5274static struct machine_function *
5275mips_init_machine_status (void)
5276{
5277  return ((struct machine_function *)
5278	  ggc_alloc_cleared (sizeof (struct machine_function)));
5279}
5280
5281/* On the mips16, we want to allocate $24 (T_REG) before other
5282   registers for instructions for which it is possible.  This helps
5283   avoid shuffling registers around in order to set up for an xor,
5284   encouraging the compiler to use a cmp instead.  */
5285
5286void
5287mips_order_regs_for_local_alloc (void)
5288{
5289  register int i;
5290
5291  for (i = 0; i < FIRST_PSEUDO_REGISTER; i++)
5292    reg_alloc_order[i] = i;
5293
5294  if (TARGET_MIPS16)
5295    {
5296      /* It really doesn't matter where we put register 0, since it is
5297         a fixed register anyhow.  */
5298      reg_alloc_order[0] = 24;
5299      reg_alloc_order[24] = 0;
5300    }
5301}
5302
5303
5304/* The MIPS debug format wants all automatic variables and arguments
5305   to be in terms of the virtual frame pointer (stack pointer before
5306   any adjustment in the function), while the MIPS 3.0 linker wants
5307   the frame pointer to be the stack pointer after the initial
5308   adjustment.  So, we do the adjustment here.  The arg pointer (which
5309   is eliminated) points to the virtual frame pointer, while the frame
5310   pointer (which may be eliminated) points to the stack pointer after
5311   the initial adjustments.  */
5312
5313HOST_WIDE_INT
5314mips_debugger_offset (rtx addr, HOST_WIDE_INT offset)
5315{
5316  rtx offset2 = const0_rtx;
5317  rtx reg = eliminate_constant_term (addr, &offset2);
5318
5319  if (offset == 0)
5320    offset = INTVAL (offset2);
5321
5322  if (reg == stack_pointer_rtx || reg == frame_pointer_rtx
5323      || reg == hard_frame_pointer_rtx)
5324    {
5325      HOST_WIDE_INT frame_size = (!cfun->machine->frame.initialized)
5326				  ? compute_frame_size (get_frame_size ())
5327				  : cfun->machine->frame.total_size;
5328
      /* The mips16 frame is smaller.  */
5330      if (frame_pointer_needed && TARGET_MIPS16)
5331	frame_size -= cfun->machine->frame.args_size;
5332
5333      offset = offset - frame_size;
5334    }
5335
5336  /* sdbout_parms does not want this to crash for unrecognized cases.  */
5337#if 0
5338  else if (reg != arg_pointer_rtx)
5339    fatal_insn ("mips_debugger_offset called with non stack/frame/arg pointer",
5340		addr);
5341#endif
5342
5343  return offset;
5344}
5345
5346/* Implement the PRINT_OPERAND macro.  The MIPS-specific operand codes are:
5347
   'X'  OP is CONST_INT; print 32 bits in hexadecimal format ("0x%08x").
   'x'  OP is CONST_INT; print 16 bits in hexadecimal format ("0x%04x").
   'h'  OP is HIGH; print %hi(X).
   'd'  print an integer constant in decimal.
   'z'  if the operand is 0, use $0 instead of the normal operand.
   'D'  print the second part of a double-word register or memory operand.
   'L'  print the low-order register of a double-word register operand.
   'M'  print the high-order register of a double-word register operand.
   'C'  print part of the opcode for a branch condition.
   'F'  print part of the opcode for a floating-point branch condition.
   'N'  print part of the opcode for a branch condition, inverted.
   'W'  print part of the opcode for a floating-point branch condition, inverted.
   'T'  print 'f' for (eq:CC ...), 't' for (ne:CC ...),
	'z' for (eq:?I ...), 'n' for (ne:?I ...).
   't'  like 'T', but with the EQ/NE cases reversed.
   'Y'  for a CONST_INT X, print mips_fp_conditions[X].
   'Z'  print the operand and a comma for ISA_HAS_8CC, otherwise print nothing.
   'R'  print the relocation operator associated with a LO_SUM.
   'q'  print a DSP accumulator register.
5367
5368   The punctuation characters are:
5369
5370   '('	Turn on .set noreorder
5371   ')'	Turn on .set reorder
5372   '['	Turn on .set noat
5373   ']'	Turn on .set at
5374   '<'	Turn on .set nomacro
5375   '>'	Turn on .set macro
5376   '{'	Turn on .set volatile (not GAS)
5377   '}'	Turn on .set novolatile (not GAS)
5378   '&'	Turn on .set noreorder if filling delay slots
5379   '*'	Turn on both .set noreorder and .set nomacro if filling delay slots
5380   '!'	Turn on .set nomacro if filling delay slots
5381   '#'	Print nop if in a .set noreorder section.
5382   '/'	Like '#', but does nothing within a delayed branch sequence
5383   '?'	Print 'l' if we are to use a branch likely instead of normal branch.
5384   '@'	Print the name of the assembler temporary register (at or $1).
5385   '.'	Print the name of the register with a hard-wired zero (zero or $0).
5386   '^'	Print the name of the pic call-through register (t9 or $25).
5387   '$'	Print the name of the stack pointer register (sp or $29).
5388   '+'	Print the name of the gp register (usually gp or $28).
5389   '~'	Output a branch alignment to LABEL_ALIGN(NULL).  */
5390
5391void
5392print_operand (FILE *file, rtx op, int letter)
5393{
5394  register enum rtx_code code;
5395
5396  if (PRINT_OPERAND_PUNCT_VALID_P (letter))
5397    {
5398      switch (letter)
5399	{
5400	case '?':
5401	  if (mips_branch_likely)
5402	    putc ('l', file);
5403	  break;
5404
5405	case '@':
5406	  fputs (reg_names [GP_REG_FIRST + 1], file);
5407	  break;
5408
5409	case '^':
5410	  fputs (reg_names [PIC_FUNCTION_ADDR_REGNUM], file);
5411	  break;
5412
5413	case '.':
5414	  fputs (reg_names [GP_REG_FIRST + 0], file);
5415	  break;
5416
5417	case '$':
5418	  fputs (reg_names[STACK_POINTER_REGNUM], file);
5419	  break;
5420
5421	case '+':
5422	  fputs (reg_names[PIC_OFFSET_TABLE_REGNUM], file);
5423	  break;
5424
5425	case '&':
5426	  if (final_sequence != 0 && set_noreorder++ == 0)
5427	    fputs (".set\tnoreorder\n\t", file);
5428	  break;
5429
5430	case '*':
5431	  if (final_sequence != 0)
5432	    {
5433	      if (set_noreorder++ == 0)
5434		fputs (".set\tnoreorder\n\t", file);
5435
5436	      if (set_nomacro++ == 0)
5437		fputs (".set\tnomacro\n\t", file);
5438	    }
5439	  break;
5440
5441	case '!':
5442	  if (final_sequence != 0 && set_nomacro++ == 0)
5443	    fputs ("\n\t.set\tnomacro", file);
5444	  break;
5445
5446	case '#':
5447	  if (set_noreorder != 0)
5448	    fputs ("\n\tnop", file);
5449	  break;
5450
5451	case '/':
5452	  /* Print an extra newline so that the delayed insn is separated
5453	     from the following ones.  This looks neater and is consistent
5454	     with non-nop delayed sequences.  */
5455	  if (set_noreorder != 0 && final_sequence == 0)
5456	    fputs ("\n\tnop\n", file);
5457	  break;
5458
5459	case '(':
5460	  if (set_noreorder++ == 0)
5461	    fputs (".set\tnoreorder\n\t", file);
5462	  break;
5463
5464	case ')':
5465	  if (set_noreorder == 0)
5466	    error ("internal error: %%) found without a %%( in assembler pattern");
5467
5468	  else if (--set_noreorder == 0)
5469	    fputs ("\n\t.set\treorder", file);
5470
5471	  break;
5472
5473	case '[':
5474	  if (set_noat++ == 0)
5475	    fputs (".set\tnoat\n\t", file);
5476	  break;
5477
5478	case ']':
5479	  if (set_noat == 0)
5480	    error ("internal error: %%] found without a %%[ in assembler pattern");
5481	  else if (--set_noat == 0)
5482	    fputs ("\n\t.set\tat", file);
5483
5484	  break;
5485
5486	case '<':
5487	  if (set_nomacro++ == 0)
5488	    fputs (".set\tnomacro\n\t", file);
5489	  break;
5490
5491	case '>':
5492	  if (set_nomacro == 0)
5493	    error ("internal error: %%> found without a %%< in assembler pattern");
5494	  else if (--set_nomacro == 0)
5495	    fputs ("\n\t.set\tmacro", file);
5496
5497	  break;
5498
5499	case '{':
5500	  if (set_volatile++ == 0)
5501	    fputs ("#.set\tvolatile\n\t", file);
5502	  break;
5503
5504	case '}':
5505	  if (set_volatile == 0)
5506	    error ("internal error: %%} found without a %%{ in assembler pattern");
5507	  else if (--set_volatile == 0)
5508	    fputs ("\n\t#.set\tnovolatile", file);
5509
5510	  break;
5511
5512	case '~':
5513	  {
5514	    if (align_labels_log > 0)
5515	      ASM_OUTPUT_ALIGN (file, align_labels_log);
5516	  }
5517	  break;
5518
5519	default:
5520	  error ("PRINT_OPERAND: unknown punctuation '%c'", letter);
5521	  break;
5522	}
5523
5524      return;
5525    }
5526
5527  if (! op)
5528    {
5529      error ("PRINT_OPERAND null pointer");
5530      return;
5531    }
5532
5533  code = GET_CODE (op);
5534
5535  if (letter == 'C')
5536    switch (code)
5537      {
5538      case EQ:	fputs ("eq",  file); break;
5539      case NE:	fputs ("ne",  file); break;
5540      case GT:	fputs ("gt",  file); break;
5541      case GE:	fputs ("ge",  file); break;
5542      case LT:	fputs ("lt",  file); break;
5543      case LE:	fputs ("le",  file); break;
5544      case GTU: fputs ("gtu", file); break;
5545      case GEU: fputs ("geu", file); break;
5546      case LTU: fputs ("ltu", file); break;
5547      case LEU: fputs ("leu", file); break;
5548      default:
5549	fatal_insn ("PRINT_OPERAND, invalid insn for %%C", op);
5550      }
5551
5552  else if (letter == 'N')
5553    switch (code)
5554      {
5555      case EQ:	fputs ("ne",  file); break;
5556      case NE:	fputs ("eq",  file); break;
5557      case GT:	fputs ("le",  file); break;
5558      case GE:	fputs ("lt",  file); break;
5559      case LT:	fputs ("ge",  file); break;
5560      case LE:	fputs ("gt",  file); break;
5561      case GTU: fputs ("leu", file); break;
5562      case GEU: fputs ("ltu", file); break;
5563      case LTU: fputs ("geu", file); break;
5564      case LEU: fputs ("gtu", file); break;
5565      default:
5566	fatal_insn ("PRINT_OPERAND, invalid insn for %%N", op);
5567      }
5568
5569  else if (letter == 'F')
5570    switch (code)
5571      {
5572      case EQ: fputs ("c1f", file); break;
5573      case NE: fputs ("c1t", file); break;
5574      default:
5575	fatal_insn ("PRINT_OPERAND, invalid insn for %%F", op);
5576      }
5577
5578  else if (letter == 'W')
5579    switch (code)
5580      {
5581      case EQ: fputs ("c1t", file); break;
5582      case NE: fputs ("c1f", file); break;
5583      default:
5584	fatal_insn ("PRINT_OPERAND, invalid insn for %%W", op);
5585      }
5586
5587  else if (letter == 'h')
5588    {
5589      if (GET_CODE (op) == HIGH)
5590	op = XEXP (op, 0);
5591
5592      print_operand_reloc (file, op, mips_hi_relocs);
5593    }
5594
5595  else if (letter == 'R')
5596    print_operand_reloc (file, op, mips_lo_relocs);
5597
5598  else if (letter == 'Y')
5599    {
5600      if (GET_CODE (op) == CONST_INT
5601	  && ((unsigned HOST_WIDE_INT) INTVAL (op)
5602	      < ARRAY_SIZE (mips_fp_conditions)))
5603	fputs (mips_fp_conditions[INTVAL (op)], file);
5604      else
5605	output_operand_lossage ("invalid %%Y value");
5606    }
5607
5608  else if (letter == 'Z')
5609    {
5610      if (ISA_HAS_8CC)
5611	{
5612	  print_operand (file, op, 0);
5613	  fputc (',', file);
5614	}
5615    }
5616
5617  else if (letter == 'q')
5618    {
5619      int regnum;
5620
5621      if (code != REG)
5622	fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5623
5624      regnum = REGNO (op);
5625      if (MD_REG_P (regnum))
5626	fprintf (file, "$ac0");
5627      else if (DSP_ACC_REG_P (regnum))
5628	fprintf (file, "$ac%c", reg_names[regnum][3]);
5629      else
5630	fatal_insn ("PRINT_OPERAND, invalid insn for %%q", op);
5631    }
5632
5633  else if (code == REG || code == SUBREG)
5634    {
5635      register int regnum;
5636
5637      if (code == REG)
5638	regnum = REGNO (op);
5639      else
5640	regnum = true_regnum (op);
5641
5642      if ((letter == 'M' && ! WORDS_BIG_ENDIAN)
5643	  || (letter == 'L' && WORDS_BIG_ENDIAN)
5644	  || letter == 'D')
5645	regnum++;
5646
5647      fprintf (file, "%s", reg_names[regnum]);
5648    }
5649
5650  else if (code == MEM)
5651    {
5652      if (letter == 'D')
5653	output_address (plus_constant (XEXP (op, 0), 4));
5654      else
5655	output_address (XEXP (op, 0));
5656    }
5657
5658  else if (letter == 'x' && GET_CODE (op) == CONST_INT)
5659    fprintf (file, HOST_WIDE_INT_PRINT_HEX, 0xffff & INTVAL(op));
5660
5661  else if (letter == 'X' && GET_CODE(op) == CONST_INT)
5662    fprintf (file, HOST_WIDE_INT_PRINT_HEX, INTVAL (op));
5663
5664  else if (letter == 'd' && GET_CODE(op) == CONST_INT)
5665    fprintf (file, HOST_WIDE_INT_PRINT_DEC, (INTVAL(op)));
5666
5667  else if (letter == 'z' && op == CONST0_RTX (GET_MODE (op)))
5668    fputs (reg_names[GP_REG_FIRST], file);
5669
5670  else if (letter == 'd' || letter == 'x' || letter == 'X')
5671    output_operand_lossage ("invalid use of %%d, %%x, or %%X");
5672
5673  else if (letter == 'T' || letter == 't')
5674    {
5675      int truth = (code == NE) == (letter == 'T');
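      /* Index into "zfnt": 'z'/'n' for integer modes, 'f'/'t' for
	 CCmode, with the letter selected by whether the condition
	 holds.  */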
5676      fputc ("zfnt"[truth * 2 + (GET_MODE (op) == CCmode)], file);
5677    }
5678
5679  else if (CONST_GP_P (op))
5680    fputs (reg_names[GLOBAL_POINTER_REGNUM], file);
5681
5682  else
5683    output_addr_const (file, op);
5684}
5685
5686
5687/* Print symbolic operand OP, which is part of a HIGH or LO_SUM.
5688   RELOCS is the array of relocations to use.  */
5689
5690static void
5691print_operand_reloc (FILE *file, rtx op, const char **relocs)
5692{
5693  enum mips_symbol_type symbol_type;
5694  const char *p;
5695  rtx base;
5696  HOST_WIDE_INT offset;
5697
5698  symbol_type = mips_classify_symbolic_expression (op);
5699  if (relocs[symbol_type] == 0)
5700    fatal_insn ("PRINT_OPERAND, invalid operand for relocation", op);
5701
5702  /* If OP uses an UNSPEC address, we want to print the inner symbol.  */
5703  mips_split_const (op, &base, &offset);
5704  if (UNSPEC_ADDRESS_P (base))
5705    op = plus_constant (UNSPEC_ADDRESS (base), offset);
5706
5707  fputs (relocs[symbol_type], file);
5708  output_addr_const (file, op);
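  /* Each relocation operator in the prefix opened a parenthesis, so
     close them all.  For SYMBOL_GOTOFF_LOADGP, for example, the result
     is "%hi(%neg(%gp_rel(sym)))".  */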
5709  for (p = relocs[symbol_type]; *p != 0; p++)
5710    if (*p == '(')
5711      fputc (')', file);
5712}
5713
5714/* Output address operand X to FILE.  */
5715
5716void
5717print_operand_address (FILE *file, rtx x)
5718{
5719  struct mips_address_info addr;
5720
5721  if (mips_classify_address (&addr, x, word_mode, true))
5722    switch (addr.type)
5723      {
5724      case ADDRESS_REG:
5725	print_operand (file, addr.offset, 0);
5726	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5727	return;
5728
5729      case ADDRESS_LO_SUM:
5730	print_operand (file, addr.offset, 'R');
5731	fprintf (file, "(%s)", reg_names[REGNO (addr.reg)]);
5732	return;
5733
5734      case ADDRESS_CONST_INT:
5735	output_addr_const (file, x);
5736	fprintf (file, "(%s)", reg_names[0]);
5737	return;
5738
5739      case ADDRESS_SYMBOLIC:
5740	output_addr_const (file, x);
5741	return;
5742      }
5743  gcc_unreachable ();
5744}
5745
/* When using assembler macros, keep track of all the small-data externs
5747   so that mips_file_end can emit the appropriate declarations for them.
5748
5749   In most cases it would be safe (though pointless) to emit .externs
5750   for other symbols too.  One exception is when an object is within
5751   the -G limit but declared by the user to be in a section other
5752   than .sbss or .sdata.  */
5753
5754int
5755mips_output_external (FILE *file ATTRIBUTE_UNUSED, tree decl, const char *name)
5756{
5757  register struct extern_list *p;
5758
5759  if (!TARGET_EXPLICIT_RELOCS && mips_in_small_data_p (decl))
5760    {
5761      p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5762      p->next = extern_head;
5763      p->name = name;
5764      p->size = int_size_in_bytes (TREE_TYPE (decl));
5765      extern_head = p;
5766    }
5767
5768  if (TARGET_IRIX && mips_abi == ABI_32 && TREE_CODE (decl) == FUNCTION_DECL)
5769    {
5770      p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5771      p->next = extern_head;
5772      p->name = name;
5773      p->size = -1;
5774      extern_head = p;
5775    }
5776
5777  return 0;
5778}
5779
5780#if TARGET_IRIX
5781static void
5782irix_output_external_libcall (rtx fun)
5783{
5784  register struct extern_list *p;
5785
5786  if (mips_abi == ABI_32)
5787    {
5788      p = (struct extern_list *) ggc_alloc (sizeof (struct extern_list));
5789      p->next = extern_head;
5790      p->name = XSTR (fun, 0);
5791      p->size = -1;
5792      extern_head = p;
5793    }
5794}
5795#endif
5796
5797/* Emit a new filename to a stream.  If we are smuggling stabs, try to
5798   put out a MIPS ECOFF file and a stab.  */
5799
5800void
5801mips_output_filename (FILE *stream, const char *name)
5802{
5803
5804  /* If we are emitting DWARF-2, let dwarf2out handle the ".file"
5805     directives.  */
5806  if (write_symbols == DWARF2_DEBUG)
5807    return;
5808  else if (mips_output_filename_first_time)
5809    {
5810      mips_output_filename_first_time = 0;
5811      num_source_filenames += 1;
5812      current_function_file = name;
5813      fprintf (stream, "\t.file\t%d ", num_source_filenames);
5814      output_quoted_string (stream, name);
5815      putc ('\n', stream);
5816    }
5817
5818  /* If we are emitting stabs, let dbxout.c handle this (except for
5819     the mips_output_filename_first_time case).  */
5820  else if (write_symbols == DBX_DEBUG)
5821    return;
5822
5823  else if (name != current_function_file
5824	   && strcmp (name, current_function_file) != 0)
5825    {
5826      num_source_filenames += 1;
5827      current_function_file = name;
5828      fprintf (stream, "\t.file\t%d ", num_source_filenames);
5829      output_quoted_string (stream, name);
5830      putc ('\n', stream);
5831    }
5832}
5833
5834/* Output an ASCII string, in a space-saving way.  PREFIX is the string
5835   that should be written before the opening quote, such as "\t.ascii\t"
5836   for real string data or "\t# " for a comment.  */
5837
5838void
5839mips_output_ascii (FILE *stream, const char *string_param, size_t len,
5840		   const char *prefix)
5841{
5842  size_t i;
5843  int cur_pos = 17;
5844  register const unsigned char *string =
5845    (const unsigned char *)string_param;
5846
5847  fprintf (stream, "%s\"", prefix);
5848  for (i = 0; i < len; i++)
5849    {
5850      register int c = string[i];
5851
5852      if (ISPRINT (c))
5853	{
5854	  if (c == '\\' || c == '\"')
5855	    {
5856	      putc ('\\', stream);
5857	      cur_pos++;
5858	    }
5859	  putc (c, stream);
5860	  cur_pos++;
5861	}
5862      else
5863	{
5864	  fprintf (stream, "\\%03o", c);
5865	  cur_pos += 4;
5866	}
5867
5868      if (cur_pos > 72 && i+1 < len)
5869	{
5870	  cur_pos = 17;
5871	  fprintf (stream, "\"\n%s\"", prefix);
5872	}
5873    }
5874  fprintf (stream, "\"\n");
5875}
5876
5877/* Implement TARGET_ASM_FILE_START.  */
5878
5879static void
5880mips_file_start (void)
5881{
5882  default_file_start ();
5883
5884  if (!TARGET_IRIX)
5885    {
5886      /* Generate a special section to describe the ABI switches used to
5887	 produce the resultant binary.  This used to be done by the assembler
5888	 setting bits in the ELF header's flags field, but we have run out of
5889	 bits.  GDB needs this information in order to be able to correctly
5890	 debug these binaries.  See the function mips_gdbarch_init() in
	 gdb/mips-tdep.c.  This is not needed for the IRIX 5/6 ABIs and
	 would only cause spurious IRIX 6 ld warnings.  */
5893      const char * abi_string = NULL;
5894
5895      switch (mips_abi)
5896	{
5897	case ABI_32:   abi_string = "abi32"; break;
5898	case ABI_N32:  abi_string = "abiN32"; break;
5899	case ABI_64:   abi_string = "abi64"; break;
5900	case ABI_O64:  abi_string = "abiO64"; break;
5901	case ABI_EABI: abi_string = TARGET_64BIT ? "eabi64" : "eabi32"; break;
5902	default:
5903	  gcc_unreachable ();
5904	}
5905      /* Note - we use fprintf directly rather than calling switch_to_section
5906	 because in this way we can avoid creating an allocated section.  We
5907	 do not want this section to take up any space in the running
5908	 executable.  */
5909      fprintf (asm_out_file, "\t.section .mdebug.%s\n", abi_string);
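      /* With -mabi=n32, for example, this emits ".section .mdebug.abiN32".  */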
5910
5911      /* There is no ELF header flag to distinguish long32 forms of the
5912	 EABI from long64 forms.  Emit a special section to help tools
5913	 such as GDB.  Do the same for o64, which is sometimes used with
5914	 -mlong64.  */
5915      if (mips_abi == ABI_EABI || mips_abi == ABI_O64)
5916	fprintf (asm_out_file, "\t.section .gcc_compiled_long%d\n",
5917		 TARGET_LONG64 ? 64 : 32);
5918
5919      /* Restore the default section.  */
5920      fprintf (asm_out_file, "\t.previous\n");
5921    }
5922
5923  /* Generate the pseudo ops that System V.4 wants.  */
5924  if (TARGET_ABICALLS)
5925    fprintf (asm_out_file, "\t.abicalls\n");
5926
5927  if (TARGET_MIPS16)
5928    fprintf (asm_out_file, "\t.set\tmips16\n");
5929
5930  if (flag_verbose_asm)
5931    fprintf (asm_out_file, "\n%s -G value = %d, Arch = %s, ISA = %d\n",
5932	     ASM_COMMENT_START,
5933	     mips_section_threshold, mips_arch_info->name, mips_isa);
5934}
5935
5936#ifdef BSS_SECTION_ASM_OP
5937/* Implement ASM_OUTPUT_ALIGNED_BSS.  This differs from the default only
5938   in the use of sbss.  */
5939
5940void
5941mips_output_aligned_bss (FILE *stream, tree decl, const char *name,
5942			 unsigned HOST_WIDE_INT size, int align)
5943{
5944  extern tree last_assemble_variable_decl;
5945
5946  if (mips_in_small_data_p (decl))
5947    switch_to_section (get_named_section (NULL, ".sbss", 0));
5948  else
5949    switch_to_section (bss_section);
5950  ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
5951  last_assemble_variable_decl = decl;
5952  ASM_DECLARE_OBJECT_NAME (stream, name, decl);
5953  ASM_OUTPUT_SKIP (stream, size != 0 ? size : 1);
5954}
5955#endif
5956
5957/* Implement TARGET_ASM_FILE_END.  When using assembler macros, emit
5958   .externs for any small-data variables that turned out to be external.  */
5959
5960static void
5961mips_file_end (void)
5962{
5963  tree name_tree;
5964  struct extern_list *p;
5965
5966  if (extern_head)
5967    {
5968      fputs ("\n", asm_out_file);
5969
5970      for (p = extern_head; p != 0; p = p->next)
5971	{
5972	  name_tree = get_identifier (p->name);
5973
5974	  /* Positively ensure only one .extern for any given symbol.  */
5975	  if (!TREE_ASM_WRITTEN (name_tree)
5976	      && TREE_SYMBOL_REFERENCED (name_tree))
5977	    {
5978	      TREE_ASM_WRITTEN (name_tree) = 1;
5979	      /* In IRIX 5 or IRIX 6 for the O32 ABI, we must output a
		 `.globl name .text' directive for every used but
5981		 undefined function.  If we don't, the linker may perform
5982		 an optimization (skipping over the insns that set $gp)
5983		 when it is unsafe.  */
5984	      if (TARGET_IRIX && mips_abi == ABI_32 && p->size == -1)
5985		{
5986		  fputs ("\t.globl ", asm_out_file);
5987		  assemble_name (asm_out_file, p->name);
5988		  fputs (" .text\n", asm_out_file);
5989		}
5990	      else
5991		{
5992		  fputs ("\t.extern\t", asm_out_file);
5993		  assemble_name (asm_out_file, p->name);
5994		  fprintf (asm_out_file, ", %d\n", p->size);
5995		}
5996	    }
5997	}
5998    }
5999}
6000
6001/* Implement ASM_OUTPUT_ALIGNED_DECL_COMMON.  This is usually the same as the
6002   elfos.h version, but we also need to handle -muninit-const-in-rodata.  */
6003
6004void
6005mips_output_aligned_decl_common (FILE *stream, tree decl, const char *name,
6006				 unsigned HOST_WIDE_INT size,
6007				 unsigned int align)
6008{
6009  /* If the target wants uninitialized const declarations in
6010     .rdata then don't put them in .comm.  */
6011  if (TARGET_EMBEDDED_DATA && TARGET_UNINIT_CONST_IN_RODATA
6012      && TREE_CODE (decl) == VAR_DECL && TREE_READONLY (decl)
6013      && (DECL_INITIAL (decl) == 0 || DECL_INITIAL (decl) == error_mark_node))
6014    {
6015      if (TREE_PUBLIC (decl) && DECL_NAME (decl))
6016	targetm.asm_out.globalize_label (stream, name);
6017
6018      switch_to_section (readonly_data_section);
6019      ASM_OUTPUT_ALIGN (stream, floor_log2 (align / BITS_PER_UNIT));
6020      mips_declare_object (stream, name, "",
6021			   ":\n\t.space\t" HOST_WIDE_INT_PRINT_UNSIGNED "\n",
6022			   size);
6023    }
6024  else
6025    mips_declare_common_object (stream, name, "\n\t.comm\t",
6026				size, align, true);
6027}
6028
6029/* Declare a common object of SIZE bytes using asm directive INIT_STRING.
6030   NAME is the name of the object and ALIGN is the required alignment
6031   in bytes.  TAKES_ALIGNMENT_P is true if the directive takes a third
6032   alignment argument.  */
6033
6034void
6035mips_declare_common_object (FILE *stream, const char *name,
6036			    const char *init_string,
6037			    unsigned HOST_WIDE_INT size,
6038			    unsigned int align, bool takes_alignment_p)
6039{
6040  if (!takes_alignment_p)
6041    {
6042      size += (align / BITS_PER_UNIT) - 1;
6043      size -= size % (align / BITS_PER_UNIT);
6044      mips_declare_object (stream, name, init_string,
6045			   "," HOST_WIDE_INT_PRINT_UNSIGNED "\n", size);
6046    }
6047  else
6048    mips_declare_object (stream, name, init_string,
6049			 "," HOST_WIDE_INT_PRINT_UNSIGNED ",%u\n",
6050			 size, align / BITS_PER_UNIT);
6051}
6052
6053/* Emit either a label, .comm, or .lcomm directive.  When using assembler
6054   macros, mark the symbol as written so that mips_file_end won't emit an
6055   .extern for it.  STREAM is the output file, NAME is the name of the
6056   symbol, INIT_STRING is the string that should be written before the
6057   symbol and FINAL_STRING is the string that should be written after it.
6058   FINAL_STRING is a printf() format that consumes the remaining arguments.  */
6059
6060void
6061mips_declare_object (FILE *stream, const char *name, const char *init_string,
6062		     const char *final_string, ...)
6063{
6064  va_list ap;
6065
6066  fputs (init_string, stream);
6067  assemble_name (stream, name);
6068  va_start (ap, final_string);
6069  vfprintf (stream, final_string, ap);
6070  va_end (ap);
6071
6072  if (!TARGET_EXPLICIT_RELOCS)
6073    {
6074      tree name_tree = get_identifier (name);
6075      TREE_ASM_WRITTEN (name_tree) = 1;
6076    }
6077}
6078
6079#ifdef ASM_OUTPUT_SIZE_DIRECTIVE
6080extern int size_directive_output;
6081
6082/* Implement ASM_DECLARE_OBJECT_NAME.  This is like most of the standard ELF
6083   definitions except that it uses mips_declare_object() to emit the label.  */
6084
6085void
6086mips_declare_object_name (FILE *stream, const char *name,
6087			  tree decl ATTRIBUTE_UNUSED)
6088{
6089#ifdef ASM_OUTPUT_TYPE_DIRECTIVE
6090  ASM_OUTPUT_TYPE_DIRECTIVE (stream, name, "object");
6091#endif
6092
6093  size_directive_output = 0;
6094  if (!flag_inhibit_size_directive && DECL_SIZE (decl))
6095    {
6096      HOST_WIDE_INT size;
6097
6098      size_directive_output = 1;
6099      size = int_size_in_bytes (TREE_TYPE (decl));
6100      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6101    }
6102
6103  mips_declare_object (stream, name, "", ":\n");
6104}
6105
6106/* Implement ASM_FINISH_DECLARE_OBJECT.  This is generic ELF stuff.  */
6107
6108void
6109mips_finish_declare_object (FILE *stream, tree decl, int top_level, int at_end)
6110{
6111  const char *name;
6112
6113  name = XSTR (XEXP (DECL_RTL (decl), 0), 0);
6114  if (!flag_inhibit_size_directive
6115      && DECL_SIZE (decl) != 0
6116      && !at_end && top_level
6117      && DECL_INITIAL (decl) == error_mark_node
6118      && !size_directive_output)
6119    {
6120      HOST_WIDE_INT size;
6121
6122      size_directive_output = 1;
6123      size = int_size_in_bytes (TREE_TYPE (decl));
6124      ASM_OUTPUT_SIZE_DIRECTIVE (stream, name, size);
6125    }
6126}
6127#endif
6128
6129/* Return true if X is a small data address that can be rewritten
6130   as a LO_SUM.  */
6131
6132static bool
6133mips_rewrite_small_data_p (rtx x)
6134{
6135  enum mips_symbol_type symbol_type;
6136
6137  return (TARGET_EXPLICIT_RELOCS
6138	  && mips_symbolic_constant_p (x, &symbol_type)
6139	  && symbol_type == SYMBOL_SMALL_DATA);
6140}
6141
6142
6143/* A for_each_rtx callback for mips_small_data_pattern_p.  */
6144
6145static int
6146mips_small_data_pattern_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6147{
6148  if (GET_CODE (*loc) == LO_SUM)
6149    return -1;
6150
6151  return mips_rewrite_small_data_p (*loc);
6152}
6153
6154/* Return true if OP refers to small data symbols directly, not through
6155   a LO_SUM.  */
6156
6157bool
6158mips_small_data_pattern_p (rtx op)
6159{
6160  return for_each_rtx (&op, mips_small_data_pattern_1, 0);
6161}
6162
6163/* A for_each_rtx callback, used by mips_rewrite_small_data.  */
6164
6165static int
6166mips_rewrite_small_data_1 (rtx *loc, void *data ATTRIBUTE_UNUSED)
6167{
6168  if (mips_rewrite_small_data_p (*loc))
6169    *loc = gen_rtx_LO_SUM (Pmode, pic_offset_table_rtx, *loc);
6170
6171  if (GET_CODE (*loc) == LO_SUM)
6172    return -1;
6173
6174  return 0;
6175}
6176
6177/* If possible, rewrite OP so that it refers to small data using
6178   explicit relocations.  */
6179
6180rtx
6181mips_rewrite_small_data (rtx op)
6182{
6183  op = copy_insn (op);
6184  for_each_rtx (&op, mips_rewrite_small_data_1, 0);
6185  return op;
6186}
6187
6188/* Return true if the current function has an insn that implicitly
6189   refers to $gp.  */
6190
6191static bool
6192mips_function_has_gp_insn (void)
6193{
6194  /* Don't bother rechecking if we found one last time.  */
6195  if (!cfun->machine->has_gp_insn_p)
6196    {
6197      rtx insn;
6198
6199      push_topmost_sequence ();
6200      for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
6201	if (INSN_P (insn)
6202	    && GET_CODE (PATTERN (insn)) != USE
6203	    && GET_CODE (PATTERN (insn)) != CLOBBER
6204	    && (get_attr_got (insn) != GOT_UNSET
6205		|| small_data_pattern (PATTERN (insn), VOIDmode)))
6206	  break;
6207      pop_topmost_sequence ();
6208
6209      cfun->machine->has_gp_insn_p = (insn != 0);
6210    }
6211  return cfun->machine->has_gp_insn_p;
6212}
6213
6214
6215/* Return the register that should be used as the global pointer
6216   within this function.  Return 0 if the function doesn't need
6217   a global pointer.  */
6218
6219static unsigned int
6220mips_global_pointer (void)
6221{
6222  unsigned int regno;
6223
6224  /* $gp is always available in non-abicalls code.  */
6225  if (!TARGET_ABICALLS)
6226    return GLOBAL_POINTER_REGNUM;
6227
6228  /* We must always provide $gp when it is used implicitly.  */
6229  if (!TARGET_EXPLICIT_RELOCS)
6230    return GLOBAL_POINTER_REGNUM;
6231
6232  /* FUNCTION_PROFILER includes a jal macro, so we need to give it
6233     a valid gp.  */
6234  if (current_function_profile)
6235    return GLOBAL_POINTER_REGNUM;
6236
6237  /* If the function has a nonlocal goto, $gp must hold the correct
6238     global pointer for the target function.  */
6239  if (current_function_has_nonlocal_goto)
6240    return GLOBAL_POINTER_REGNUM;
6241
6242  /* If the gp is never referenced, there's no need to initialize it.
6243     Note that reload can sometimes introduce constant pool references
6244     into a function that otherwise didn't need them.  For example,
6245     suppose we have an instruction like:
6246
6247	  (set (reg:DF R1) (float:DF (reg:SI R2)))
6248
     If R2 turns out to be a constant such as 1, the instruction may have a
6250     REG_EQUAL note saying that R1 == 1.0.  Reload then has the option of
6251     using this constant if R2 doesn't get allocated to a register.
6252
6253     In cases like these, reload will have added the constant to the pool
6254     but no instruction will yet refer to it.  */
6255  if (!regs_ever_live[GLOBAL_POINTER_REGNUM]
6256      && !current_function_uses_const_pool
6257      && !mips_function_has_gp_insn ())
6258    return 0;
6259
6260  /* We need a global pointer, but perhaps we can use a call-clobbered
6261     register instead of $gp.  */
6262  if (TARGET_NEWABI && current_function_is_leaf)
6263    for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6264      if (!regs_ever_live[regno]
6265	  && call_used_regs[regno]
6266	  && !fixed_regs[regno]
6267	  && regno != PIC_FUNCTION_ADDR_REGNUM)
6268	return regno;
6269
6270  return GLOBAL_POINTER_REGNUM;
6271}
6272
6273
6274/* Return true if the current function must save REGNO.  */
6275
6276static bool
6277mips_save_reg_p (unsigned int regno)
6278{
6279  /* We only need to save $gp for NewABI PIC.  */
6280  if (regno == GLOBAL_POINTER_REGNUM)
6281    return (TARGET_ABICALLS && TARGET_NEWABI
6282	    && cfun->machine->global_pointer == regno);
6283
6284  /* Check call-saved registers.  */
6285  if (regs_ever_live[regno] && !call_used_regs[regno])
6286    return true;
6287
6288  /* We need to save the old frame pointer before setting up a new one.  */
6289  if (regno == HARD_FRAME_POINTER_REGNUM && frame_pointer_needed)
6290    return true;
6291
6292  /* We need to save the incoming return address if it is ever clobbered
6293     within the function.  */
6294  if (regno == GP_REG_FIRST + 31 && regs_ever_live[regno])
6295    return true;
6296
6297  if (TARGET_MIPS16)
6298    {
6299      tree return_type;
6300
6301      return_type = DECL_RESULT (current_function_decl);
6302
6303      /* $18 is a special case in mips16 code.  It may be used to call
6304	 a function which returns a floating point value, but it is
6305	 marked in call_used_regs.  */
6306      if (regno == GP_REG_FIRST + 18 && regs_ever_live[regno])
6307	return true;
6308
6309      /* $31 is also a special case.  It will be used to copy a return
6310	 value into the floating point registers if the return value is
6311	 floating point.  */
6312      if (regno == GP_REG_FIRST + 31
6313	  && mips16_hard_float
6314	  && !aggregate_value_p (return_type, current_function_decl)
6315	  && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
6316	  && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
6317	return true;
6318    }
6319
6320  return false;
6321}
6322
6323
6324/* Return the bytes needed to compute the frame pointer from the current
6325   stack pointer.  SIZE is the size (in bytes) of the local variables.
6326
6327   MIPS stack frames look like:
6328
6329             Before call		        After call
6330        +-----------------------+	+-----------------------+
6331   high |			|       |      			|
6332   mem. |		        |	|			|
6333        |  caller's temps.    	|       |  caller's temps.    	|
6334	|       		|       |       	        |
6335        +-----------------------+	+-----------------------+
6336 	|       		|	|		        |
6337        |  arguments on stack.  |	|  arguments on stack.  |
6338	|       		|	|			|
6339        +-----------------------+	+-----------------------+
6340 	|  4 words to save     	|	|  4 words to save	|
6341	|  arguments passed	|	|  arguments passed	|
6342	|  in registers, even	|	|  in registers, even	|
6343    SP->|  if not passed.       |  VFP->|  if not passed.	|
6344	+-----------------------+       +-----------------------+
6345					|		        |
6346                                        |  fp register save     |
6347					|			|
6348					+-----------------------+
6349					|		        |
6350                                        |  gp register save     |
6351                                        |       		|
6352					+-----------------------+
6353					|			|
6354					|  local variables	|
6355					|			|
6356					+-----------------------+
6357					|			|
6358                                        |  alloca allocations   |
6359        				|			|
6360					+-----------------------+
6361					|			|
6362					|  GP save for V.4 abi	|
6363					|			|
6364					+-----------------------+
6365					|			|
6366                                        |  arguments on stack   |
6367        				|		        |
6368					+-----------------------+
6369                                        |  4 words to save      |
6370					|  arguments passed     |
6371                                        |  in registers, even   |
6372   low                              SP->|  if not passed.       |
6373   memory        			+-----------------------+
6374
6375*/
6376
6377HOST_WIDE_INT
6378compute_frame_size (HOST_WIDE_INT size)
6379{
6380  unsigned int regno;
6381  HOST_WIDE_INT total_size;	/* # bytes that the entire frame takes up */
6382  HOST_WIDE_INT var_size;	/* # bytes that variables take up */
6383  HOST_WIDE_INT args_size;	/* # bytes that outgoing arguments take up */
6384  HOST_WIDE_INT cprestore_size; /* # bytes that the cprestore slot takes up */
6385  HOST_WIDE_INT gp_reg_rounded;	/* # bytes needed to store gp after rounding */
6386  HOST_WIDE_INT gp_reg_size;	/* # bytes needed to store gp regs */
6387  HOST_WIDE_INT fp_reg_size;	/* # bytes needed to store fp regs */
6388  unsigned int mask;		/* mask of saved gp registers */
6389  unsigned int fmask;		/* mask of saved fp registers */
6390
6391  cfun->machine->global_pointer = mips_global_pointer ();
6392
6393  gp_reg_size = 0;
6394  fp_reg_size = 0;
6395  mask = 0;
6396  fmask	= 0;
6397  var_size = MIPS_STACK_ALIGN (size);
6398  args_size = current_function_outgoing_args_size;
6399  cprestore_size = MIPS_STACK_ALIGN (STARTING_FRAME_OFFSET) - args_size;
6400
6401  /* The space set aside by STARTING_FRAME_OFFSET isn't needed in leaf
6402     functions.  If the function has local variables, we're committed
6403     to allocating it anyway.  Otherwise reclaim it here.  */
6404  if (var_size == 0 && current_function_is_leaf)
6405    cprestore_size = args_size = 0;
6406
  /* The MIPS 3.0 linker does not like functions that dynamically
     allocate the stack and have 0 for STACK_DYNAMIC_OFFSET, since it
     looks as though we are trying to create a second frame pointer
     for the function.  Allocate some stack space to keep it happy.  */
6411
6412  if (args_size == 0 && current_function_calls_alloca)
6413    args_size = 4 * UNITS_PER_WORD;
6414
6415  total_size = var_size + args_size + cprestore_size;
6416
6417  /* Calculate space needed for gp registers.  */
6418  for (regno = GP_REG_FIRST; regno <= GP_REG_LAST; regno++)
6419    if (mips_save_reg_p (regno))
6420      {
6421	gp_reg_size += GET_MODE_SIZE (gpr_mode);
6422	mask |= 1 << (regno - GP_REG_FIRST);
6423      }
6424
6425  /* We need to restore these for the handler.  */
6426  if (current_function_calls_eh_return)
6427    {
6428      unsigned int i;
6429      for (i = 0; ; ++i)
6430	{
6431	  regno = EH_RETURN_DATA_REGNO (i);
6432	  if (regno == INVALID_REGNUM)
6433	    break;
6434	  gp_reg_size += GET_MODE_SIZE (gpr_mode);
6435	  mask |= 1 << (regno - GP_REG_FIRST);
6436	}
6437    }
6438
6439  /* This loop must iterate over the same space as its companion in
6440     save_restore_insns.  */
6441  for (regno = (FP_REG_LAST - FP_INC + 1);
6442       regno >= FP_REG_FIRST;
6443       regno -= FP_INC)
6444    {
6445      if (mips_save_reg_p (regno))
6446	{
6447	  fp_reg_size += FP_INC * UNITS_PER_FPREG;
6448	  fmask |= ((1 << FP_INC) - 1) << (regno - FP_REG_FIRST);
6449	}
6450    }
6451
6452  gp_reg_rounded = MIPS_STACK_ALIGN (gp_reg_size);
6453  total_size += gp_reg_rounded + MIPS_STACK_ALIGN (fp_reg_size);
6454
6455  /* Add in the space required for saving incoming register arguments.  */
6456  total_size += current_function_pretend_args_size;
6457  total_size += MIPS_STACK_ALIGN (cfun->machine->varargs_size);
6458
6459  /* Save other computed information.  */
6460  cfun->machine->frame.total_size = total_size;
6461  cfun->machine->frame.var_size = var_size;
6462  cfun->machine->frame.args_size = args_size;
6463  cfun->machine->frame.cprestore_size = cprestore_size;
6464  cfun->machine->frame.gp_reg_size = gp_reg_size;
6465  cfun->machine->frame.fp_reg_size = fp_reg_size;
6466  cfun->machine->frame.mask = mask;
6467  cfun->machine->frame.fmask = fmask;
6468  cfun->machine->frame.initialized = reload_completed;
6469  cfun->machine->frame.num_gp = gp_reg_size / UNITS_PER_WORD;
6470  cfun->machine->frame.num_fp = fp_reg_size / (FP_INC * UNITS_PER_FPREG);
6471
6472  if (mask)
6473    {
6474      HOST_WIDE_INT offset;
6475
6476      offset = (args_size + cprestore_size + var_size
6477		+ gp_reg_size - GET_MODE_SIZE (gpr_mode));
6478      cfun->machine->frame.gp_sp_offset = offset;
6479      cfun->machine->frame.gp_save_offset = offset - total_size;
6480    }
6481  else
6482    {
6483      cfun->machine->frame.gp_sp_offset = 0;
6484      cfun->machine->frame.gp_save_offset = 0;
6485    }
6486
6487  if (fmask)
6488    {
6489      HOST_WIDE_INT offset;
6490
6491      offset = (args_size + cprestore_size + var_size
6492		+ gp_reg_rounded + fp_reg_size
6493		- FP_INC * UNITS_PER_FPREG);
6494      cfun->machine->frame.fp_sp_offset = offset;
6495      cfun->machine->frame.fp_save_offset = offset - total_size;
6496    }
6497  else
6498    {
6499      cfun->machine->frame.fp_sp_offset = 0;
6500      cfun->machine->frame.fp_save_offset = 0;
6501    }
6502
6503  /* Ok, we're done.  */
6504  return total_size;
6505}
6506
6507/* Implement INITIAL_ELIMINATION_OFFSET.  FROM is either the frame
6508   pointer or argument pointer.  TO is either the stack pointer or
6509   hard frame pointer.  */
6510
6511HOST_WIDE_INT
6512mips_initial_elimination_offset (int from, int to)
6513{
6514  HOST_WIDE_INT offset;
6515
6516  compute_frame_size (get_frame_size ());
6517
6518  /* Set OFFSET to the offset from the stack pointer.  */
6519  switch (from)
6520    {
6521    case FRAME_POINTER_REGNUM:
6522      offset = 0;
6523      break;
6524
6525    case ARG_POINTER_REGNUM:
6526      offset = (cfun->machine->frame.total_size
6527		- current_function_pretend_args_size);
6528      break;
6529
6530    default:
6531      gcc_unreachable ();
6532    }
6533
6534  if (TARGET_MIPS16 && to == HARD_FRAME_POINTER_REGNUM)
6535    offset -= cfun->machine->frame.args_size;
6536
6537  return offset;
6538}
6539
6540/* Implement RETURN_ADDR_RTX.  Note, we do not support moving
6541   back to a previous frame.  */
6542rtx
6543mips_return_addr (int count, rtx frame ATTRIBUTE_UNUSED)
6544{
6545  if (count != 0)
6546    return const0_rtx;
6547
6548  return get_hard_reg_initial_val (Pmode, GP_REG_FIRST + 31);
6549}
6550
6551/* Use FN to save or restore register REGNO.  MODE is the register's
6552   mode and OFFSET is the offset of its save slot from the current
6553   stack pointer.  */
6554
6555static void
6556mips_save_restore_reg (enum machine_mode mode, int regno,
6557		       HOST_WIDE_INT offset, mips_save_restore_fn fn)
6558{
6559  rtx mem;
6560
6561  mem = gen_frame_mem (mode, plus_constant (stack_pointer_rtx, offset));
6562
6563  fn (gen_rtx_REG (mode, regno), mem);
6564}
6565
6566
6567/* Call FN for each register that is saved by the current function.
6568   SP_OFFSET is the offset of the current stack pointer from the start
6569   of the frame.  */
6570
6571static void
6572mips_for_each_saved_reg (HOST_WIDE_INT sp_offset, mips_save_restore_fn fn)
6573{
6574#define BITSET_P(VALUE, BIT) (((VALUE) & (1L << (BIT))) != 0)
6575
6576  enum machine_mode fpr_mode;
6577  HOST_WIDE_INT offset;
6578  int regno;
6579
  /* Save registers starting from high to low.  Debuggers prefer that at
     least the return address register be stored at func+4; saving in this
     order also means we do not need a nop in the epilogue if at least one
     register is restored in addition to the return address.  */
6584  offset = cfun->machine->frame.gp_sp_offset - sp_offset;
6585  for (regno = GP_REG_LAST; regno >= GP_REG_FIRST; regno--)
6586    if (BITSET_P (cfun->machine->frame.mask, regno - GP_REG_FIRST))
6587      {
6588	mips_save_restore_reg (gpr_mode, regno, offset, fn);
6589	offset -= GET_MODE_SIZE (gpr_mode);
6590      }
6591
6592  /* This loop must iterate over the same space as its companion in
6593     compute_frame_size.  */
6594  offset = cfun->machine->frame.fp_sp_offset - sp_offset;
6595  fpr_mode = (TARGET_SINGLE_FLOAT ? SFmode : DFmode);
6596  for (regno = (FP_REG_LAST - FP_INC + 1);
6597       regno >= FP_REG_FIRST;
6598       regno -= FP_INC)
6599    if (BITSET_P (cfun->machine->frame.fmask, regno - FP_REG_FIRST))
6600      {
6601	mips_save_restore_reg (fpr_mode, regno, offset, fn);
6602	offset -= GET_MODE_SIZE (fpr_mode);
6603      }
6604#undef BITSET_P
6605}
6606
6607/* If we're generating n32 or n64 abicalls, and the current function
6608   does not use $28 as its global pointer, emit a cplocal directive.
6609   Use pic_offset_table_rtx as the argument to the directive.  */
6610
6611static void
6612mips_output_cplocal (void)
6613{
6614  if (!TARGET_EXPLICIT_RELOCS
6615      && cfun->machine->global_pointer > 0
6616      && cfun->machine->global_pointer != GLOBAL_POINTER_REGNUM)
6617    output_asm_insn (".cplocal %+", 0);
6618}
6619
6620/* Return the style of GP load sequence that is being used for the
6621   current function.  */
6622
6623enum mips_loadgp_style
6624mips_current_loadgp_style (void)
6625{
6626  if (!TARGET_ABICALLS || cfun->machine->global_pointer == 0)
6627    return LOADGP_NONE;
6628
6629  if (TARGET_ABSOLUTE_ABICALLS)
6630    return LOADGP_ABSOLUTE;
6631
6632  return TARGET_NEWABI ? LOADGP_NEWABI : LOADGP_OLDABI;
6633}
6634
6635/* The __gnu_local_gp symbol.  */
6636
6637static GTY(()) rtx mips_gnu_local_gp;
6638
6639/* If we're generating n32 or n64 abicalls, emit instructions
6640   to set up the global pointer.  */
6641
6642static void
6643mips_emit_loadgp (void)
6644{
6645  rtx addr, offset, incoming_address;
6646
6647  switch (mips_current_loadgp_style ())
6648    {
6649    case LOADGP_ABSOLUTE:
6650      if (mips_gnu_local_gp == NULL)
6651	{
6652	  mips_gnu_local_gp = gen_rtx_SYMBOL_REF (Pmode, "__gnu_local_gp");
6653	  SYMBOL_REF_FLAGS (mips_gnu_local_gp) |= SYMBOL_FLAG_LOCAL;
6654	}
6655      emit_insn (gen_loadgp_noshared (mips_gnu_local_gp));
6656      break;
6657
6658    case LOADGP_NEWABI:
6659      addr = XEXP (DECL_RTL (current_function_decl), 0);
6660      offset = mips_unspec_address (addr, SYMBOL_GOTOFF_LOADGP);
6661      incoming_address = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
6662      emit_insn (gen_loadgp (offset, incoming_address));
6663      if (!TARGET_EXPLICIT_RELOCS)
6664	emit_insn (gen_loadgp_blockage ());
6665      break;
6666
6667    default:
6668      break;
6669    }
6670}
6671
6672/* Set up the stack and frame (if desired) for the function.  */
6673
6674static void
6675mips_output_function_prologue (FILE *file, HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6676{
6677  const char *fnname;
6678  HOST_WIDE_INT tsize = cfun->machine->frame.total_size;
6679
6680#ifdef SDB_DEBUGGING_INFO
6681  if (debug_info_level != DINFO_LEVEL_TERSE && write_symbols == SDB_DEBUG)
6682    SDB_OUTPUT_SOURCE_LINE (file, DECL_SOURCE_LINE (current_function_decl));
6683#endif
6684
  /* In mips16 mode, we may need to generate a 32 bit stub to handle
6686     floating point arguments.  The linker will arrange for any 32 bit
6687     functions to call this stub, which will then jump to the 16 bit
6688     function proper.  */
6689  if (TARGET_MIPS16 && !TARGET_SOFT_FLOAT
6690      && current_function_args_info.fp_code != 0)
6691    build_mips16_function_stub (file);
6692
6693  if (!FUNCTION_NAME_ALREADY_DECLARED)
6694    {
6695      /* Get the function name the same way that toplev.c does before calling
6696	 assemble_start_function.  This is needed so that the name used here
6697	 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
6698      fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6699
6700      if (!flag_inhibit_size_directive)
6701	{
6702	  fputs ("\t.ent\t", file);
6703	  assemble_name (file, fnname);
6704	  fputs ("\n", file);
6705	}
6706
6707      assemble_name (file, fnname);
6708      fputs (":\n", file);
6709    }
6710
6711  /* Stop mips_file_end from treating this function as external.  */
6712  if (TARGET_IRIX && mips_abi == ABI_32)
6713    TREE_ASM_WRITTEN (DECL_NAME (cfun->decl)) = 1;
6714
6715  if (!flag_inhibit_size_directive)
6716    {
6717      /* .frame FRAMEREG, FRAMESIZE, RETREG */
6718      fprintf (file,
6719	       "\t.frame\t%s," HOST_WIDE_INT_PRINT_DEC ",%s\t\t"
6720	       "# vars= " HOST_WIDE_INT_PRINT_DEC ", regs= %d/%d"
6721	       ", args= " HOST_WIDE_INT_PRINT_DEC
6722	       ", gp= " HOST_WIDE_INT_PRINT_DEC "\n",
6723	       (reg_names[(frame_pointer_needed)
6724			  ? HARD_FRAME_POINTER_REGNUM : STACK_POINTER_REGNUM]),
6725	       ((frame_pointer_needed && TARGET_MIPS16)
6726		? tsize - cfun->machine->frame.args_size
6727		: tsize),
6728	       reg_names[GP_REG_FIRST + 31],
6729	       cfun->machine->frame.var_size,
6730	       cfun->machine->frame.num_gp,
6731	       cfun->machine->frame.num_fp,
6732	       cfun->machine->frame.args_size,
6733	       cfun->machine->frame.cprestore_size);
6734
6735      /* .mask MASK, GPOFFSET; .fmask FPOFFSET */
6736      fprintf (file, "\t.mask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6737	       cfun->machine->frame.mask,
6738	       cfun->machine->frame.gp_save_offset);
6739      fprintf (file, "\t.fmask\t0x%08x," HOST_WIDE_INT_PRINT_DEC "\n",
6740	       cfun->machine->frame.fmask,
6741	       cfun->machine->frame.fp_save_offset);
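      /* For a simple o32 function that saves only $31, the directives
	 above might look something like:

	     .frame	$sp,24,$31	# vars= 0, regs= 1/0, args= 16, gp= 0
	     .mask	0x80000000,-8
	     .fmask	0x00000000,0  */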
6742
6743      /* Require:
6744	 OLD_SP == *FRAMEREG + FRAMESIZE => can find old_sp from nominated FP reg.
6745	 HIGHEST_GP_SAVED == *FRAMEREG + FRAMESIZE + GPOFFSET => can find saved regs.  */
6746    }
6747
6748  if (mips_current_loadgp_style () == LOADGP_OLDABI)
6749    {
6750      /* Handle the initialization of $gp for SVR4 PIC.  */
6751      if (!cfun->machine->all_noreorder_p)
6752	output_asm_insn ("%(.cpload\t%^%)", 0);
6753      else
6754	output_asm_insn ("%(.cpload\t%^\n\t%<", 0);
6755    }
6756  else if (cfun->machine->all_noreorder_p)
6757    output_asm_insn ("%(%<", 0);
6758
6759  /* Tell the assembler which register we're using as the global
6760     pointer.  This is needed for thunks, since they can use either
6761     explicit relocs or assembler macros.  */
6762  mips_output_cplocal ();
6763}
6764
6765/* Make the last instruction frame related and note that it performs
6766   the operation described by FRAME_PATTERN.  */
6767
6768static void
6769mips_set_frame_expr (rtx frame_pattern)
6770{
6771  rtx insn;
6772
6773  insn = get_last_insn ();
6774  RTX_FRAME_RELATED_P (insn) = 1;
6775  REG_NOTES (insn) = alloc_EXPR_LIST (REG_FRAME_RELATED_EXPR,
6776				      frame_pattern,
6777				      REG_NOTES (insn));
6778}
6779
6780
6781/* Return a frame-related rtx that stores REG at MEM.
6782   REG must be a single register.  */
6783
6784static rtx
6785mips_frame_set (rtx mem, rtx reg)
6786{
6787  rtx set;
6788
6789  /* If we're saving the return address register and the dwarf return
6790     address column differs from the hard register number, adjust the
6791     note reg to refer to the former.  */
6792  if (REGNO (reg) == GP_REG_FIRST + 31
6793      && DWARF_FRAME_RETURN_COLUMN != GP_REG_FIRST + 31)
6794    reg = gen_rtx_REG (GET_MODE (reg), DWARF_FRAME_RETURN_COLUMN);
6795
6796  set = gen_rtx_SET (VOIDmode, mem, reg);
6797  RTX_FRAME_RELATED_P (set) = 1;
6798
6799  return set;
6800}
6801
6802
6803/* Save register REG to MEM.  Make the instruction frame-related.  */
6804
6805static void
6806mips_save_reg (rtx reg, rtx mem)
6807{
6808  if (GET_MODE (reg) == DFmode && !TARGET_FLOAT64)
6809    {
6810      rtx x1, x2;
6811
6812      if (mips_split_64bit_move_p (mem, reg))
6813	mips_split_64bit_move (mem, reg);
6814      else
6815	emit_move_insn (mem, reg);
6816
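      /* Describe the save to the unwinder as two separate word-sized
	 stores, one for each 32-bit half of the register pair, regardless
	 of whether a single store instruction was used above.  */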
6817      x1 = mips_frame_set (mips_subword (mem, 0), mips_subword (reg, 0));
6818      x2 = mips_frame_set (mips_subword (mem, 1), mips_subword (reg, 1));
6819      mips_set_frame_expr (gen_rtx_PARALLEL (VOIDmode, gen_rtvec (2, x1, x2)));
6820    }
6821  else
6822    {
6823      if (TARGET_MIPS16
6824	  && REGNO (reg) != GP_REG_FIRST + 31
6825	  && !M16_REG_P (REGNO (reg)))
6826	{
6827	  /* Save a non-mips16 register by moving it through a temporary.
6828	     We don't need to do this for $31 since there's a special
6829	     instruction for it.  */
6830	  emit_move_insn (MIPS_PROLOGUE_TEMP (GET_MODE (reg)), reg);
6831	  emit_move_insn (mem, MIPS_PROLOGUE_TEMP (GET_MODE (reg)));
6832	}
6833      else
6834	emit_move_insn (mem, reg);
6835
6836      mips_set_frame_expr (mips_frame_set (mem, reg));
6837    }
6838}
6839
6840
6841/* Expand the prologue into a bunch of separate insns.  */
6842
6843void
6844mips_expand_prologue (void)
6845{
6846  HOST_WIDE_INT size;
6847
6848  if (cfun->machine->global_pointer > 0)
6849    REGNO (pic_offset_table_rtx) = cfun->machine->global_pointer;
6850
6851  size = compute_frame_size (get_frame_size ());
6852
6853  /* Save the registers.  Allocate up to MIPS_MAX_FIRST_STACK_STEP
6854     bytes beforehand; this is enough to cover the register save area
6855     without going out of range.  */
6856  if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
6857    {
6858      HOST_WIDE_INT step1;
6859
6860      step1 = MIN (size, MIPS_MAX_FIRST_STACK_STEP);
6861      RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6862						     stack_pointer_rtx,
6863						     GEN_INT (-step1)))) = 1;
6864      size -= step1;
6865      mips_for_each_saved_reg (size, mips_save_reg);
6866    }
6867
6868  /* Allocate the rest of the frame.  */
6869  if (size > 0)
6870    {
6871      if (SMALL_OPERAND (-size))
6872	RTX_FRAME_RELATED_P (emit_insn (gen_add3_insn (stack_pointer_rtx,
6873						       stack_pointer_rtx,
6874						       GEN_INT (-size)))) = 1;
6875      else
6876	{
6877	  emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), GEN_INT (size));
6878	  if (TARGET_MIPS16)
6879	    {
6880	      /* There are no instructions to add or subtract registers
6881		 from the stack pointer, so use the frame pointer as a
6882		 temporary.  We should always be using a frame pointer
6883		 in this case anyway.  */
6884	      gcc_assert (frame_pointer_needed);
6885	      emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6886	      emit_insn (gen_sub3_insn (hard_frame_pointer_rtx,
6887					hard_frame_pointer_rtx,
6888					MIPS_PROLOGUE_TEMP (Pmode)));
6889	      emit_move_insn (stack_pointer_rtx, hard_frame_pointer_rtx);
6890	    }
6891	  else
6892	    emit_insn (gen_sub3_insn (stack_pointer_rtx,
6893				      stack_pointer_rtx,
6894				      MIPS_PROLOGUE_TEMP (Pmode)));
6895
6896	  /* Describe the combined effect of the previous instructions.  */
6897	  mips_set_frame_expr
6898	    (gen_rtx_SET (VOIDmode, stack_pointer_rtx,
6899			  plus_constant (stack_pointer_rtx, -size)));
6900	}
6901    }
6902
6903  /* Set up the frame pointer, if we're using one.  In mips16 code,
6904     we point the frame pointer ahead of the outgoing argument area.
6905     This should allow more variables & incoming arguments to be
6906     accessed with unextended instructions.  */
6907  if (frame_pointer_needed)
6908    {
6909      if (TARGET_MIPS16 && cfun->machine->frame.args_size != 0)
6910	{
6911	  rtx offset = GEN_INT (cfun->machine->frame.args_size);
6912	  if (SMALL_OPERAND (cfun->machine->frame.args_size))
6913	    RTX_FRAME_RELATED_P
6914	      (emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6915					 stack_pointer_rtx,
6916					 offset))) = 1;
6917	  else
6918	    {
6919	      emit_move_insn (MIPS_PROLOGUE_TEMP (Pmode), offset);
6920	      emit_move_insn (hard_frame_pointer_rtx, stack_pointer_rtx);
6921	      emit_insn (gen_add3_insn (hard_frame_pointer_rtx,
6922					hard_frame_pointer_rtx,
6923					MIPS_PROLOGUE_TEMP (Pmode)));
6924	      mips_set_frame_expr
6925		(gen_rtx_SET (VOIDmode, hard_frame_pointer_rtx,
6926			      plus_constant (stack_pointer_rtx,
6927					     cfun->machine->frame.args_size)));
6928	    }
6929	}
6930      else
6931	RTX_FRAME_RELATED_P (emit_move_insn (hard_frame_pointer_rtx,
6932					     stack_pointer_rtx)) = 1;
6933    }
6934
6935  mips_emit_loadgp ();
6936
6937  /* If generating o32/o64 abicalls, save $gp on the stack.  */
6938  if (TARGET_ABICALLS && !TARGET_NEWABI && !current_function_is_leaf)
6939    emit_insn (gen_cprestore (GEN_INT (current_function_outgoing_args_size)));
6940
6941  /* If we are profiling, make sure no instructions are scheduled before
6942     the call to mcount.  */
6943
6944  if (current_function_profile)
6945    emit_insn (gen_blockage ());
6946}
6947
6948/* Do any necessary cleanup after a function to restore stack, frame,
6949   and regs.  */
6950
6951#define RA_MASK BITMASK_HIGH	/* 1 << 31 */
6952
6953static void
6954mips_output_function_epilogue (FILE *file ATTRIBUTE_UNUSED,
6955			       HOST_WIDE_INT size ATTRIBUTE_UNUSED)
6956{
6957  /* Reinstate the normal $gp.  */
6958  REGNO (pic_offset_table_rtx) = GLOBAL_POINTER_REGNUM;
6959  mips_output_cplocal ();
6960
6961  if (cfun->machine->all_noreorder_p)
6962    {
6963      /* Avoid using %>%) since it adds excess whitespace.  */
6964      output_asm_insn (".set\tmacro", 0);
6965      output_asm_insn (".set\treorder", 0);
6966      set_noreorder = set_nomacro = 0;
6967    }
6968
6969  if (!FUNCTION_NAME_ALREADY_DECLARED && !flag_inhibit_size_directive)
6970    {
6971      const char *fnname;
6972
6973      /* Get the function name the same way that toplev.c does before calling
6974	 assemble_start_function.  This is needed so that the name used here
6975	 exactly matches the name used in ASM_DECLARE_FUNCTION_NAME.  */
6976      fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
6977      fputs ("\t.end\t", file);
6978      assemble_name (file, fnname);
6979      fputs ("\n", file);
6980    }
6981}
6982
6983/* Emit instructions to restore register REG from slot MEM.  */
6984
6985static void
6986mips_restore_reg (rtx reg, rtx mem)
6987{
6988  /* There's no mips16 instruction to load $31 directly.  Load into
6989     $7 instead and adjust the return insn appropriately.  */
6990  if (TARGET_MIPS16 && REGNO (reg) == GP_REG_FIRST + 31)
6991    reg = gen_rtx_REG (GET_MODE (reg), 7);
6992
6993  if (TARGET_MIPS16 && !M16_REG_P (REGNO (reg)))
6994    {
6995      /* Can't restore directly; move through a temporary.  */
6996      emit_move_insn (MIPS_EPILOGUE_TEMP (GET_MODE (reg)), mem);
6997      emit_move_insn (reg, MIPS_EPILOGUE_TEMP (GET_MODE (reg)));
6998    }
6999  else
7000    emit_move_insn (reg, mem);
7001}
7002
7003
7004/* Expand the epilogue into a bunch of separate insns.  SIBCALL_P is true
7005   if this epilogue precedes a sibling call, false if it is for a normal
7006   "epilogue" pattern.  */
7007
7008void
7009mips_expand_epilogue (int sibcall_p)
7010{
7011  HOST_WIDE_INT step1, step2;
7012  rtx base, target;
7013
7014  if (!sibcall_p && mips_can_use_return_insn ())
7015    {
7016      emit_jump_insn (gen_return ());
7017      return;
7018    }
7019
7020  /* Split the frame into two.  STEP1 is the amount of stack we should
7021     deallocate before restoring the registers.  STEP2 is the amount we
7022     should deallocate afterwards.
7023
7024     Start off by assuming that no registers need to be restored.  */
7025  step1 = cfun->machine->frame.total_size;
7026  step2 = 0;
7027
7028  /* Work out which register holds the frame address.  Account for the
7029     frame pointer offset used by mips16 code.  */
7030  if (!frame_pointer_needed)
7031    base = stack_pointer_rtx;
7032  else
7033    {
7034      base = hard_frame_pointer_rtx;
7035      if (TARGET_MIPS16)
7036	step1 -= cfun->machine->frame.args_size;
7037    }
7038
7039  /* If we need to restore registers, deallocate as much stack as
7040     possible in the second step without going out of range.  */
7041  if ((cfun->machine->frame.mask | cfun->machine->frame.fmask) != 0)
7042    {
7043      step2 = MIN (step1, MIPS_MAX_FIRST_STACK_STEP);
7044      step1 -= step2;
7045    }
7046
7047  /* Set TARGET to BASE + STEP1.  */
7048  target = base;
7049  if (step1 > 0)
7050    {
7051      rtx adjust;
7052
7053      /* Get an rtx for STEP1 that we can add to BASE.  */
7054      adjust = GEN_INT (step1);
7055      if (!SMALL_OPERAND (step1))
7056	{
7057	  emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), adjust);
7058	  adjust = MIPS_EPILOGUE_TEMP (Pmode);
7059	}
7060
7061      /* Normal mode code can copy the result straight into $sp.  */
7062      if (!TARGET_MIPS16)
7063	target = stack_pointer_rtx;
7064
7065      emit_insn (gen_add3_insn (target, base, adjust));
7066    }
7067
7068  /* Copy TARGET into the stack pointer.  */
7069  if (target != stack_pointer_rtx)
7070    emit_move_insn (stack_pointer_rtx, target);
7071
7072  /* If we're using addressing macros for n32/n64 abicalls, $gp is
7073     implicitly used by all SYMBOL_REFs.  We must emit a blockage
7074     insn before restoring it.  */
7075  if (TARGET_ABICALLS && TARGET_NEWABI && !TARGET_EXPLICIT_RELOCS)
7076    emit_insn (gen_blockage ());
7077
7078  /* Restore the registers.  */
7079  mips_for_each_saved_reg (cfun->machine->frame.total_size - step2,
7080			   mips_restore_reg);
7081
7082  /* Deallocate the final bit of the frame.  */
7083  if (step2 > 0)
7084    emit_insn (gen_add3_insn (stack_pointer_rtx,
7085			      stack_pointer_rtx,
7086			      GEN_INT (step2)));
7087
7088  /* Add in the __builtin_eh_return stack adjustment.  We need to
7089     use a temporary in mips16 code.  */
7090  if (current_function_calls_eh_return)
7091    {
7092      if (TARGET_MIPS16)
7093	{
7094	  emit_move_insn (MIPS_EPILOGUE_TEMP (Pmode), stack_pointer_rtx);
7095	  emit_insn (gen_add3_insn (MIPS_EPILOGUE_TEMP (Pmode),
7096				    MIPS_EPILOGUE_TEMP (Pmode),
7097				    EH_RETURN_STACKADJ_RTX));
7098	  emit_move_insn (stack_pointer_rtx, MIPS_EPILOGUE_TEMP (Pmode));
7099	}
7100      else
7101	emit_insn (gen_add3_insn (stack_pointer_rtx,
7102				  stack_pointer_rtx,
7103				  EH_RETURN_STACKADJ_RTX));
7104    }
7105
7106  if (!sibcall_p)
7107    {
      /* Mips16 code loads the return address into $7, not $31.  */
7109      if (TARGET_MIPS16 && (cfun->machine->frame.mask & RA_MASK) != 0)
7110	emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7111							  GP_REG_FIRST + 7)));
7112      else
7113	emit_jump_insn (gen_return_internal (gen_rtx_REG (Pmode,
7114							  GP_REG_FIRST + 31)));
7115    }
7116}
7117
7118/* Return nonzero if this function is known to have a null epilogue.
7119   This allows the optimizer to omit jumps to jumps if no stack
7120   was created.  */
7121
7122int
7123mips_can_use_return_insn (void)
7124{
7125  tree return_type;
7126
7127  if (! reload_completed)
7128    return 0;
7129
7130  if (regs_ever_live[31] || current_function_profile)
7131    return 0;
7132
7133  return_type = DECL_RESULT (current_function_decl);
7134
7135  /* In mips16 mode, a function which returns a floating point value
7136     needs to arrange to copy the return value into the floating point
7137     registers.  */
7138  if (TARGET_MIPS16
7139      && mips16_hard_float
7140      && ! aggregate_value_p (return_type, current_function_decl)
7141      && GET_MODE_CLASS (DECL_MODE (return_type)) == MODE_FLOAT
7142      && GET_MODE_SIZE (DECL_MODE (return_type)) <= UNITS_PER_FPVALUE)
7143    return 0;
7144
7145  if (cfun->machine->frame.initialized)
7146    return cfun->machine->frame.total_size == 0;
7147
7148  return compute_frame_size (get_frame_size ()) == 0;
7149}
7150
7151/* Implement TARGET_ASM_OUTPUT_MI_THUNK.  Generate rtl rather than asm text
7152   in order to avoid duplicating too much logic from elsewhere.  */
7153
7154static void
7155mips_output_mi_thunk (FILE *file, tree thunk_fndecl ATTRIBUTE_UNUSED,
7156		      HOST_WIDE_INT delta, HOST_WIDE_INT vcall_offset,
7157		      tree function)
7158{
7159  rtx this, temp1, temp2, insn, fnaddr;
7160
7161  /* Pretend to be a post-reload pass while generating rtl.  */
7162  no_new_pseudos = 1;
7163  reload_completed = 1;
7164  reset_block_changes ();
7165
7166  /* Pick a global pointer for -mabicalls.  Use $15 rather than $28
7167     for TARGET_NEWABI since the latter is a call-saved register.  */
7168  if (TARGET_ABICALLS)
7169    cfun->machine->global_pointer
7170      = REGNO (pic_offset_table_rtx)
7171      = TARGET_NEWABI ? 15 : GLOBAL_POINTER_REGNUM;
7172
7173  /* Set up the global pointer for n32 or n64 abicalls.  */
7174  mips_emit_loadgp ();
7175
7176  /* We need two temporary registers in some cases.  */
7177  temp1 = gen_rtx_REG (Pmode, 2);
7178  temp2 = gen_rtx_REG (Pmode, 3);
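  /* $2 and $3 are call-clobbered and are not used to pass arguments,
     so the thunk is free to use them as scratch registers.  */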
7179
7180  /* Find out which register contains the "this" pointer.  */
7181  if (aggregate_value_p (TREE_TYPE (TREE_TYPE (function)), function))
7182    this = gen_rtx_REG (Pmode, GP_ARG_FIRST + 1);
7183  else
7184    this = gen_rtx_REG (Pmode, GP_ARG_FIRST);
7185
7186  /* Add DELTA to THIS.  */
7187  if (delta != 0)
7188    {
7189      rtx offset = GEN_INT (delta);
7190      if (!SMALL_OPERAND (delta))
7191	{
7192	  emit_move_insn (temp1, offset);
7193	  offset = temp1;
7194	}
7195      emit_insn (gen_add3_insn (this, this, offset));
7196    }
7197
7198  /* If needed, add *(*THIS + VCALL_OFFSET) to THIS.  */
7199  if (vcall_offset != 0)
7200    {
7201      rtx addr;
7202
7203      /* Set TEMP1 to *THIS.  */
7204      emit_move_insn (temp1, gen_rtx_MEM (Pmode, this));
7205
7206      /* Set ADDR to a legitimate address for *THIS + VCALL_OFFSET.  */
7207      addr = mips_add_offset (temp2, temp1, vcall_offset);
7208
7209      /* Load the offset and add it to THIS.  */
7210      emit_move_insn (temp1, gen_rtx_MEM (Pmode, addr));
7211      emit_insn (gen_add3_insn (this, this, temp1));
7212    }
7213
7214  /* Jump to the target function.  Use a sibcall if direct jumps are
7215     allowed, otherwise load the address into a register first.  */
7216  fnaddr = XEXP (DECL_RTL (function), 0);
7217  if (TARGET_MIPS16 || TARGET_ABICALLS || TARGET_LONG_CALLS)
7218    {
7219      /* This is messy.  gas treats "la $25,foo" as part of a call
7220	 sequence and may allow a global "foo" to be lazily bound.
7221	 The general move patterns therefore reject this combination.
7222
7223	 In this context, lazy binding would actually be OK for o32 and o64,
7224	 but it's still wrong for n32 and n64; see mips_load_call_address.
7225	 We must therefore load the address via a temporary register if
7226	 mips_dangerous_for_la25_p.
7227
7228	 If we jump to the temporary register rather than $25, the assembler
7229	 can use the move insn to fill the jump's delay slot.  */
7230      if (TARGET_ABICALLS && !mips_dangerous_for_la25_p (fnaddr))
7231	temp1 = gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM);
7232      mips_load_call_address (temp1, fnaddr, true);
7233
7234      if (TARGET_ABICALLS && REGNO (temp1) != PIC_FUNCTION_ADDR_REGNUM)
7235	emit_move_insn (gen_rtx_REG (Pmode, PIC_FUNCTION_ADDR_REGNUM), temp1);
7236      emit_jump_insn (gen_indirect_jump (temp1));
7237    }
7238  else
7239    {
7240      insn = emit_call_insn (gen_sibcall_internal (fnaddr, const0_rtx));
7241      SIBLING_CALL_P (insn) = 1;
7242    }
7243
7244  /* Run just enough of rest_of_compilation.  This sequence was
7245     "borrowed" from alpha.c.  */
7246  insn = get_insns ();
7247  insn_locators_initialize ();
7248  split_all_insns_noflow ();
7249  if (TARGET_MIPS16)
7250    mips16_lay_out_constants ();
7251  shorten_branches (insn);
7252  final_start_function (insn, file, 1);
7253  final (insn, file, 1);
7254  final_end_function ();
7255
7256  /* Clean up the vars set above.  Note that final_end_function resets
7257     the global pointer for us.  */
7258  reload_completed = 0;
7259  no_new_pseudos = 0;
7260}
7261
7262/* Returns nonzero if X contains a SYMBOL_REF.  */
7263
7264static int
7265symbolic_expression_p (rtx x)
7266{
7267  if (GET_CODE (x) == SYMBOL_REF)
7268    return 1;
7269
7270  if (GET_CODE (x) == CONST)
7271    return symbolic_expression_p (XEXP (x, 0));
7272
7273  if (UNARY_P (x))
7274    return symbolic_expression_p (XEXP (x, 0));
7275
7276  if (ARITHMETIC_P (x))
7277    return (symbolic_expression_p (XEXP (x, 0))
7278	    || symbolic_expression_p (XEXP (x, 1)));
7279
7280  return 0;
7281}
7282
7283/* Choose the section to use for the constant rtx expression X that has
7284   mode MODE.  */
7285
7286static section *
7287mips_select_rtx_section (enum machine_mode mode, rtx x,
7288			 unsigned HOST_WIDE_INT align)
7289{
7290  if (TARGET_MIPS16)
7291    {
7292      /* In mips16 mode, the constant table always goes in the same section
7293         as the function, so that constants can be loaded using PC relative
7294         addressing.  */
7295      return function_section (current_function_decl);
7296    }
7297  else if (TARGET_EMBEDDED_DATA)
7298    {
7299      /* For embedded applications, always put constants in read-only data,
7300	 in order to reduce RAM usage.  */
7301      return mergeable_constant_section (mode, align, 0);
7302    }
7303  else
7304    {
7305      /* For hosted applications, always put constants in small data if
7306	 possible, as this gives the best performance.  */
7307      /* ??? Consider using mergeable small data sections.  */
7308
7309      if (GET_MODE_SIZE (mode) <= (unsigned) mips_section_threshold
7310	  && mips_section_threshold > 0)
7311	return get_named_section (NULL, ".sdata", 0);
7312      else if (flag_pic && symbolic_expression_p (x))
7313	return get_named_section (NULL, ".data.rel.ro", 3);
7314      else
7315	return mergeable_constant_section (mode, align, 0);
7316    }
7317}
7318
7319/* Implement TARGET_ASM_FUNCTION_RODATA_SECTION.
7320
7321   The complication here is that, with the combination TARGET_ABICALLS
7322   && !TARGET_GPWORD, jump tables will use absolute addresses, and should
7323   therefore not be included in the read-only part of a DSO.  Handle such
7324   cases by selecting a normal data section instead of a read-only one.
7325   The logic apes that in default_function_rodata_section.  */
7326
7327static section *
7328mips_function_rodata_section (tree decl)
7329{
7330  if (!TARGET_ABICALLS || TARGET_GPWORD)
7331    return default_function_rodata_section (decl);
7332
7333  if (decl && DECL_SECTION_NAME (decl))
7334    {
7335      const char *name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7336      if (DECL_ONE_ONLY (decl) && strncmp (name, ".gnu.linkonce.t.", 16) == 0)
7337	{
7338	  char *rname = ASTRDUP (name);
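	  /* Turn ".gnu.linkonce.t.FOO" into ".gnu.linkonce.d.FOO".  */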
7339	  rname[14] = 'd';
7340	  return get_section (rname, SECTION_LINKONCE | SECTION_WRITE, decl);
7341	}
7342      else if (flag_function_sections && flag_data_sections
7343	       && strncmp (name, ".text.", 6) == 0)
7344	{
7345	  char *rname = ASTRDUP (name);
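	  /* Turn ".text.FOO" into ".data.FOO".  */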
7346	  memcpy (rname + 1, "data", 4);
7347	  return get_section (rname, SECTION_WRITE, decl);
7348	}
7349    }
7350  return data_section;
7351}
7352
7353/* Implement TARGET_IN_SMALL_DATA_P.  This function controls whether
7354   locally-defined objects go in a small data section.  It also controls
7355   the setting of the SYMBOL_REF_SMALL_P flag, which in turn helps
7356   mips_classify_symbol decide when to use %gp_rel(...)($gp) accesses.  */
7357
7358static bool
7359mips_in_small_data_p (tree decl)
7360{
7361  HOST_WIDE_INT size;
7362
7363  if (TREE_CODE (decl) == STRING_CST || TREE_CODE (decl) == FUNCTION_DECL)
7364    return false;
7365
7366  /* We don't yet generate small-data references for -mabicalls.  See related
7367     -G handling in override_options.  */
7368  if (TARGET_ABICALLS)
7369    return false;
7370
7371  if (TREE_CODE (decl) == VAR_DECL && DECL_SECTION_NAME (decl) != 0)
7372    {
7373      const char *name;
7374
7375      /* Reject anything that isn't in a known small-data section.  */
7376      name = TREE_STRING_POINTER (DECL_SECTION_NAME (decl));
7377      if (strcmp (name, ".sdata") != 0 && strcmp (name, ".sbss") != 0)
7378	return false;
7379
7380      /* If a symbol is defined externally, the assembler will use the
7381	 usual -G rules when deciding how to implement macros.  */
7382      if (TARGET_EXPLICIT_RELOCS || !DECL_EXTERNAL (decl))
7383	return true;
7384    }
7385  else if (TARGET_EMBEDDED_DATA)
7386    {
7387      /* Don't put constants into the small data section: we want them
7388	 to be in ROM rather than RAM.  */
7389      if (TREE_CODE (decl) != VAR_DECL)
7390	return false;
7391
7392      if (TREE_READONLY (decl)
7393	  && !TREE_SIDE_EFFECTS (decl)
7394	  && (!DECL_INITIAL (decl) || TREE_CONSTANT (DECL_INITIAL (decl))))
7395	return false;
7396    }
7397
7398  size = int_size_in_bytes (TREE_TYPE (decl));
7399  return (size > 0 && size <= mips_section_threshold);
7400}
7401
7402/* Implement TARGET_USE_ANCHORS_FOR_SYMBOL_P.  We don't want to use
7403   anchors for small data: the GP register acts as an anchor in that
7404   case.  We also don't want to use them for PC-relative accesses,
7405   where the PC acts as an anchor.  */
7406
7407static bool
7408mips_use_anchors_for_symbol_p (rtx symbol)
7409{
7410  switch (mips_classify_symbol (symbol))
7411    {
7412    case SYMBOL_CONSTANT_POOL:
7413    case SYMBOL_SMALL_DATA:
7414      return false;
7415
7416    default:
7417      return true;
7418    }
7419}
7420
7421/* See whether VALTYPE is a record whose fields should be returned in
7422   floating-point registers.  If so, return the number of fields and
7423   list them in FIELDS (which should have two elements).  Return 0
7424   otherwise.
7425
7426   For n32 & n64, a structure with one or two fields is returned in
7427   floating-point registers as long as every field has a floating-point
7428   type.  */
7429
7430static int
7431mips_fpr_return_fields (tree valtype, tree *fields)
7432{
7433  tree field;
7434  int i;
7435
7436  if (!TARGET_NEWABI)
7437    return 0;
7438
7439  if (TREE_CODE (valtype) != RECORD_TYPE)
7440    return 0;
7441
7442  i = 0;
7443  for (field = TYPE_FIELDS (valtype); field != 0; field = TREE_CHAIN (field))
7444    {
7445      if (TREE_CODE (field) != FIELD_DECL)
7446	continue;
7447
7448      if (TREE_CODE (TREE_TYPE (field)) != REAL_TYPE)
7449	return 0;
7450
7451      if (i == 2)
7452	return 0;
7453
7454      fields[i++] = field;
7455    }
7456  return i;
7457}
7458
7459
7460/* Implement TARGET_RETURN_IN_MSB.  For n32 & n64, we should return
7461   a value in the most significant part of $2/$3 if:
7462
7463      - the target is big-endian;
7464
7465      - the value has a structure or union type (we generalize this to
7466	cover aggregates from other languages too); and
7467
7468      - the structure is not returned in floating-point registers.  */
7469
7470static bool
7471mips_return_in_msb (tree valtype)
7472{
7473  tree fields[2];
7474
7475  return (TARGET_NEWABI
7476	  && TARGET_BIG_ENDIAN
7477	  && AGGREGATE_TYPE_P (valtype)
7478	  && mips_fpr_return_fields (valtype, fields) == 0);
7479}
7480
7481
7482/* Return a composite value in a pair of floating-point registers.
7483   MODE1 and OFFSET1 are the mode and byte offset for the first value,
7484   likewise MODE2 and OFFSET2 for the second.  MODE is the mode of the
7485   complete value.
7486
7487   For n32 & n64, $f0 always holds the first value and $f2 the second.
7488   Otherwise the values are packed together as closely as possible.  */
7489
7490static rtx
7491mips_return_fpr_pair (enum machine_mode mode,
7492		      enum machine_mode mode1, HOST_WIDE_INT offset1,
7493		      enum machine_mode mode2, HOST_WIDE_INT offset2)
7494{
7495  int inc;
7496
7497  inc = (TARGET_NEWABI ? 2 : FP_INC);
7498  return gen_rtx_PARALLEL
7499    (mode,
7500     gen_rtvec (2,
7501		gen_rtx_EXPR_LIST (VOIDmode,
7502				   gen_rtx_REG (mode1, FP_RETURN),
7503				   GEN_INT (offset1)),
7504		gen_rtx_EXPR_LIST (VOIDmode,
7505				   gen_rtx_REG (mode2, FP_RETURN + inc),
7506				   GEN_INT (offset2))));
7507
7508}
7509
7510
7511/* Implement FUNCTION_VALUE and LIBCALL_VALUE.  For normal calls,
7512   VALTYPE is the return type and MODE is VOIDmode.  For libcalls,
7513   VALTYPE is null and MODE is the mode of the return value.  */
7514
7515rtx
7516mips_function_value (tree valtype, tree func ATTRIBUTE_UNUSED,
7517		     enum machine_mode mode)
7518{
7519  if (valtype)
7520    {
7521      tree fields[2];
7522      int unsignedp;
7523
7524      mode = TYPE_MODE (valtype);
7525      unsignedp = TYPE_UNSIGNED (valtype);
7526
      /* Since our version of TARGET_PROMOTE_FUNCTION_RETURN returns true,
	 we must promote the mode just as PROMOTE_MODE does.  */
7529      mode = promote_mode (valtype, mode, &unsignedp, 1);
7530
7531      /* Handle structures whose fields are returned in $f0/$f2.  */
7532      switch (mips_fpr_return_fields (valtype, fields))
7533	{
7534	case 1:
7535	  return gen_rtx_REG (mode, FP_RETURN);
7536
7537	case 2:
7538	  return mips_return_fpr_pair (mode,
7539				       TYPE_MODE (TREE_TYPE (fields[0])),
7540				       int_byte_position (fields[0]),
7541				       TYPE_MODE (TREE_TYPE (fields[1])),
7542				       int_byte_position (fields[1]));
7543	}
7544
7545      /* If a value is passed in the most significant part of a register, see
7546	 whether we have to round the mode up to a whole number of words.  */
7547      if (mips_return_in_msb (valtype))
7548	{
7549	  HOST_WIDE_INT size = int_size_in_bytes (valtype);
7550	  if (size % UNITS_PER_WORD != 0)
7551	    {
7552	      size += UNITS_PER_WORD - size % UNITS_PER_WORD;
7553	      mode = mode_for_size (size * BITS_PER_UNIT, MODE_INT, 0);
7554	    }
7555	}
7556
7557      /* For EABI, the class of return register depends entirely on MODE.
7558	 For example, "struct { some_type x; }" and "union { some_type x; }"
7559	 are returned in the same way as a bare "some_type" would be.
7560	 Other ABIs only use FPRs for scalar, complex or vector types.  */
7561      if (mips_abi != ABI_EABI && !FLOAT_TYPE_P (valtype))
7562	return gen_rtx_REG (mode, GP_RETURN);
7563    }
7564
7565  if ((GET_MODE_CLASS (mode) == MODE_FLOAT
7566       || GET_MODE_CLASS (mode) == MODE_VECTOR_FLOAT)
7567      && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE)
7568    return gen_rtx_REG (mode, FP_RETURN);
7569
7570  /* Handle long doubles for n32 & n64.  */
7571  if (mode == TFmode)
7572    return mips_return_fpr_pair (mode,
7573				 DImode, 0,
7574				 DImode, GET_MODE_SIZE (mode) / 2);
7575
7576  if (GET_MODE_CLASS (mode) == MODE_COMPLEX_FLOAT
7577      && GET_MODE_SIZE (mode) <= UNITS_PER_HWFPVALUE * 2)
7578    return mips_return_fpr_pair (mode,
7579				 GET_MODE_INNER (mode), 0,
7580				 GET_MODE_INNER (mode),
7581				 GET_MODE_SIZE (mode) / 2);
7582
7583  return gen_rtx_REG (mode, GP_RETURN);
7584}
7585
7586/* Return nonzero when an argument must be passed by reference.  */
7587
7588static bool
7589mips_pass_by_reference (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7590			enum machine_mode mode, tree type,
7591			bool named ATTRIBUTE_UNUSED)
7592{
7593  if (mips_abi == ABI_EABI)
7594    {
7595      int size;
7596
7597      /* ??? How should SCmode be handled?  */
7598      if (mode == DImode || mode == DFmode)
7599	return 0;
7600
7601      size = type ? int_size_in_bytes (type) : GET_MODE_SIZE (mode);
7602      return size == -1 || size > UNITS_PER_WORD;
7603    }
7604  else
7605    {
7606      /* If we have a variable-sized parameter, we have no choice.  */
7607      return targetm.calls.must_pass_in_stack (mode, type);
7608    }
7609}
7610
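/* Implement TARGET_CALLEE_COPIES.  For EABI, the callee rather than the
   caller is assumed to copy named arguments that are passed by reference.  */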
7611static bool
7612mips_callee_copies (CUMULATIVE_ARGS *cum ATTRIBUTE_UNUSED,
7613		    enum machine_mode mode ATTRIBUTE_UNUSED,
7614		    tree type ATTRIBUTE_UNUSED, bool named)
7615{
7616  return mips_abi == ABI_EABI && named;
7617}
7618
7619/* Return true if registers of class CLASS cannot change from mode FROM
7620   to mode TO.  */
7621
7622bool
7623mips_cannot_change_mode_class (enum machine_mode from,
7624			       enum machine_mode to, enum reg_class class)
7625{
7626  if (MIN (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) <= UNITS_PER_WORD
7627      && MAX (GET_MODE_SIZE (from), GET_MODE_SIZE (to)) > UNITS_PER_WORD)
7628    {
7629      if (TARGET_BIG_ENDIAN)
7630	{
7631	  /* When a multi-word value is stored in paired floating-point
7632	     registers, the first register always holds the low word.
7633	     We therefore can't allow FPRs to change between single-word
7634	     and multi-word modes.  */
7635	  if (FP_INC > 1 && reg_classes_intersect_p (FP_REGS, class))
7636	    return true;
7637	}
7638      else
7639	{
7640	  /* LO_REGNO == HI_REGNO + 1, so if a multi-word value is stored
7641	     in LO and HI, the high word always comes first.  We therefore
7642	     can't allow values stored in HI to change between single-word
7643	     and multi-word modes.
7644	     This rule applies to both the original HI/LO pair and the new
7645	     DSP accumulators.  */
7646	  if (reg_classes_intersect_p (ACC_REGS, class))
7647	    return true;
7648	}
7649    }
7650  /* Loading a 32-bit value into a 64-bit floating-point register
7651     will not sign-extend the value, despite what LOAD_EXTEND_OP says.
     We can't allow 64-bit float registers to change from SImode to
     a wider mode.  */
7654  if (TARGET_FLOAT64
7655      && from == SImode
7656      && GET_MODE_SIZE (to) >= UNITS_PER_WORD
7657      && reg_classes_intersect_p (FP_REGS, class))
7658    return true;
7659  return false;
7660}
7661
7662/* Return true if X should not be moved directly into register $25.
7663   We need this because many versions of GAS will treat "la $25,foo" as
7664   part of a call sequence and so allow a global "foo" to be lazily bound.  */
7665
7666bool
7667mips_dangerous_for_la25_p (rtx x)
7668{
7669  HOST_WIDE_INT offset;
7670
7671  if (TARGET_EXPLICIT_RELOCS)
7672    return false;
7673
7674  mips_split_const (x, &x, &offset);
7675  return global_got_operand (x, VOIDmode);
7676}
7677
7678/* Implement PREFERRED_RELOAD_CLASS.  */
7679
7680enum reg_class
7681mips_preferred_reload_class (rtx x, enum reg_class class)
7682{
7683  if (mips_dangerous_for_la25_p (x) && reg_class_subset_p (LEA_REGS, class))
7684    return LEA_REGS;
7685
7686  if (TARGET_HARD_FLOAT
7687      && FLOAT_MODE_P (GET_MODE (x))
7688      && reg_class_subset_p (FP_REGS, class))
7689    return FP_REGS;
7690
7691  if (reg_class_subset_p (GR_REGS, class))
7692    class = GR_REGS;
7693
7694  if (TARGET_MIPS16 && reg_class_subset_p (M16_REGS, class))
7695    class = M16_REGS;
7696
7697  return class;
7698}
7699
7700/* This function returns the register class required for a secondary
7701   register when copying between one of the registers in CLASS, and X,
7702   using MODE.  If IN_P is nonzero, the copy is going from X to the
7703   register, otherwise the register is the source.  A return value of
7704   NO_REGS means that no secondary register is required.  */
7705
7706enum reg_class
7707mips_secondary_reload_class (enum reg_class class,
7708			     enum machine_mode mode, rtx x, int in_p)
7709{
7710  enum reg_class gr_regs = TARGET_MIPS16 ? M16_REGS : GR_REGS;
7711  int regno = -1;
7712  int gp_reg_p;
7713
  if (REG_P (x) || GET_CODE (x) == SUBREG)
7715    regno = true_regnum (x);
7716
7717  gp_reg_p = TARGET_MIPS16 ? M16_REG_P (regno) : GP_REG_P (regno);
7718
7719  if (mips_dangerous_for_la25_p (x))
7720    {
7721      gr_regs = LEA_REGS;
7722      if (TEST_HARD_REG_BIT (reg_class_contents[(int) class], 25))
7723	return gr_regs;
7724    }
7725
7726  /* Copying from HI or LO to anywhere other than a general register
7727     requires a general register.
7728     This rule applies to both the original HI/LO pair and the new
7729     DSP accumulators.  */
7730  if (reg_class_subset_p (class, ACC_REGS))
7731    {
7732      if (TARGET_MIPS16 && in_p)
7733	{
7734	  /* We can't really copy to HI or LO at all in mips16 mode.  */
7735	  return M16_REGS;
7736	}
7737      return gp_reg_p ? NO_REGS : gr_regs;
7738    }
7739  if (ACC_REG_P (regno))
7740    {
7741      if (TARGET_MIPS16 && ! in_p)
7742	{
7743	  /* We can't really copy to HI or LO at all in mips16 mode.  */
7744	  return M16_REGS;
7745	}
7746      return class == gr_regs ? NO_REGS : gr_regs;
7747    }
7748
7749  /* We can only copy a value to a condition code register from a
7750     floating point register, and even then we require a scratch
7751     floating point register.  We can only copy a value out of a
7752     condition code register into a general register.  */
7753  if (class == ST_REGS)
7754    {
7755      if (in_p)
7756	return FP_REGS;
7757      return gp_reg_p ? NO_REGS : gr_regs;
7758    }
7759  if (ST_REG_P (regno))
7760    {
7761      if (! in_p)
7762	return FP_REGS;
7763      return class == gr_regs ? NO_REGS : gr_regs;
7764    }
7765
7766  if (class == FP_REGS)
7767    {
7768      if (MEM_P (x))
7769	{
7770	  /* In this case we can use lwc1, swc1, ldc1 or sdc1.  */
7771	  return NO_REGS;
7772	}
7773      else if (CONSTANT_P (x) && GET_MODE_CLASS (mode) == MODE_FLOAT)
7774	{
7775	  /* We can use the l.s and l.d macros to load floating-point
7776	     constants.  ??? For l.s, we could probably get better
7777	     code by returning GR_REGS here.  */
7778	  return NO_REGS;
7779	}
7780      else if (gp_reg_p || x == CONST0_RTX (mode))
7781	{
7782	  /* In this case we can use mtc1, mfc1, dmtc1 or dmfc1.  */
7783	  return NO_REGS;
7784	}
7785      else if (FP_REG_P (regno))
7786	{
7787	  /* In this case we can use mov.s or mov.d.  */
7788	  return NO_REGS;
7789	}
7790      else
7791	{
7792	  /* Otherwise, we need to reload through an integer register.  */
7793	  return gr_regs;
7794	}
7795    }
7796
7797  /* In mips16 mode, going between memory and anything but M16_REGS
7798     requires an M16_REG.  */
7799  if (TARGET_MIPS16)
7800    {
7801      if (class != M16_REGS && class != M16_NA_REGS)
7802	{
7803	  if (gp_reg_p)
7804	    return NO_REGS;
7805	  return M16_REGS;
7806	}
7807      if (! gp_reg_p)
7808	{
7809	  if (class == M16_REGS || class == M16_NA_REGS)
7810	    return NO_REGS;
7811	  return M16_REGS;
7812	}
7813    }
7814
7815  return NO_REGS;
7816}
7817
7818/* Implement CLASS_MAX_NREGS.
7819
7820   Usually all registers are word-sized.  The only supported exception
7821   is -mgp64 -msingle-float, which has 64-bit words but 32-bit float
7822   registers.  A word-based calculation is correct even in that case,
7823   since -msingle-float disallows multi-FPR values.
7824
7825   The FP status registers are an exception to this rule.  They are always
7826   4 bytes wide as they only hold condition code modes, and CCmode is always
7827   considered to be 4 bytes wide.  */
7828
7829int
7830mips_class_max_nregs (enum reg_class class ATTRIBUTE_UNUSED,
7831		      enum machine_mode mode)
7832{
7833  if (class == ST_REGS)
7834    return (GET_MODE_SIZE (mode) + 3) / 4;
7835  else
7836    return (GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD;
7837}
7838
7839static bool
7840mips_valid_pointer_mode (enum machine_mode mode)
7841{
7842  return (mode == SImode || (TARGET_64BIT && mode == DImode));
7843}
7844
/* Implement TARGET_VECTOR_MODE_SUPPORTED_P.  */
7846
7847static bool
7848mips_vector_mode_supported_p (enum machine_mode mode)
7849{
7850  switch (mode)
7851    {
7852    case V2SFmode:
7853      return TARGET_PAIRED_SINGLE_FLOAT;
7854
7855    case V2HImode:
7856    case V4QImode:
7857      return TARGET_DSP;
7858
7859    default:
7860      return false;
7861    }
7862}
7863
7864/* If we can access small data directly (using gp-relative relocation
7865   operators) return the small data pointer, otherwise return null.
7866
7867   For each mips16 function which refers to GP relative symbols, we
7868   use a pseudo register, initialized at the start of the function, to
7869   hold the $gp value.  */
7870
7871static rtx
7872mips16_gp_pseudo_reg (void)
7873{
7874  if (cfun->machine->mips16_gp_pseudo_rtx == NULL_RTX)
7875    {
7876      rtx unspec;
7877      rtx insn, scan;
7878
7879      cfun->machine->mips16_gp_pseudo_rtx = gen_reg_rtx (Pmode);
7880
7881      /* We want to initialize this to a value which gcc will believe
7882         is constant.  */
7883      start_sequence ();
7884      unspec = gen_rtx_UNSPEC (VOIDmode, gen_rtvec (1, const0_rtx), UNSPEC_GP);
7885      emit_move_insn (cfun->machine->mips16_gp_pseudo_rtx,
7886		      gen_rtx_CONST (Pmode, unspec));
7887      insn = get_insns ();
7888      end_sequence ();
7889
7890      push_topmost_sequence ();
7891      /* We need to emit the initialization after the FUNCTION_BEG
7892         note, so that it will be integrated.  */
7893      for (scan = get_insns (); scan != NULL_RTX; scan = NEXT_INSN (scan))
7894	if (NOTE_P (scan)
7895	    && NOTE_LINE_NUMBER (scan) == NOTE_INSN_FUNCTION_BEG)
7896	  break;
7897      if (scan == NULL_RTX)
7898	scan = get_insns ();
7899      insn = emit_insn_after (insn, scan);
7900      pop_topmost_sequence ();
7901    }
7902
7903  return cfun->machine->mips16_gp_pseudo_rtx;
7904}
7905
7906/* Write out code to move floating point arguments in or out of
7907   general registers.  Output the instructions to FILE.  FP_CODE is
7908   the code describing which arguments are present (see the comment at
7909   the definition of CUMULATIVE_ARGS in mips.h).  FROM_FP_P is nonzero if
7910   we are copying from the floating point registers.  */
7911
7912static void
7913mips16_fp_args (FILE *file, int fp_code, int from_fp_p)
7914{
7915  const char *s;
7916  int gparg, fparg;
7917  unsigned int f;
7918
7919  /* This code only works for the original 32 bit ABI and the O64 ABI.  */
7920  gcc_assert (TARGET_OLDABI);
7921
7922  if (from_fp_p)
7923    s = "mfc1";
7924  else
7925    s = "mtc1";
7926  gparg = GP_ARG_FIRST;
7927  fparg = FP_ARG_FIRST;
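  /* FP_CODE packs one two-bit field per floating-point argument, starting
     with the least significant bits: 1 describes a single-precision
     argument and 2 a double-precision one.  */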
7928  for (f = (unsigned int) fp_code; f != 0; f >>= 2)
7929    {
7930      if ((f & 3) == 1)
7931	{
7932	  if ((fparg & 1) != 0)
7933	    ++fparg;
7934	  fprintf (file, "\t%s\t%s,%s\n", s,
7935		   reg_names[gparg], reg_names[fparg]);
7936	}
7937      else if ((f & 3) == 2)
7938	{
7939	  if (TARGET_64BIT)
7940	    fprintf (file, "\td%s\t%s,%s\n", s,
7941		     reg_names[gparg], reg_names[fparg]);
7942	  else
7943	    {
7944	      if ((fparg & 1) != 0)
7945		++fparg;
7946	      if (TARGET_BIG_ENDIAN)
7947		fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7948			 reg_names[gparg], reg_names[fparg + 1], s,
7949			 reg_names[gparg + 1], reg_names[fparg]);
7950	      else
7951		fprintf (file, "\t%s\t%s,%s\n\t%s\t%s,%s\n", s,
7952			 reg_names[gparg], reg_names[fparg], s,
7953			 reg_names[gparg + 1], reg_names[fparg + 1]);
7954	      ++gparg;
7955	      ++fparg;
7956	    }
7957	}
7958      else
7959	gcc_unreachable ();
7960
7961      ++gparg;
7962      ++fparg;
7963    }
7964}
7965
7966/* Build a mips16 function stub.  This is used for functions which
7967   take arguments in the floating point registers.  It is 32 bit code
7968   that moves the floating point args into the general registers, and
7969   then jumps to the 16 bit code.  */
7970
7971static void
7972build_mips16_function_stub (FILE *file)
7973{
7974  const char *fnname;
7975  char *secname, *stubname;
7976  tree stubid, stubdecl;
7977  int need_comma;
7978  unsigned int f;
7979
7980  fnname = XSTR (XEXP (DECL_RTL (current_function_decl), 0), 0);
7981  secname = (char *) alloca (strlen (fnname) + 20);
7982  sprintf (secname, ".mips16.fn.%s", fnname);
7983  stubname = (char *) alloca (strlen (fnname) + 20);
7984  sprintf (stubname, "__fn_stub_%s", fnname);
7985  stubid = get_identifier (stubname);
7986  stubdecl = build_decl (FUNCTION_DECL, stubid,
7987			 build_function_type (void_type_node, NULL_TREE));
7988  DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
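  /* For a function FOO, the stub below is called __fn_stub_FOO and is
     placed in its own section, .mips16.fn.FOO.  */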
7989
7990  fprintf (file, "\t# Stub function for %s (", current_function_name ());
7991  need_comma = 0;
7992  for (f = (unsigned int) current_function_args_info.fp_code; f != 0; f >>= 2)
7993    {
7994      fprintf (file, "%s%s",
7995	       need_comma ? ", " : "",
7996	       (f & 3) == 1 ? "float" : "double");
7997      need_comma = 1;
7998    }
7999  fprintf (file, ")\n");
8000
8001  fprintf (file, "\t.set\tnomips16\n");
8002  switch_to_section (function_section (stubdecl));
8003  ASM_OUTPUT_ALIGN (file, floor_log2 (FUNCTION_BOUNDARY / BITS_PER_UNIT));
8004
8005  /* ??? If FUNCTION_NAME_ALREADY_DECLARED is defined, then we are
8006     within a .ent, and we cannot emit another .ent.  */
8007  if (!FUNCTION_NAME_ALREADY_DECLARED)
8008    {
8009      fputs ("\t.ent\t", file);
8010      assemble_name (file, stubname);
8011      fputs ("\n", file);
8012    }
8013
8014  assemble_name (file, stubname);
8015  fputs (":\n", file);
8016
8017  /* We don't want the assembler to insert any nops here.  */
8018  fprintf (file, "\t.set\tnoreorder\n");
8019
8020  mips16_fp_args (file, current_function_args_info.fp_code, 1);
8021
  fprintf (file, "\t.set\tnoat\n");
  fprintf (file, "\tla\t%s,", reg_names[GP_REG_FIRST + 1]);
  assemble_name (file, fnname);
  fprintf (file, "\n");
  fprintf (file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
  fprintf (file, "\t.set\tat\n");
8028
  /* Unfortunately, we can't fill the jump delay slot.  We can't fill
     it with one of the mfc1 instructions, because the result is not
8031     available for one instruction, so if the very first instruction
8032     in the function refers to the register, it will see the wrong
8033     value.  */
8034  fprintf (file, "\tnop\n");
8035
8036  fprintf (file, "\t.set\treorder\n");
8037
8038  if (!FUNCTION_NAME_ALREADY_DECLARED)
8039    {
8040      fputs ("\t.end\t", file);
8041      assemble_name (file, stubname);
8042      fputs ("\n", file);
8043    }
8044
8045  fprintf (file, "\t.set\tmips16\n");
8046
8047  switch_to_section (function_section (current_function_decl));
8048}
8049
8050/* We keep a list of functions for which we have already built stubs
8051   in build_mips16_call_stub.  */
8052
8053struct mips16_stub
8054{
8055  struct mips16_stub *next;
8056  char *name;
8057  int fpret;
8058};
8059
8060static struct mips16_stub *mips16_stubs;
8061
8062/* Build a call stub for a mips16 call.  A stub is needed if we are
8063   passing any floating point values which should go into the floating
8064   point registers.  If we are, and the call turns out to be to a 32
8065   bit function, the stub will be used to move the values into the
8066   floating point registers before calling the 32 bit function.  The
8067   linker will magically adjust the function call to either the 16 bit
8068   function or the 32 bit stub, depending upon where the function call
8069   is actually defined.
8070
8071   Similarly, we need a stub if the return value might come back in a
8072   floating point register.
8073
8074   RETVAL is the location of the return value, or null if this is
8075   a call rather than a call_value.  FN is the address of the
8076   function and ARG_SIZE is the size of the arguments.  FP_CODE
8077   is the code built by function_arg.  This function returns a nonzero
8078   value if it builds the call instruction itself.  */
8079
8080int
8081build_mips16_call_stub (rtx retval, rtx fn, rtx arg_size, int fp_code)
8082{
8083  int fpret;
8084  const char *fnname;
8085  char *secname, *stubname;
8086  struct mips16_stub *l;
8087  tree stubid, stubdecl;
8088  int need_comma;
8089  unsigned int f;
8090
8091  /* We don't need to do anything if we aren't in mips16 mode, or if
8092     we were invoked with the -msoft-float option.  */
8093  if (! TARGET_MIPS16 || ! mips16_hard_float)
8094    return 0;
8095
8096  /* Figure out whether the value might come back in a floating point
8097     register.  */
8098  fpret = (retval != 0
8099	   && GET_MODE_CLASS (GET_MODE (retval)) == MODE_FLOAT
8100	   && GET_MODE_SIZE (GET_MODE (retval)) <= UNITS_PER_FPVALUE);
8101
8102  /* We don't need to do anything if there were no floating point
8103     arguments and the value will not be returned in a floating point
8104     register.  */
8105  if (fp_code == 0 && ! fpret)
8106    return 0;
8107
8108  /* We don't need to do anything if this is a call to a special
8109     mips16 support function.  */
8110  if (GET_CODE (fn) == SYMBOL_REF
8111      && strncmp (XSTR (fn, 0), "__mips16_", 9) == 0)
8112    return 0;
8113
  /* This code only works for the o32 and o64 ABIs.  The other ABIs
     require more sophisticated support.  */
8116  gcc_assert (TARGET_OLDABI);
8117
8118  /* We can only handle SFmode and DFmode floating point return
8119     values.  */
8120  if (fpret)
8121    gcc_assert (GET_MODE (retval) == SFmode || GET_MODE (retval) == DFmode);
8122
8123  /* If we're calling via a function pointer, then we must always call
8124     via a stub.  There are magic stubs provided in libgcc.a for each
8125     of the required cases.  Each of them expects the function address
8126     to arrive in register $2.  */
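  /* For example, a call through a pointer to a function that takes a
     single "double" argument and returns a "double" would go via
     __mips16_call_stub_df_2; see the sprintf below.  */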
8127
8128  if (GET_CODE (fn) != SYMBOL_REF)
8129    {
8130      char buf[30];
8131      tree id;
8132      rtx stub_fn, insn;
8133
8134      /* ??? If this code is modified to support other ABI's, we need
8135         to handle PARALLEL return values here.  */
8136
8137      sprintf (buf, "__mips16_call_stub_%s%d",
8138	       (fpret
8139		? (GET_MODE (retval) == SFmode ? "sf_" : "df_")
8140		: ""),
8141	       fp_code);
8142      id = get_identifier (buf);
8143      stub_fn = gen_rtx_SYMBOL_REF (Pmode, IDENTIFIER_POINTER (id));
8144
8145      emit_move_insn (gen_rtx_REG (Pmode, 2), fn);
8146
8147      if (retval == NULL_RTX)
8148	insn = gen_call_internal (stub_fn, arg_size);
8149      else
8150	insn = gen_call_value_internal (retval, stub_fn, arg_size);
8151      insn = emit_call_insn (insn);
8152
8153      /* Put the register usage information on the CALL.  */
8154      CALL_INSN_FUNCTION_USAGE (insn) =
8155	gen_rtx_EXPR_LIST (VOIDmode,
8156			   gen_rtx_USE (VOIDmode, gen_rtx_REG (Pmode, 2)),
8157			   CALL_INSN_FUNCTION_USAGE (insn));
8158
8159      /* If we are handling a floating point return value, we need to
8160         save $18 in the function prologue.  Putting a note on the
8161         call will mean that regs_ever_live[$18] will be true if the
8162         call is not eliminated, and we can check that in the prologue
8163         code.  */
8164      if (fpret)
8165	CALL_INSN_FUNCTION_USAGE (insn) =
8166	  gen_rtx_EXPR_LIST (VOIDmode,
8167			     gen_rtx_USE (VOIDmode,
8168					  gen_rtx_REG (word_mode, 18)),
8169			     CALL_INSN_FUNCTION_USAGE (insn));
8170
8171      /* Return 1 to tell the caller that we've generated the call
8172         insn.  */
8173      return 1;
8174    }
8175
8176  /* We know the function we are going to call.  If we have already
8177     built a stub, we don't need to do anything further.  */
8178
8179  fnname = XSTR (fn, 0);
8180  for (l = mips16_stubs; l != NULL; l = l->next)
8181    if (strcmp (l->name, fnname) == 0)
8182      break;
8183
8184  if (l == NULL)
8185    {
8186      /* Build a special purpose stub.  When the linker sees a
8187	 function call in mips16 code, it will check where the target
8188	 is defined.  If the target is a 32 bit call, the linker will
8189	 search for the section defined here.  It can tell which
8190	 symbol this section is associated with by looking at the
8191	 relocation information (the name is unreliable, since this
8192	 might be a static function).  If such a section is found, the
8193	 linker will redirect the call to the start of the magic
8194	 section.
8195
8196	 If the function does not return a floating point value, the
8197	 special stub section is named
8198	     .mips16.call.FNNAME
8199
8200	 If the function does return a floating point value, the stub
8201	 section is named
8202	     .mips16.call.fp.FNNAME
8203	 */
8204
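      /* For illustration only: for a hypothetical "float foo (float)",
	 the stub emitted below looks roughly like this (exact
	 directives and register choices depend on the configuration):

	     .section	.mips16.call.fp.foo
	     .set	nomips16
	     .ent	__call_stub_fp_foo
	 __call_stub_fp_foo:
	     .set	noreorder
	     mtc1	$4,$f12		# copy the float argument into $f12
	     move	$18,$31		# save the return address in $18
	     jal	foo
	     nop
	     mfc1	$2,$f0		# copy the float result back to $2
	     j		$18
	     nop
	     .set	reorder
	     .set	mips16  */
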
8205      secname = (char *) alloca (strlen (fnname) + 40);
8206      sprintf (secname, ".mips16.call.%s%s",
8207	       fpret ? "fp." : "",
8208	       fnname);
8209      stubname = (char *) alloca (strlen (fnname) + 20);
8210      sprintf (stubname, "__call_stub_%s%s",
8211	       fpret ? "fp_" : "",
8212	       fnname);
8213      stubid = get_identifier (stubname);
8214      stubdecl = build_decl (FUNCTION_DECL, stubid,
8215			     build_function_type (void_type_node, NULL_TREE));
8216      DECL_SECTION_NAME (stubdecl) = build_string (strlen (secname), secname);
8217
8218      fprintf (asm_out_file, "\t# Stub function to call %s%s (",
8219	       (fpret
8220		? (GET_MODE (retval) == SFmode ? "float " : "double ")
8221		: ""),
8222	       fnname);
8223      need_comma = 0;
8224      for (f = (unsigned int) fp_code; f != 0; f >>= 2)
8225	{
8226	  fprintf (asm_out_file, "%s%s",
8227		   need_comma ? ", " : "",
8228		   (f & 3) == 1 ? "float" : "double");
8229	  need_comma = 1;
8230	}
8231      fprintf (asm_out_file, ")\n");
8232
8233      fprintf (asm_out_file, "\t.set\tnomips16\n");
8234      assemble_start_function (stubdecl, stubname);
8235
8236      if (!FUNCTION_NAME_ALREADY_DECLARED)
8237	{
8238	  fputs ("\t.ent\t", asm_out_file);
8239	  assemble_name (asm_out_file, stubname);
8240	  fputs ("\n", asm_out_file);
8241
8242	  assemble_name (asm_out_file, stubname);
8243	  fputs (":\n", asm_out_file);
8244	}
8245
8246      /* We build the stub code by hand.  That's the only way we can
8247	 do it, since we can't generate 32 bit code during a 16 bit
8248	 compilation.  */
8249
8250      /* We don't want the assembler to insert any nops here.  */
8251      fprintf (asm_out_file, "\t.set\tnoreorder\n");
8252
8253      mips16_fp_args (asm_out_file, fp_code, 0);
8254
8255      if (! fpret)
8256	{
8257	  fprintf (asm_out_file, "\t.set\tnoat\n");
8258	  fprintf (asm_out_file, "\tla\t%s,%s\n", reg_names[GP_REG_FIRST + 1],
8259		   fnname);
8260	  fprintf (asm_out_file, "\tjr\t%s\n", reg_names[GP_REG_FIRST + 1]);
8261	  fprintf (asm_out_file, "\t.set\tat\n");
8262	  /* Unfortunately, we can't fill the jump delay slot.  We
	     can't fill it with one of the mtc1 instructions, because the
8264	     result is not available for one instruction, so if the
8265	     very first instruction in the function refers to the
8266	     register, it will see the wrong value.  */
8267	  fprintf (asm_out_file, "\tnop\n");
8268	}
8269      else
8270	{
8271	  fprintf (asm_out_file, "\tmove\t%s,%s\n",
8272		   reg_names[GP_REG_FIRST + 18], reg_names[GP_REG_FIRST + 31]);
8273	  fprintf (asm_out_file, "\tjal\t%s\n", fnname);
8274	  /* As above, we can't fill the delay slot.  */
8275	  fprintf (asm_out_file, "\tnop\n");
8276	  if (GET_MODE (retval) == SFmode)
8277	    fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8278		     reg_names[GP_REG_FIRST + 2], reg_names[FP_REG_FIRST + 0]);
8279	  else
8280	    {
8281	      if (TARGET_BIG_ENDIAN)
8282		{
8283		  fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8284			   reg_names[GP_REG_FIRST + 2],
8285			   reg_names[FP_REG_FIRST + 1]);
8286		  fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8287			   reg_names[GP_REG_FIRST + 3],
8288			   reg_names[FP_REG_FIRST + 0]);
8289		}
8290	      else
8291		{
8292		  fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8293			   reg_names[GP_REG_FIRST + 2],
8294			   reg_names[FP_REG_FIRST + 0]);
8295		  fprintf (asm_out_file, "\tmfc1\t%s,%s\n",
8296			   reg_names[GP_REG_FIRST + 3],
8297			   reg_names[FP_REG_FIRST + 1]);
8298		}
8299	    }
8300	  fprintf (asm_out_file, "\tj\t%s\n", reg_names[GP_REG_FIRST + 18]);
8301	  /* As above, we can't fill the delay slot.  */
8302	  fprintf (asm_out_file, "\tnop\n");
8303	}
8304
8305      fprintf (asm_out_file, "\t.set\treorder\n");
8306
8307#ifdef ASM_DECLARE_FUNCTION_SIZE
8308      ASM_DECLARE_FUNCTION_SIZE (asm_out_file, stubname, stubdecl);
8309#endif
8310
8311      if (!FUNCTION_NAME_ALREADY_DECLARED)
8312	{
8313	  fputs ("\t.end\t", asm_out_file);
8314	  assemble_name (asm_out_file, stubname);
8315	  fputs ("\n", asm_out_file);
8316	}
8317
8318      fprintf (asm_out_file, "\t.set\tmips16\n");
8319
8320      /* Record this stub.  */
8321      l = (struct mips16_stub *) xmalloc (sizeof *l);
8322      l->name = xstrdup (fnname);
8323      l->fpret = fpret;
8324      l->next = mips16_stubs;
8325      mips16_stubs = l;
8326    }
8327
8328  /* If we expect a floating point return value, but we've built a
8329     stub which does not expect one, then we're in trouble.  We can't
8330     use the existing stub, because it won't handle the floating point
8331     value.  We can't build a new stub, because the linker won't know
8332     which stub to use for the various calls in this object file.
8333     Fortunately, this case is illegal, since it means that a function
8334     was declared in two different ways in a single compilation.  */
8335  if (fpret && ! l->fpret)
8336    error ("cannot handle inconsistent calls to %qs", fnname);
8337
8338  /* If we are calling a stub which handles a floating point return
8339     value, we need to arrange to save $18 in the prologue.  We do
8340     this by marking the function call as using the register.  The
8341     prologue will later see that it is used, and emit code to save
8342     it.  */
8343
8344  if (l->fpret)
8345    {
8346      rtx insn;
8347
8348      if (retval == NULL_RTX)
8349	insn = gen_call_internal (fn, arg_size);
8350      else
8351	insn = gen_call_value_internal (retval, fn, arg_size);
8352      insn = emit_call_insn (insn);
8353
8354      CALL_INSN_FUNCTION_USAGE (insn) =
8355	gen_rtx_EXPR_LIST (VOIDmode,
8356			   gen_rtx_USE (VOIDmode, gen_rtx_REG (word_mode, 18)),
8357			   CALL_INSN_FUNCTION_USAGE (insn));
8358
8359      /* Return 1 to tell the caller that we've generated the call
8360         insn.  */
8361      return 1;
8362    }
8363
8364  /* Return 0 to let the caller generate the call insn.  */
8365  return 0;
8366}
8367
8368/* An entry in the mips16 constant pool.  VALUE is the pool constant,
8369   MODE is its mode, and LABEL is the CODE_LABEL associated with it.  */
8370
8371struct mips16_constant {
8372  struct mips16_constant *next;
8373  rtx value;
8374  rtx label;
8375  enum machine_mode mode;
8376};
8377
8378/* Information about an incomplete mips16 constant pool.  FIRST is the
8379   first constant, HIGHEST_ADDRESS is the highest address that the first
8380   byte of the pool can have, and INSN_ADDRESS is the current instruction
8381   address.  */
8382
8383struct mips16_constant_pool {
8384  struct mips16_constant *first;
8385  int highest_address;
8386  int insn_address;
8387};
8388
8389/* Add constant VALUE to POOL and return its label.  MODE is the
8390   value's mode (used for CONST_INTs, etc.).  */
8391
8392static rtx
8393add_constant (struct mips16_constant_pool *pool,
8394	      rtx value, enum machine_mode mode)
8395{
8396  struct mips16_constant **p, *c;
8397  bool first_of_size_p;
8398
8399  /* See whether the constant is already in the pool.  If so, return the
8400     existing label, otherwise leave P pointing to the place where the
8401     constant should be added.
8402
8403     Keep the pool sorted in increasing order of mode size so that we can
8404     reduce the number of alignments needed.  */
8405  first_of_size_p = true;
8406  for (p = &pool->first; *p != 0; p = &(*p)->next)
8407    {
8408      if (mode == (*p)->mode && rtx_equal_p (value, (*p)->value))
8409	return (*p)->label;
8410      if (GET_MODE_SIZE (mode) < GET_MODE_SIZE ((*p)->mode))
8411	break;
8412      if (GET_MODE_SIZE (mode) == GET_MODE_SIZE ((*p)->mode))
8413	first_of_size_p = false;
8414    }
8415
8416  /* In the worst case, the constant needed by the earliest instruction
8417     will end up at the end of the pool.  The entire pool must then be
8418     accessible from that instruction.
8419
8420     When adding the first constant, set the pool's highest address to
8421     the address of the first out-of-range byte.  Adjust this address
8422     downwards each time a new constant is added.  */
8423  if (pool->first == 0)
8424    /* For pc-relative lw, addiu and daddiu instructions, the base PC value
8425       is the address of the instruction with the lowest two bits clear.
8426       The base PC value for ld has the lowest three bits clear.  Assume
8427       the worst case here.  */
8428    pool->highest_address = pool->insn_address - (UNITS_PER_WORD - 2) + 0x8000;
8429  pool->highest_address -= GET_MODE_SIZE (mode);
8430  if (first_of_size_p)
8431    /* Take into account the worst possible padding due to alignment.  */
8432    pool->highest_address -= GET_MODE_SIZE (mode) - 1;
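
  /* Illustrative arithmetic: on a 32-bit target (UNITS_PER_WORD == 4),
     adding the first SImode constant when INSN_ADDRESS is 100 sets the
     highest address to 100 - 2 + 0x8000 = 32866, then reduces it to
     32862 for the constant itself and to 32859 for worst-case
     alignment padding.  */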
8433
8434  /* Create a new entry.  */
8435  c = (struct mips16_constant *) xmalloc (sizeof *c);
8436  c->value = value;
8437  c->mode = mode;
8438  c->label = gen_label_rtx ();
8439  c->next = *p;
8440  *p = c;
8441
8442  return c->label;
8443}
8444
8445/* Output constant VALUE after instruction INSN and return the last
8446   instruction emitted.  MODE is the mode of the constant.  */
8447
8448static rtx
8449dump_constants_1 (enum machine_mode mode, rtx value, rtx insn)
8450{
8451  switch (GET_MODE_CLASS (mode))
8452    {
8453    case MODE_INT:
8454      {
8455	rtx size = GEN_INT (GET_MODE_SIZE (mode));
8456	return emit_insn_after (gen_consttable_int (value, size), insn);
8457      }
8458
8459    case MODE_FLOAT:
8460      return emit_insn_after (gen_consttable_float (value), insn);
8461
8462    case MODE_VECTOR_FLOAT:
8463    case MODE_VECTOR_INT:
8464      {
8465	int i;
8466	for (i = 0; i < CONST_VECTOR_NUNITS (value); i++)
8467	  insn = dump_constants_1 (GET_MODE_INNER (mode),
8468				   CONST_VECTOR_ELT (value, i), insn);
8469	return insn;
8470      }
8471
8472    default:
8473      gcc_unreachable ();
8474    }
8475}
8476
8477
8478/* Dump out the constants in CONSTANTS after INSN.  */
8479
8480static void
8481dump_constants (struct mips16_constant *constants, rtx insn)
8482{
8483  struct mips16_constant *c, *next;
8484  int align;
8485
8486  align = 0;
8487  for (c = constants; c != NULL; c = next)
8488    {
8489      /* If necessary, increase the alignment of PC.  */
8490      if (align < GET_MODE_SIZE (c->mode))
8491	{
8492	  int align_log = floor_log2 (GET_MODE_SIZE (c->mode));
8493	  insn = emit_insn_after (gen_align (GEN_INT (align_log)), insn);
8494	}
8495      align = GET_MODE_SIZE (c->mode);
8496
8497      insn = emit_label_after (c->label, insn);
8498      insn = dump_constants_1 (c->mode, c->value, insn);
8499
8500      next = c->next;
8501      free (c);
8502    }
8503
8504  emit_barrier_after (insn);
8505}
8506
8507/* Return the length of instruction INSN.  */
8508
8509static int
8510mips16_insn_length (rtx insn)
8511{
8512  if (JUMP_P (insn))
8513    {
8514      rtx body = PATTERN (insn);
8515      if (GET_CODE (body) == ADDR_VEC)
8516	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 0);
8517      if (GET_CODE (body) == ADDR_DIFF_VEC)
8518	return GET_MODE_SIZE (GET_MODE (body)) * XVECLEN (body, 1);
8519    }
8520  return get_attr_length (insn);
8521}
8522
8523/* Rewrite *X so that constant pool references refer to the constant's
8524   label instead.  DATA points to the constant pool structure.  */
8525
8526static int
8527mips16_rewrite_pool_refs (rtx *x, void *data)
8528{
8529  struct mips16_constant_pool *pool = data;
8530  if (GET_CODE (*x) == SYMBOL_REF && CONSTANT_POOL_ADDRESS_P (*x))
8531    *x = gen_rtx_LABEL_REF (Pmode, add_constant (pool,
8532						 get_pool_constant (*x),
8533						 get_pool_mode (*x)));
8534  return 0;
8535}
8536
8537/* Build MIPS16 constant pools.  */
8538
8539static void
8540mips16_lay_out_constants (void)
8541{
8542  struct mips16_constant_pool pool;
8543  rtx insn, barrier;
8544
8545  barrier = 0;
8546  memset (&pool, 0, sizeof (pool));
8547  for (insn = get_insns (); insn; insn = NEXT_INSN (insn))
8548    {
8549      /* Rewrite constant pool references in INSN.  */
8550      if (INSN_P (insn))
8551	for_each_rtx (&PATTERN (insn), mips16_rewrite_pool_refs, &pool);
8552
8553      pool.insn_address += mips16_insn_length (insn);
8554
8555      if (pool.first != NULL)
8556	{
8557	  /* If there are no natural barriers between the first user of
8558	     the pool and the highest acceptable address, we'll need to
8559	     create a new instruction to jump around the constant pool.
8560	     In the worst case, this instruction will be 4 bytes long.
8561
8562	     If it's too late to do this transformation after INSN,
8563	     do it immediately before INSN.  */
8564	  if (barrier == 0 && pool.insn_address + 4 > pool.highest_address)
8565	    {
8566	      rtx label, jump;
8567
8568	      label = gen_label_rtx ();
8569
8570	      jump = emit_jump_insn_before (gen_jump (label), insn);
8571	      JUMP_LABEL (jump) = label;
8572	      LABEL_NUSES (label) = 1;
8573	      barrier = emit_barrier_after (jump);
8574
8575	      emit_label_after (label, barrier);
8576	      pool.insn_address += 4;
8577	    }
8578
8579	  /* See whether the constant pool is now out of range of the first
8580	     user.  If so, output the constants after the previous barrier.
8581	     Note that any instructions between BARRIER and INSN (inclusive)
8582	     will use negative offsets to refer to the pool.  */
8583	  if (pool.insn_address > pool.highest_address)
8584	    {
8585	      dump_constants (pool.first, barrier);
8586	      pool.first = NULL;
8587	      barrier = 0;
8588	    }
8589	  else if (BARRIER_P (insn))
8590	    barrier = insn;
8591	}
8592    }
8593  dump_constants (pool.first, get_last_insn ());
8594}
8595
8596/* A temporary variable used by for_each_rtx callbacks, etc.  */
8597static rtx mips_sim_insn;
8598
8599/* A structure representing the state of the processor pipeline.
8600   Used by the mips_sim_* family of functions.  */
8601struct mips_sim {
8602  /* The maximum number of instructions that can be issued in a cycle.
8603     (Caches mips_issue_rate.)  */
8604  unsigned int issue_rate;
8605
8606  /* The current simulation time.  */
8607  unsigned int time;
8608
8609  /* How many more instructions can be issued in the current cycle.  */
8610  unsigned int insns_left;
8611
8612  /* LAST_SET[X].INSN is the last instruction to set register X.
8613     LAST_SET[X].TIME is the time at which that instruction was issued.
8614     INSN is null if no instruction has yet set register X.  */
8615  struct {
8616    rtx insn;
8617    unsigned int time;
8618  } last_set[FIRST_PSEUDO_REGISTER];
8619
8620  /* The pipeline's current DFA state.  */
8621  state_t dfa_state;
8622};
8623
8624/* Reset STATE to the initial simulation state.  */
8625
8626static void
8627mips_sim_reset (struct mips_sim *state)
8628{
8629  state->time = 0;
8630  state->insns_left = state->issue_rate;
8631  memset (&state->last_set, 0, sizeof (state->last_set));
8632  state_reset (state->dfa_state);
8633}
8634
8635/* Initialize STATE before its first use.  DFA_STATE points to an
8636   allocated but uninitialized DFA state.  */
8637
8638static void
8639mips_sim_init (struct mips_sim *state, state_t dfa_state)
8640{
8641  state->issue_rate = mips_issue_rate ();
8642  state->dfa_state = dfa_state;
8643  mips_sim_reset (state);
8644}
8645
8646/* Advance STATE by one clock cycle.  */
8647
8648static void
8649mips_sim_next_cycle (struct mips_sim *state)
8650{
8651  state->time++;
8652  state->insns_left = state->issue_rate;
8653  state_transition (state->dfa_state, 0);
8654}
8655
8656/* Advance simulation state STATE until instruction INSN can read
8657   register REG.  */
8658
8659static void
8660mips_sim_wait_reg (struct mips_sim *state, rtx insn, rtx reg)
8661{
8662  unsigned int i;
8663
8664  for (i = 0; i < HARD_REGNO_NREGS (REGNO (reg), GET_MODE (reg)); i++)
8665    if (state->last_set[REGNO (reg) + i].insn != 0)
8666      {
8667	unsigned int t;
8668
8669	t = state->last_set[REGNO (reg) + i].time;
8670	t += insn_latency (state->last_set[REGNO (reg) + i].insn, insn);
8671	while (state->time < t)
8672	  mips_sim_next_cycle (state);
8673    }
8674}
8675
8676/* A for_each_rtx callback.  If *X is a register, advance simulation state
8677   DATA until mips_sim_insn can read the register's value.  */
8678
8679static int
8680mips_sim_wait_regs_2 (rtx *x, void *data)
8681{
8682  if (REG_P (*x))
8683    mips_sim_wait_reg (data, mips_sim_insn, *x);
8684  return 0;
8685}
8686
8687/* Call mips_sim_wait_regs_2 (R, DATA) for each register R mentioned in *X.  */
8688
8689static void
8690mips_sim_wait_regs_1 (rtx *x, void *data)
8691{
8692  for_each_rtx (x, mips_sim_wait_regs_2, data);
8693}
8694
8695/* Advance simulation state STATE until all of INSN's register
8696   dependencies are satisfied.  */
8697
8698static void
8699mips_sim_wait_regs (struct mips_sim *state, rtx insn)
8700{
8701  mips_sim_insn = insn;
8702  note_uses (&PATTERN (insn), mips_sim_wait_regs_1, state);
8703}
8704
8705/* Advance simulation state STATE until the units required by
8706   instruction INSN are available.  */
8707
8708static void
8709mips_sim_wait_units (struct mips_sim *state, rtx insn)
8710{
8711  state_t tmp_state;
8712
8713  tmp_state = alloca (state_size ());
8714  while (state->insns_left == 0
8715	 || (memcpy (tmp_state, state->dfa_state, state_size ()),
8716	     state_transition (tmp_state, insn) >= 0))
8717    mips_sim_next_cycle (state);
8718}
8719
8720/* Advance simulation state STATE until INSN is ready to issue.  */
8721
8722static void
8723mips_sim_wait_insn (struct mips_sim *state, rtx insn)
8724{
8725  mips_sim_wait_regs (state, insn);
8726  mips_sim_wait_units (state, insn);
8727}
8728
8729/* mips_sim_insn has just set X.  Update the LAST_SET array
8730   in simulation state DATA.  */
8731
8732static void
8733mips_sim_record_set (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
8734{
8735  struct mips_sim *state;
8736  unsigned int i;
8737
8738  state = data;
8739  if (REG_P (x))
8740    for (i = 0; i < HARD_REGNO_NREGS (REGNO (x), GET_MODE (x)); i++)
8741      {
8742	state->last_set[REGNO (x) + i].insn = mips_sim_insn;
8743	state->last_set[REGNO (x) + i].time = state->time;
8744      }
8745}
8746
8747/* Issue instruction INSN in scheduler state STATE.  Assume that INSN
8748   can issue immediately (i.e., that mips_sim_wait_insn has already
8749   been called).  */
8750
8751static void
8752mips_sim_issue_insn (struct mips_sim *state, rtx insn)
8753{
8754  state_transition (state->dfa_state, insn);
8755  state->insns_left--;
8756
8757  mips_sim_insn = insn;
8758  note_stores (PATTERN (insn), mips_sim_record_set, state);
8759}
8760
8761/* Simulate issuing a NOP in state STATE.  */
8762
8763static void
8764mips_sim_issue_nop (struct mips_sim *state)
8765{
8766  if (state->insns_left == 0)
8767    mips_sim_next_cycle (state);
8768  state->insns_left--;
8769}
8770
8771/* Update simulation state STATE so that it's ready to accept the instruction
8772   after INSN.  INSN should be part of the main rtl chain, not a member of a
8773   SEQUENCE.  */
8774
8775static void
8776mips_sim_finish_insn (struct mips_sim *state, rtx insn)
8777{
8778  /* If INSN is a jump with an implicit delay slot, simulate a nop.  */
8779  if (JUMP_P (insn))
8780    mips_sim_issue_nop (state);
8781
8782  switch (GET_CODE (SEQ_BEGIN (insn)))
8783    {
8784    case CODE_LABEL:
8785    case CALL_INSN:
8786      /* We can't predict the processor state after a call or label.  */
8787      mips_sim_reset (state);
8788      break;
8789
8790    case JUMP_INSN:
8791      /* The delay slots of branch likely instructions are only executed
8792	 when the branch is taken.  Therefore, if the caller has simulated
8793	 the delay slot instruction, STATE does not really reflect the state
8794	 of the pipeline for the instruction after the delay slot.  Also,
8795	 branch likely instructions tend to incur a penalty when not taken,
8796	 so there will probably be an extra delay between the branch and
8797	 the instruction after the delay slot.  */
8798      if (INSN_ANNULLED_BRANCH_P (SEQ_BEGIN (insn)))
8799	mips_sim_reset (state);
8800      break;
8801
8802    default:
8803      break;
8804    }
8805}
8806
8807/* The VR4130 pipeline issues aligned pairs of instructions together,
8808   but it stalls the second instruction if it depends on the first.
8809   In order to cut down the amount of logic required, this dependence
8810   check is not based on a full instruction decode.  Instead, any non-SPECIAL
8811   instruction is assumed to modify the register specified by bits 20-16
8812   (which is usually the "rt" field).
8813
8814   In beq, beql, bne and bnel instructions, the rt field is actually an
8815   input, so we can end up with a false dependence between the branch
8816   and its delay slot.  If this situation occurs in instruction INSN,
8817   try to avoid it by swapping rs and rt.  */
8818
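/* For example (illustrative only): in the sequence

	beq	$4,$5,1f
	addiu	$5,$5,1		# delay slot; reads $5, the branch's rt field

   the check described above sees a false dependence between the branch
   and its delay slot.  Swapping the operands to "beq $5,$4,1f" removes
   the false dependence without changing the branch condition.  */
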
8819static void
8820vr4130_avoid_branch_rt_conflict (rtx insn)
8821{
8822  rtx first, second;
8823
8824  first = SEQ_BEGIN (insn);
8825  second = SEQ_END (insn);
8826  if (JUMP_P (first)
8827      && NONJUMP_INSN_P (second)
8828      && GET_CODE (PATTERN (first)) == SET
8829      && GET_CODE (SET_DEST (PATTERN (first))) == PC
8830      && GET_CODE (SET_SRC (PATTERN (first))) == IF_THEN_ELSE)
8831    {
8832      /* Check for the right kind of condition.  */
8833      rtx cond = XEXP (SET_SRC (PATTERN (first)), 0);
8834      if ((GET_CODE (cond) == EQ || GET_CODE (cond) == NE)
8835	  && REG_P (XEXP (cond, 0))
8836	  && REG_P (XEXP (cond, 1))
8837	  && reg_referenced_p (XEXP (cond, 1), PATTERN (second))
8838	  && !reg_referenced_p (XEXP (cond, 0), PATTERN (second)))
8839	{
8840	  /* SECOND mentions the rt register but not the rs register.  */
8841	  rtx tmp = XEXP (cond, 0);
8842	  XEXP (cond, 0) = XEXP (cond, 1);
8843	  XEXP (cond, 1) = tmp;
8844	}
8845    }
8846}
8847
8848/* Implement -mvr4130-align.  Go through each basic block and simulate the
8849   processor pipeline.  If we find that a pair of instructions could execute
   in parallel, and the first of those instructions is not 8-byte aligned,
8851   insert a nop to make it aligned.  */
8852
8853static void
8854vr4130_align_insns (void)
8855{
8856  struct mips_sim state;
8857  rtx insn, subinsn, last, last2, next;
8858  bool aligned_p;
8859
8860  dfa_start ();
8861
8862  /* LAST is the last instruction before INSN to have a nonzero length.
8863     LAST2 is the last such instruction before LAST.  */
8864  last = 0;
8865  last2 = 0;
8866
8867  /* ALIGNED_P is true if INSN is known to be at an aligned address.  */
8868  aligned_p = true;
8869
8870  mips_sim_init (&state, alloca (state_size ()));
8871  for (insn = get_insns (); insn != 0; insn = next)
8872    {
8873      unsigned int length;
8874
8875      next = NEXT_INSN (insn);
8876
8877      /* See the comment above vr4130_avoid_branch_rt_conflict for details.
8878	 This isn't really related to the alignment pass, but we do it on
8879	 the fly to avoid a separate instruction walk.  */
8880      vr4130_avoid_branch_rt_conflict (insn);
8881
8882      if (USEFUL_INSN_P (insn))
8883	FOR_EACH_SUBINSN (subinsn, insn)
8884	  {
8885	    mips_sim_wait_insn (&state, subinsn);
8886
8887	    /* If we want this instruction to issue in parallel with the
8888	       previous one, make sure that the previous instruction is
8889	       aligned.  There are several reasons why this isn't worthwhile
8890	       when the second instruction is a call:
8891
	          - Calls are less likely to be performance critical.
8893		  - There's a good chance that the delay slot can execute
8894		    in parallel with the call.
8895	          - The return address would then be unaligned.
8896
8897	       In general, if we're going to insert a nop between instructions
8898	       X and Y, it's better to insert it immediately after X.  That
8899	       way, if the nop makes Y aligned, it will also align any labels
8900	       between X and Y.  */
8901	    if (state.insns_left != state.issue_rate
8902		&& !CALL_P (subinsn))
8903	      {
8904		if (subinsn == SEQ_BEGIN (insn) && aligned_p)
8905		  {
8906		    /* SUBINSN is the first instruction in INSN and INSN is
8907		       aligned.  We want to align the previous instruction
8908		       instead, so insert a nop between LAST2 and LAST.
8909
8910		       Note that LAST could be either a single instruction
8911		       or a branch with a delay slot.  In the latter case,
8912		       LAST, like INSN, is already aligned, but the delay
8913		       slot must have some extra delay that stops it from
8914		       issuing at the same time as the branch.  We therefore
8915		       insert a nop before the branch in order to align its
8916		       delay slot.  */
8917		    emit_insn_after (gen_nop (), last2);
8918		    aligned_p = false;
8919		  }
8920		else if (subinsn != SEQ_BEGIN (insn) && !aligned_p)
8921		  {
8922		    /* SUBINSN is the delay slot of INSN, but INSN is
8923		       currently unaligned.  Insert a nop between
8924		       LAST and INSN to align it.  */
8925		    emit_insn_after (gen_nop (), last);
8926		    aligned_p = true;
8927		  }
8928	      }
8929	    mips_sim_issue_insn (&state, subinsn);
8930	  }
8931      mips_sim_finish_insn (&state, insn);
8932
8933      /* Update LAST, LAST2 and ALIGNED_P for the next instruction.  */
8934      length = get_attr_length (insn);
8935      if (length > 0)
8936	{
	  /* If the instruction is an asm statement or multi-instruction
	     mips.md pattern, the length is only an estimate.  Insert an
	     8-byte alignment after it so that the following instructions
	     can be handled correctly.  */
8941	  if (NONJUMP_INSN_P (SEQ_BEGIN (insn))
8942	      && (recog_memoized (insn) < 0 || length >= 8))
8943	    {
8944	      next = emit_insn_after (gen_align (GEN_INT (3)), insn);
8945	      next = NEXT_INSN (next);
8946	      mips_sim_next_cycle (&state);
8947	      aligned_p = true;
8948	    }
8949	  else if (length & 4)
8950	    aligned_p = !aligned_p;
8951	  last2 = last;
8952	  last = insn;
8953	}
8954
8955      /* See whether INSN is an aligned label.  */
8956      if (LABEL_P (insn) && label_to_alignment (insn) >= 3)
8957	aligned_p = true;
8958    }
8959  dfa_finish ();
8960}
8961
8962/* Subroutine of mips_reorg.  If there is a hazard between INSN
8963   and a previous instruction, avoid it by inserting nops after
8964   instruction AFTER.
8965
8966   *DELAYED_REG and *HILO_DELAY describe the hazards that apply at
8967   this point.  If *DELAYED_REG is non-null, INSN must wait a cycle
8968   before using the value of that register.  *HILO_DELAY counts the
8969   number of instructions since the last hilo hazard (that is,
8970   the number of instructions since the last mflo or mfhi).
8971
8972   After inserting nops for INSN, update *DELAYED_REG and *HILO_DELAY
8973   for the next instruction.
8974
8975   LO_REG is an rtx for the LO register, used in dependence checking.  */
8976
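/* For example (illustrative): a "mult", which writes LO, issued
   immediately after an "mflo" sees *HILO_DELAY == 0 and so gets two
   nops inserted before it; if one unrelated instruction already
   separates the pair, a single nop is enough.  */
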
8977static void
8978mips_avoid_hazard (rtx after, rtx insn, int *hilo_delay,
8979		   rtx *delayed_reg, rtx lo_reg)
8980{
8981  rtx pattern, set;
8982  int nops, ninsns;
8983
8984  if (!INSN_P (insn))
8985    return;
8986
8987  pattern = PATTERN (insn);
8988
8989  /* Do not put the whole function in .set noreorder if it contains
8990     an asm statement.  We don't know whether there will be hazards
8991     between the asm statement and the gcc-generated code.  */
8992  if (GET_CODE (pattern) == ASM_INPUT || asm_noperands (pattern) >= 0)
8993    cfun->machine->all_noreorder_p = false;
8994
8995  /* Ignore zero-length instructions (barriers and the like).  */
8996  ninsns = get_attr_length (insn) / 4;
8997  if (ninsns == 0)
8998    return;
8999
9000  /* Work out how many nops are needed.  Note that we only care about
9001     registers that are explicitly mentioned in the instruction's pattern.
9002     It doesn't matter that calls use the argument registers or that they
9003     clobber hi and lo.  */
9004  if (*hilo_delay < 2 && reg_set_p (lo_reg, pattern))
9005    nops = 2 - *hilo_delay;
9006  else if (*delayed_reg != 0 && reg_referenced_p (*delayed_reg, pattern))
9007    nops = 1;
9008  else
9009    nops = 0;
9010
9011  /* Insert the nops between this instruction and the previous one.
9012     Each new nop takes us further from the last hilo hazard.  */
9013  *hilo_delay += nops;
9014  while (nops-- > 0)
9015    emit_insn_after (gen_hazard_nop (), after);
9016
9017  /* Set up the state for the next instruction.  */
9018  *hilo_delay += ninsns;
9019  *delayed_reg = 0;
9020  if (INSN_CODE (insn) >= 0)
9021    switch (get_attr_hazard (insn))
9022      {
9023      case HAZARD_NONE:
9024	break;
9025
9026      case HAZARD_HILO:
9027	*hilo_delay = 0;
9028	break;
9029
9030      case HAZARD_DELAY:
9031	set = single_set (insn);
9032	gcc_assert (set != 0);
9033	*delayed_reg = SET_DEST (set);
9034	break;
9035      }
9036}
9037
9038
9039/* Go through the instruction stream and insert nops where necessary.
9040   See if the whole function can then be put into .set noreorder &
9041   .set nomacro.  */
9042
9043static void
9044mips_avoid_hazards (void)
9045{
9046  rtx insn, last_insn, lo_reg, delayed_reg;
9047  int hilo_delay, i;
9048
9049  /* Force all instructions to be split into their final form.  */
9050  split_all_insns_noflow ();
9051
9052  /* Recalculate instruction lengths without taking nops into account.  */
9053  cfun->machine->ignore_hazard_length_p = true;
9054  shorten_branches (get_insns ());
9055
9056  cfun->machine->all_noreorder_p = true;
9057
9058  /* Profiled functions can't be all noreorder because the profiler
9059     support uses assembler macros.  */
9060  if (current_function_profile)
9061    cfun->machine->all_noreorder_p = false;
9062
9063  /* Code compiled with -mfix-vr4120 can't be all noreorder because
9064     we rely on the assembler to work around some errata.  */
9065  if (TARGET_FIX_VR4120)
9066    cfun->machine->all_noreorder_p = false;
9067
9068  /* The same is true for -mfix-vr4130 if we might generate mflo or
9069     mfhi instructions.  Note that we avoid using mflo and mfhi if
9070     the VR4130 macc and dmacc instructions are available instead;
9071     see the *mfhilo_{si,di}_macc patterns.  */
9072  if (TARGET_FIX_VR4130 && !ISA_HAS_MACCHI)
9073    cfun->machine->all_noreorder_p = false;
9074
9075  last_insn = 0;
9076  hilo_delay = 2;
9077  delayed_reg = 0;
9078  lo_reg = gen_rtx_REG (SImode, LO_REGNUM);
9079
9080  for (insn = get_insns (); insn != 0; insn = NEXT_INSN (insn))
9081    if (INSN_P (insn))
9082      {
9083	if (GET_CODE (PATTERN (insn)) == SEQUENCE)
9084	  for (i = 0; i < XVECLEN (PATTERN (insn), 0); i++)
9085	    mips_avoid_hazard (last_insn, XVECEXP (PATTERN (insn), 0, i),
9086			       &hilo_delay, &delayed_reg, lo_reg);
9087	else
9088	  mips_avoid_hazard (last_insn, insn, &hilo_delay,
9089			     &delayed_reg, lo_reg);
9090
9091	last_insn = insn;
9092      }
9093}
9094
9095
9096/* Implement TARGET_MACHINE_DEPENDENT_REORG.  */
9097
9098static void
9099mips_reorg (void)
9100{
9101  if (TARGET_MIPS16)
9102    mips16_lay_out_constants ();
9103  else if (TARGET_EXPLICIT_RELOCS)
9104    {
9105      if (mips_flag_delayed_branch)
9106	dbr_schedule (get_insns ());
9107      mips_avoid_hazards ();
9108      if (TUNE_MIPS4130 && TARGET_VR4130_ALIGN)
9109	vr4130_align_insns ();
9110    }
9111}
9112
9113/* This function does three things:
9114
9115   - Register the special divsi3 and modsi3 functions if -mfix-vr4120.
9116   - Register the mips16 hardware floating point stubs.
9117   - Register the gofast functions if selected using --enable-gofast.  */
9118
9119#include "config/gofast.h"
9120
9121static void
9122mips_init_libfuncs (void)
9123{
9124  if (TARGET_FIX_VR4120)
9125    {
9126      set_optab_libfunc (sdiv_optab, SImode, "__vr4120_divsi3");
9127      set_optab_libfunc (smod_optab, SImode, "__vr4120_modsi3");
9128    }
9129
9130  if (TARGET_MIPS16 && mips16_hard_float)
9131    {
9132      set_optab_libfunc (add_optab, SFmode, "__mips16_addsf3");
9133      set_optab_libfunc (sub_optab, SFmode, "__mips16_subsf3");
9134      set_optab_libfunc (smul_optab, SFmode, "__mips16_mulsf3");
9135      set_optab_libfunc (sdiv_optab, SFmode, "__mips16_divsf3");
9136
9137      set_optab_libfunc (eq_optab, SFmode, "__mips16_eqsf2");
9138      set_optab_libfunc (ne_optab, SFmode, "__mips16_nesf2");
9139      set_optab_libfunc (gt_optab, SFmode, "__mips16_gtsf2");
9140      set_optab_libfunc (ge_optab, SFmode, "__mips16_gesf2");
9141      set_optab_libfunc (lt_optab, SFmode, "__mips16_ltsf2");
9142      set_optab_libfunc (le_optab, SFmode, "__mips16_lesf2");
9143
9144      set_conv_libfunc (sfix_optab, SImode, SFmode, "__mips16_fix_truncsfsi");
9145      set_conv_libfunc (sfloat_optab, SFmode, SImode, "__mips16_floatsisf");
9146
9147      if (TARGET_DOUBLE_FLOAT)
9148	{
9149	  set_optab_libfunc (add_optab, DFmode, "__mips16_adddf3");
9150	  set_optab_libfunc (sub_optab, DFmode, "__mips16_subdf3");
9151	  set_optab_libfunc (smul_optab, DFmode, "__mips16_muldf3");
9152	  set_optab_libfunc (sdiv_optab, DFmode, "__mips16_divdf3");
9153
9154	  set_optab_libfunc (eq_optab, DFmode, "__mips16_eqdf2");
9155	  set_optab_libfunc (ne_optab, DFmode, "__mips16_nedf2");
9156	  set_optab_libfunc (gt_optab, DFmode, "__mips16_gtdf2");
9157	  set_optab_libfunc (ge_optab, DFmode, "__mips16_gedf2");
9158	  set_optab_libfunc (lt_optab, DFmode, "__mips16_ltdf2");
9159	  set_optab_libfunc (le_optab, DFmode, "__mips16_ledf2");
9160
9161	  set_conv_libfunc (sext_optab, DFmode, SFmode, "__mips16_extendsfdf2");
9162	  set_conv_libfunc (trunc_optab, SFmode, DFmode, "__mips16_truncdfsf2");
9163
9164	  set_conv_libfunc (sfix_optab, SImode, DFmode, "__mips16_fix_truncdfsi");
9165	  set_conv_libfunc (sfloat_optab, DFmode, SImode, "__mips16_floatsidf");
9166	}
9167    }
9168  else
9169    gofast_maybe_init_libfuncs ();
9170}
9171
9172/* Return a number assessing the cost of moving a register in class
9173   FROM to class TO.  The classes are expressed using the enumeration
9174   values such as `GENERAL_REGS'.  A value of 2 is the default; other
9175   values are interpreted relative to that.
9176
9177   It is not required that the cost always equal 2 when FROM is the
9178   same as TO; on some machines it is expensive to move between
9179   registers if they are not general registers.
9180
9181   If reload sees an insn consisting of a single `set' between two
9182   hard registers, and if `REGISTER_MOVE_COST' applied to their
9183   classes returns a value of 2, reload does not check to ensure that
9184   the constraints of the insn are met.  Setting a cost of other than
9185   2 will allow reload to verify that the constraints are met.  You
9186   should do this if the `movM' pattern's constraints do not allow
9187   such copying.
9188
9189   ??? We make the cost of moving from HI/LO into general
9190   registers the same as for one of moving general registers to
9191   HI/LO for TARGET_MIPS16 in order to prevent allocating a
   pseudo to HI/LO.  This might hurt optimizations, though; it isn't
   clear whether it is wise.  And it might not work in all cases.  We
9194   could solve the DImode LO reg problem by using a multiply, just
9195   like reload_{in,out}si.  We could solve the SImode/HImode HI reg
9196   problem by using divide instructions.  divu puts the remainder in
9197   the HI reg, so doing a divide by -1 will move the value in the HI
9198   reg for all values except -1.  We could handle that case by using a
9199   signed divide, e.g.  -1 / 2 (or maybe 1 / -2?).  We'd have to emit
9200   a compare/branch to test the input value to see which instruction
9201   we need to use.  This gets pretty messy, but it is feasible.  */
9202
9203int
9204mips_register_move_cost (enum machine_mode mode ATTRIBUTE_UNUSED,
9205			 enum reg_class to, enum reg_class from)
9206{
9207  if (from == M16_REGS && GR_REG_CLASS_P (to))
9208    return 2;
9209  else if (from == M16_NA_REGS && GR_REG_CLASS_P (to))
9210    return 2;
9211  else if (GR_REG_CLASS_P (from))
9212    {
9213      if (to == M16_REGS)
9214	return 2;
9215      else if (to == M16_NA_REGS)
9216	return 2;
9217      else if (GR_REG_CLASS_P (to))
9218	{
9219	  if (TARGET_MIPS16)
9220	    return 4;
9221	  else
9222	    return 2;
9223	}
9224      else if (to == FP_REGS)
9225	return 4;
9226      else if (reg_class_subset_p (to, ACC_REGS))
9227	{
9228	  if (TARGET_MIPS16)
9229	    return 12;
9230	  else
9231	    return 6;
9232	}
9233      else if (COP_REG_CLASS_P (to))
9234	{
9235	  return 5;
9236	}
9237    }
9238  else if (from == FP_REGS)
9239    {
9240      if (GR_REG_CLASS_P (to))
9241	return 4;
9242      else if (to == FP_REGS)
9243	return 2;
9244      else if (to == ST_REGS)
9245	return 8;
9246    }
9247  else if (reg_class_subset_p (from, ACC_REGS))
9248    {
9249      if (GR_REG_CLASS_P (to))
9250	{
9251	  if (TARGET_MIPS16)
9252	    return 12;
9253	  else
9254	    return 6;
9255	}
9256    }
9257  else if (from == ST_REGS && GR_REG_CLASS_P (to))
9258    return 4;
9259  else if (COP_REG_CLASS_P (from))
9260    {
9261      return 5;
9262    }
9263
9264  /* Fall through.
9265     ??? What cases are these? Shouldn't we return 2 here?  */
9266
9267  return 12;
9268}
9269
9270/* Return the length of INSN.  LENGTH is the initial length computed by
9271   attributes in the machine-description file.  */
9272
9273int
9274mips_adjust_insn_length (rtx insn, int length)
9275{
  /* An unconditional jump has an unfilled delay slot if it is not part
9277     of a sequence.  A conditional jump normally has a delay slot, but
9278     does not on MIPS16.  */
9279  if (CALL_P (insn) || (TARGET_MIPS16 ? simplejump_p (insn) : JUMP_P (insn)))
9280    length += 4;
9281
9282  /* See how many nops might be needed to avoid hardware hazards.  */
9283  if (!cfun->machine->ignore_hazard_length_p && INSN_CODE (insn) >= 0)
9284    switch (get_attr_hazard (insn))
9285      {
9286      case HAZARD_NONE:
9287	break;
9288
9289      case HAZARD_DELAY:
9290	length += 4;
9291	break;
9292
9293      case HAZARD_HILO:
9294	length += 8;
9295	break;
9296      }
9297
  /* The lengths computed above assume 4-byte instructions; MIPS16
     instructions are generally half that size, so halve the result.  */
9299  if (TARGET_MIPS16)
9300    length /= 2;
9301
9302  return length;
9303}
9304
9305
9306/* Return an asm sequence to start a noat block and load the address
9307   of a label into $1.  */
9308
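/* Illustrative example: under the n32 ABI with explicit relocations,
   the template returned below expands to something like

	.set	noat
	lw	$1,%got_page(Ltarget)($28)
	addiu	$1,$1,%got_ofst(Ltarget)

   where "%[" starts the noat block, "%@" prints the assembler
   temporary ($1) and "%+" prints the global pointer ($28).  */
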
9309const char *
9310mips_output_load_label (void)
9311{
9312  if (TARGET_EXPLICIT_RELOCS)
9313    switch (mips_abi)
9314      {
9315      case ABI_N32:
9316	return "%[lw\t%@,%%got_page(%0)(%+)\n\taddiu\t%@,%@,%%got_ofst(%0)";
9317
9318      case ABI_64:
9319	return "%[ld\t%@,%%got_page(%0)(%+)\n\tdaddiu\t%@,%@,%%got_ofst(%0)";
9320
9321      default:
9322	if (ISA_HAS_LOAD_DELAY)
9323	  return "%[lw\t%@,%%got(%0)(%+)%#\n\taddiu\t%@,%@,%%lo(%0)";
9324	return "%[lw\t%@,%%got(%0)(%+)\n\taddiu\t%@,%@,%%lo(%0)";
9325      }
9326  else
9327    {
9328      if (Pmode == DImode)
9329	return "%[dla\t%@,%0";
9330      else
9331	return "%[la\t%@,%0";
9332    }
9333}
9334
9335/* Return the assembly code for INSN, which has the operands given by
9336   OPERANDS, and which branches to OPERANDS[1] if some condition is true.
9337   BRANCH_IF_TRUE is the asm template that should be used if OPERANDS[1]
9338   is in range of a direct branch.  BRANCH_IF_FALSE is an inverted
9339   version of BRANCH_IF_TRUE.  */
9340
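/* For example (illustrative): if the target of a "beq $4,$5,foo" is
   out of range of a direct branch, the code below emits roughly

	bne	$4,$5,1f	# reversed branch around the jump
	nop			# or INSN's delay slot, see below
	j	foo
	nop
1:

   and falls back to loading the target address and using "jr" when
   even the unconditional jump is out of range.  */
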
9341const char *
9342mips_output_conditional_branch (rtx insn, rtx *operands,
9343				const char *branch_if_true,
9344				const char *branch_if_false)
9345{
9346  unsigned int length;
9347  rtx taken, not_taken;
9348
9349  length = get_attr_length (insn);
9350  if (length <= 8)
9351    {
9352      /* Just a simple conditional branch.  */
9353      mips_branch_likely = (final_sequence && INSN_ANNULLED_BRANCH_P (insn));
9354      return branch_if_true;
9355    }
9356
9357  /* Generate a reversed branch around a direct jump.  This fallback does
9358     not use branch-likely instructions.  */
9359  mips_branch_likely = false;
9360  not_taken = gen_label_rtx ();
9361  taken = operands[1];
9362
9363  /* Generate the reversed branch to NOT_TAKEN.  */
9364  operands[1] = not_taken;
9365  output_asm_insn (branch_if_false, operands);
9366
9367  /* If INSN has a delay slot, we must provide delay slots for both the
9368     branch to NOT_TAKEN and the conditional jump.  We must also ensure
9369     that INSN's delay slot is executed in the appropriate cases.  */
9370  if (final_sequence)
9371    {
9372      /* This first delay slot will always be executed, so use INSN's
	 delay slot if it is not annulled.  */
9374      if (!INSN_ANNULLED_BRANCH_P (insn))
9375	{
9376	  final_scan_insn (XVECEXP (final_sequence, 0, 1),
9377			   asm_out_file, optimize, 1, NULL);
9378	  INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9379	}
9380      else
9381	output_asm_insn ("nop", 0);
9382      fprintf (asm_out_file, "\n");
9383    }
9384
9385  /* Output the unconditional branch to TAKEN.  */
9386  if (length <= 16)
9387    output_asm_insn ("j\t%0%/", &taken);
9388  else
9389    {
9390      output_asm_insn (mips_output_load_label (), &taken);
9391      output_asm_insn ("jr\t%@%]%/", 0);
9392    }
9393
9394  /* Now deal with its delay slot; see above.  */
9395  if (final_sequence)
9396    {
9397      /* This delay slot will only be executed if the branch is taken.
	 Use INSN's delay slot if it is annulled.  */
9399      if (INSN_ANNULLED_BRANCH_P (insn))
9400	{
9401	  final_scan_insn (XVECEXP (final_sequence, 0, 1),
9402			   asm_out_file, optimize, 1, NULL);
9403	  INSN_DELETED_P (XVECEXP (final_sequence, 0, 1)) = 1;
9404	}
9405      else
9406	output_asm_insn ("nop", 0);
9407      fprintf (asm_out_file, "\n");
9408    }
9409
9410  /* Output NOT_TAKEN.  */
9411  (*targetm.asm_out.internal_label) (asm_out_file, "L",
9412				     CODE_LABEL_NUMBER (not_taken));
9413  return "";
9414}
9415
9416/* Return the assembly code for INSN, which branches to OPERANDS[1]
9417   if some ordered condition is true.  The condition is given by
9418   OPERANDS[0] if !INVERTED_P, otherwise it is the inverse of
9419   OPERANDS[0].  OPERANDS[2] is the comparison's first operand;
9420   its second is always zero.  */
9421
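/* For example (illustrative): an unsigned "x > 0" test is the same as
   "x != 0", so a GTU comparison against zero becomes "bne x,$0,label"
   and its inverse "beq x,$0,label"; an unsigned "x < 0" test can never
   be true, so LTU against zero turns into a never-taken branch.  */
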
9422const char *
9423mips_output_order_conditional_branch (rtx insn, rtx *operands, bool inverted_p)
9424{
9425  const char *branch[2];
9426
9427  /* Make BRANCH[1] branch to OPERANDS[1] when the condition is true.
9428     Make BRANCH[0] branch on the inverse condition.  */
9429  switch (GET_CODE (operands[0]))
9430    {
9431      /* These cases are equivalent to comparisons against zero.  */
9432    case LEU:
9433      inverted_p = !inverted_p;
9434      /* Fall through.  */
9435    case GTU:
9436      branch[!inverted_p] = MIPS_BRANCH ("bne", "%2,%.,%1");
9437      branch[inverted_p] = MIPS_BRANCH ("beq", "%2,%.,%1");
9438      break;
9439
9440      /* These cases are always true or always false.  */
9441    case LTU:
9442      inverted_p = !inverted_p;
9443      /* Fall through.  */
9444    case GEU:
9445      branch[!inverted_p] = MIPS_BRANCH ("beq", "%.,%.,%1");
9446      branch[inverted_p] = MIPS_BRANCH ("bne", "%.,%.,%1");
9447      break;
9448
9449    default:
9450      branch[!inverted_p] = MIPS_BRANCH ("b%C0z", "%2,%1");
9451      branch[inverted_p] = MIPS_BRANCH ("b%N0z", "%2,%1");
9452      break;
9453    }
9454  return mips_output_conditional_branch (insn, operands, branch[1], branch[0]);
9455}
9456
9457/* Used to output div or ddiv instruction DIVISION, which has the operands
9458   given by OPERANDS.  Add in a divide-by-zero check if needed.
9459
9460   When working around R4000 and R4400 errata, we need to make sure that
9461   the division is not immediately followed by a shift[1][2].  We also
9462   need to stop the division from being put into a branch delay slot[3].
9463   The easiest way to avoid both problems is to add a nop after the
9464   division.  When a divide-by-zero check is needed, this nop can be
9465   used to fill the branch delay slot.
9466
9467   [1] If a double-word or a variable shift executes immediately
9468       after starting an integer division, the shift may give an
9469       incorrect result.  See quotations of errata #16 and #28 from
9470       "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9471       in mips.md for details.
9472
9473   [2] A similar bug to [1] exists for all revisions of the
9474       R4000 and the R4400 when run in an MC configuration.
9475       From "MIPS R4000MC Errata, Processor Revision 2.2 and 3.0":
9476
9477       "19. In this following sequence:
9478
9479		    ddiv		(or ddivu or div or divu)
9480		    dsll32		(or dsrl32, dsra32)
9481
9482	    if an MPT stall occurs, while the divide is slipping the cpu
9483	    pipeline, then the following double shift would end up with an
9484	    incorrect result.
9485
9486	    Workaround: The compiler needs to avoid generating any
9487	    sequence with divide followed by extended double shift."
9488
9489       This erratum is also present in "MIPS R4400MC Errata, Processor
9490       Revision 1.0" and "MIPS R4400MC Errata, Processor Revision 2.0
9491       & 3.0" as errata #10 and #4, respectively.
9492
9493   [3] From "MIPS R4000PC/SC Errata, Processor Revision 2.2 and 3.0"
9494       (also valid for MIPS R4000MC processors):
9495
9496       "52. R4000SC: This bug does not apply for the R4000PC.
9497
9498	    There are two flavors of this bug:
9499
9500	    1) If the instruction just after divide takes an RF exception
9501	       (tlb-refill, tlb-invalid) and gets an instruction cache
9502	       miss (both primary and secondary) and the line which is
9503	       currently in secondary cache at this index had the first
9504	       data word, where the bits 5..2 are set, then R4000 would
9505	       get a wrong result for the div.
9506
9507	    ##1
9508		    nop
9509		    div	r8, r9
9510		    -------------------		# end-of page. -tlb-refill
9511		    nop
9512	    ##2
9513		    nop
9514		    div	r8, r9
9515		    -------------------		# end-of page. -tlb-invalid
9516		    nop
9517
9518	    2) If the divide is in the taken branch delay slot, where the
9519	       target takes RF exception and gets an I-cache miss for the
9520	       exception vector or where I-cache miss occurs for the
9521	       target address, under the above mentioned scenarios, the
9522	       div would get wrong results.
9523
9524	    ##1
9525		    j	r2		# to next page mapped or unmapped
9526		    div	r8,r9		# this bug would be there as long
9527					# as there is an ICache miss and
9528		    nop			# the "data pattern" is present
9529
9530	    ##2
9531		    beq	r0, r0, NextPage	# to Next page
9532		    div	r8,r9
9533		    nop
9534
9535	    This bug is present for div, divu, ddiv, and ddivu
9536	    instructions.
9537
9538	    Workaround: For item 1), OS could make sure that the next page
9539	    after the divide instruction is also mapped.  For item 2), the
9540	    compiler could make sure that the divide instruction is not in
9541	    the branch delay slot."
9542
9543       These processors have PRId values of 0x00004220 and 0x00004300 for
9544       the R4000 and 0x00004400, 0x00004500 and 0x00004600 for the R4400.  */
9545
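/* For example (illustrative): with -mcheck-zero-division on a target
   that has trap instructions, the division is followed by
   "teq <divisor>,$0,7", which traps with code 7 if the divisor is
   zero; when the R4000/R4400 workarounds are also needed, the extra
   nop goes between the division and the trap check.  */
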
9546const char *
9547mips_output_division (const char *division, rtx *operands)
9548{
9549  const char *s;
9550
9551  s = division;
9552  if (TARGET_FIX_R4000 || TARGET_FIX_R4400)
9553    {
9554      output_asm_insn (s, operands);
9555      s = "nop";
9556    }
9557  if (TARGET_CHECK_ZERO_DIV)
9558    {
9559      if (TARGET_MIPS16)
9560	{
9561	  output_asm_insn (s, operands);
9562	  s = "bnez\t%2,1f\n\tbreak\t7\n1:";
9563	}
9564      else if (GENERATE_DIVIDE_TRAPS)
9565        {
9566	  output_asm_insn (s, operands);
9567	  s = "teq\t%2,%.,7";
9568        }
9569      else
9570	{
9571	  output_asm_insn ("%(bne\t%2,%.,1f", operands);
9572	  output_asm_insn (s, operands);
9573	  s = "break\t7%)\n1:";
9574	}
9575    }
9576  return s;
9577}
9578
9579/* Return true if GIVEN is the same as CANONICAL, or if it is CANONICAL
9580   with a final "000" replaced by "k".  Ignore case.
9581
9582   Note: this function is shared between GCC and GAS.  */
9583
9584static bool
9585mips_strict_matching_cpu_name_p (const char *canonical, const char *given)
9586{
9587  while (*given != 0 && TOLOWER (*given) == TOLOWER (*canonical))
9588    given++, canonical++;
9589
9590  return ((*given == 0 && *canonical == 0)
9591	  || (strcmp (canonical, "000") == 0 && strcasecmp (given, "k") == 0));
9592}
9593
9594
9595/* Return true if GIVEN matches CANONICAL, where GIVEN is a user-supplied
9596   CPU name.  We've traditionally allowed a lot of variation here.
9597
9598   Note: this function is shared between GCC and GAS.  */
9599
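/* For example (illustrative): assuming the usual table entries,
   "-march=vr4100" can also be spelled "r4100" or just "4100", and
   "r4000" matches the shorthand "r4k" through the "000" -> "k" rule
   above.  */
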
9600static bool
9601mips_matching_cpu_name_p (const char *canonical, const char *given)
9602{
9603  /* First see if the name matches exactly, or with a final "000"
9604     turned into "k".  */
9605  if (mips_strict_matching_cpu_name_p (canonical, given))
9606    return true;
9607
9608  /* If not, try comparing based on numerical designation alone.
9609     See if GIVEN is an unadorned number, or 'r' followed by a number.  */
9610  if (TOLOWER (*given) == 'r')
9611    given++;
9612  if (!ISDIGIT (*given))
9613    return false;
9614
9615  /* Skip over some well-known prefixes in the canonical name,
9616     hoping to find a number there too.  */
9617  if (TOLOWER (canonical[0]) == 'v' && TOLOWER (canonical[1]) == 'r')
9618    canonical += 2;
9619  else if (TOLOWER (canonical[0]) == 'r' && TOLOWER (canonical[1]) == 'm')
9620    canonical += 2;
9621  else if (TOLOWER (canonical[0]) == 'r')
9622    canonical += 1;
9623
9624  return mips_strict_matching_cpu_name_p (canonical, given);
9625}
9626
9627
9628/* Return the mips_cpu_info entry for the processor or ISA given
9629   by CPU_STRING.  Return null if the string isn't recognized.
9630
9631   A similar function exists in GAS.  */
9632
9633static const struct mips_cpu_info *
9634mips_parse_cpu (const char *cpu_string)
9635{
9636  const struct mips_cpu_info *p;
9637  const char *s;
9638
9639  /* In the past, we allowed upper-case CPU names, but it doesn't
9640     work well with the multilib machinery.  */
9641  for (s = cpu_string; *s != 0; s++)
9642    if (ISUPPER (*s))
9643      {
9644	warning (0, "the cpu name must be lower case");
9645	break;
9646      }
9647
9648  /* 'from-abi' selects the most compatible architecture for the given
9649     ABI: MIPS I for 32-bit ABIs and MIPS III for 64-bit ABIs.  For the
9650     EABIs, we have to decide whether we're using the 32-bit or 64-bit
9651     version.  Look first at the -mgp options, if given, otherwise base
9652     the choice on MASK_64BIT in TARGET_DEFAULT.  */
9653  if (strcasecmp (cpu_string, "from-abi") == 0)
9654    return mips_cpu_info_from_isa (ABI_NEEDS_32BIT_REGS ? 1
9655				   : ABI_NEEDS_64BIT_REGS ? 3
9656				   : (TARGET_64BIT ? 3 : 1));
9657
9658  /* 'default' has traditionally been a no-op.  Probably not very useful.  */
9659  if (strcasecmp (cpu_string, "default") == 0)
9660    return 0;
9661
9662  for (p = mips_cpu_info_table; p->name != 0; p++)
9663    if (mips_matching_cpu_name_p (p->name, cpu_string))
9664      return p;
9665
9666  return 0;
9667}
9668
9669
9670/* Return the processor associated with the given ISA level, or null
9671   if the ISA isn't valid.  */
9672
9673static const struct mips_cpu_info *
9674mips_cpu_info_from_isa (int isa)
9675{
9676  const struct mips_cpu_info *p;
9677
9678  for (p = mips_cpu_info_table; p->name != 0; p++)
9679    if (p->isa == isa)
9680      return p;
9681
9682  return 0;
9683}
9684
9685/* Implement HARD_REGNO_NREGS.  The size of FP registers is controlled
9686   by UNITS_PER_FPREG.  The size of FP status registers is always 4, because
9687   they only hold condition code modes, and CCmode is always considered to
9688   be 4 bytes wide.  All other registers are word sized.  */
9689
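/* For example (illustrative): with 32-bit FP registers
   (UNITS_PER_FPREG == 4), a DFmode value occupies two FP registers,
   whereas a CCmode value always occupies a single status register.  */
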
9690unsigned int
9691mips_hard_regno_nregs (int regno, enum machine_mode mode)
9692{
9693  if (ST_REG_P (regno))
9694    return ((GET_MODE_SIZE (mode) + 3) / 4);
9695  else if (! FP_REG_P (regno))
9696    return ((GET_MODE_SIZE (mode) + UNITS_PER_WORD - 1) / UNITS_PER_WORD);
9697  else
9698    return ((GET_MODE_SIZE (mode) + UNITS_PER_FPREG - 1) / UNITS_PER_FPREG);
9699}
9700
/* Implement TARGET_RETURN_IN_MEMORY.  Under the old (o32 and o64) ABIs,
9702   all BLKmode objects are returned in memory.  Under the new (N32 and
9703   64-bit MIPS ABIs) small structures are returned in a register.
9704   Objects with varying size must still be returned in memory, of
9705   course.  */
9706
9707static bool
9708mips_return_in_memory (tree type, tree fndecl ATTRIBUTE_UNUSED)
9709{
9710  if (TARGET_OLDABI)
9711    return (TYPE_MODE (type) == BLKmode);
9712  else
9713    return ((int_size_in_bytes (type) > (2 * UNITS_PER_WORD))
9714	    || (int_size_in_bytes (type) == -1));
9715}
9716
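/* Implement TARGET_STRICT_ARGUMENT_NAMING.  */
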
9717static bool
9718mips_strict_argument_naming (CUMULATIVE_ARGS *ca ATTRIBUTE_UNUSED)
9719{
9720  return !TARGET_OLDABI;
9721}
9722
9723/* Return true if INSN is a multiply-add or multiply-subtract
9724   instruction and PREV assigns to the accumulator operand.  */
9725
9726bool
9727mips_linked_madd_p (rtx prev, rtx insn)
9728{
9729  rtx x;
9730
9731  x = single_set (insn);
9732  if (x == 0)
9733    return false;
9734
9735  x = SET_SRC (x);
9736
9737  if (GET_CODE (x) == PLUS
9738      && GET_CODE (XEXP (x, 0)) == MULT
9739      && reg_set_p (XEXP (x, 1), prev))
9740    return true;
9741
9742  if (GET_CODE (x) == MINUS
9743      && GET_CODE (XEXP (x, 1)) == MULT
9744      && reg_set_p (XEXP (x, 0), prev))
9745    return true;
9746
9747  return false;
9748}
9749
9750/* Used by TUNE_MACC_CHAINS to record the last scheduled instruction
9751   that may clobber hi or lo.  */
9752
9753static rtx mips_macc_chains_last_hilo;
9754
9755/* A TUNE_MACC_CHAINS helper function.  Record that instruction INSN has
9756   been scheduled, updating mips_macc_chains_last_hilo appropriately.  */
9757
9758static void
9759mips_macc_chains_record (rtx insn)
9760{
9761  if (get_attr_may_clobber_hilo (insn))
9762    mips_macc_chains_last_hilo = insn;
9763}
9764
9765/* A TUNE_MACC_CHAINS helper function.  Search ready queue READY, which
9766   has NREADY elements, looking for a multiply-add or multiply-subtract
9767   instruction that is cumulative with mips_macc_chains_last_hilo.
9768   If there is one, promote it ahead of anything else that might
9769   clobber hi or lo.  */
9770
9771static void
9772mips_macc_chains_reorder (rtx *ready, int nready)
9773{
9774  int i, j;
9775
9776  if (mips_macc_chains_last_hilo != 0)
9777    for (i = nready - 1; i >= 0; i--)
9778      if (mips_linked_madd_p (mips_macc_chains_last_hilo, ready[i]))
9779	{
9780	  for (j = nready - 1; j > i; j--)
9781	    if (recog_memoized (ready[j]) >= 0
9782		&& get_attr_may_clobber_hilo (ready[j]))
9783	      {
9784		mips_promote_ready (ready, i, j);
9785		break;
9786	      }
9787	  break;
9788	}
9789}
9790
9791/* The last instruction to be scheduled.  */
9792
9793static rtx vr4130_last_insn;
9794
9795/* A note_stores callback used by vr4130_true_reg_dependence_p.  DATA
9796   points to an rtx that is initially an instruction.  Nullify the rtx
9797   if the instruction uses the value of register X.  */
9798
9799static void
9800vr4130_true_reg_dependence_p_1 (rtx x, rtx pat ATTRIBUTE_UNUSED, void *data)
9801{
9802  rtx *insn_ptr = data;
9803  if (REG_P (x)
9804      && *insn_ptr != 0
9805      && reg_referenced_p (x, PATTERN (*insn_ptr)))
9806    *insn_ptr = 0;
9807}
9808
9809/* Return true if there is true register dependence between vr4130_last_insn
9810   and INSN.  */
9811
9812static bool
9813vr4130_true_reg_dependence_p (rtx insn)
9814{
9815  note_stores (PATTERN (vr4130_last_insn),
9816	       vr4130_true_reg_dependence_p_1, &insn);
9817  return insn == 0;
9818}
9819
9820/* A TUNE_MIPS4130 helper function.  Given that INSN1 is at the head of
9821   the ready queue and that INSN2 is the instruction after it, return
9822   true if it is worth promoting INSN2 ahead of INSN1.  Look for cases
9823   in which INSN1 and INSN2 can probably issue in parallel, but for
9824   which (INSN2, INSN1) should be less sensitive to instruction
9825   alignment than (INSN1, INSN2).  See 4130.md for more details.  */
9826
9827static bool
9828vr4130_swap_insns_p (rtx insn1, rtx insn2)
9829{
9830  rtx dep;
9831
9832  /* Check for the following case:
9833
9834     1) there is some other instruction X with an anti dependence on INSN1;
9835     2) X has a higher priority than INSN2; and
9836     3) X is an arithmetic instruction (and thus has no unit restrictions).
9837
     If INSN1 is the last instruction blocking X, it would be better to
9839     choose (INSN1, X) over (INSN2, INSN1).  */
9840  for (dep = INSN_DEPEND (insn1); dep != 0; dep = XEXP (dep, 1))
9841    if (REG_NOTE_KIND (dep) == REG_DEP_ANTI
9842	&& INSN_PRIORITY (XEXP (dep, 0)) > INSN_PRIORITY (insn2)
9843	&& recog_memoized (XEXP (dep, 0)) >= 0
9844	&& get_attr_vr4130_class (XEXP (dep, 0)) == VR4130_CLASS_ALU)
9845      return false;
9846
9847  if (vr4130_last_insn != 0
9848      && recog_memoized (insn1) >= 0
9849      && recog_memoized (insn2) >= 0)
9850    {
9851      /* See whether INSN1 and INSN2 use different execution units,
9852	 or if they are both ALU-type instructions.  If so, they can
9853	 probably execute in parallel.  */
9854      enum attr_vr4130_class class1 = get_attr_vr4130_class (insn1);
9855      enum attr_vr4130_class class2 = get_attr_vr4130_class (insn2);
9856      if (class1 != class2 || class1 == VR4130_CLASS_ALU)
9857	{
9858	  /* If only one of the instructions has a dependence on
9859	     vr4130_last_insn, prefer to schedule the other one first.  */
9860	  bool dep1 = vr4130_true_reg_dependence_p (insn1);
9861	  bool dep2 = vr4130_true_reg_dependence_p (insn2);
9862	  if (dep1 != dep2)
9863	    return dep1;
9864
9865	  /* Prefer to schedule INSN2 ahead of INSN1 if vr4130_last_insn
9866	     is not an ALU-type instruction and if INSN1 uses the same
9867	     execution unit.  (Note that if this condition holds, we already
9868	     know that INSN2 uses a different execution unit.)  */
9869	  if (class1 != VR4130_CLASS_ALU
9870	      && recog_memoized (vr4130_last_insn) >= 0
9871	      && class1 == get_attr_vr4130_class (vr4130_last_insn))
9872	    return true;
9873	}
9874    }
9875  return false;
9876}
9877
9878/* A TUNE_MIPS4130 helper function.  (READY, NREADY) describes a ready
9879   queue with at least two instructions.  Swap the first two if
9880   vr4130_swap_insns_p says that it could be worthwhile.  */
9881
9882static void
9883vr4130_reorder (rtx *ready, int nready)
9884{
9885  if (vr4130_swap_insns_p (ready[nready - 1], ready[nready - 2]))
9886    mips_promote_ready (ready, nready - 2, nready - 1);
9887}
9888
9889/* Remove the instruction at index LOWER from ready queue READY and
9890   reinsert it in front of the instruction at index HIGHER.  LOWER must
9891   be <= HIGHER.  */
9892
9893static void
9894mips_promote_ready (rtx *ready, int lower, int higher)
9895{
9896  rtx new_head;
9897  int i;
9898
9899  new_head = ready[lower];
9900  for (i = lower; i < higher; i++)
9901    ready[i] = ready[i + 1];
9902  ready[i] = new_head;
9903}
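
/* A worked example (under the usual convention that the scheduler issues
   from the highest index first): promoting LOWER = 1 ahead of HIGHER = 3
   turns {A, B, C, D, E} into {A, C, D, B, E}; entries 2 and 3 slide down
   one slot and the old entry 1 is reinserted at index 3.  */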
9904
9905/* Implement TARGET_SCHED_REORDER.  */
9906
9907static int
9908mips_sched_reorder (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9909		    rtx *ready, int *nreadyp, int cycle)
9910{
9911  if (!reload_completed && TUNE_MACC_CHAINS)
9912    {
9913      if (cycle == 0)
9914	mips_macc_chains_last_hilo = 0;
9915      if (*nreadyp > 0)
9916	mips_macc_chains_reorder (ready, *nreadyp);
9917    }
9918  if (reload_completed && TUNE_MIPS4130 && !TARGET_VR4130_ALIGN)
9919    {
9920      if (cycle == 0)
9921	vr4130_last_insn = 0;
9922      if (*nreadyp > 1)
9923	vr4130_reorder (ready, *nreadyp);
9924    }
9925  return mips_issue_rate ();
9926}
9927
9928/* Implement TARGET_SCHED_VARIABLE_ISSUE.  */
9929
9930static int
9931mips_variable_issue (FILE *file ATTRIBUTE_UNUSED, int verbose ATTRIBUTE_UNUSED,
9932		     rtx insn, int more)
9933{
9934  switch (GET_CODE (PATTERN (insn)))
9935    {
9936    case USE:
9937    case CLOBBER:
9938      /* Don't count USEs and CLOBBERs against the issue rate.  */
9939      break;
9940
9941    default:
9942      more--;
9943      if (!reload_completed && TUNE_MACC_CHAINS)
9944	mips_macc_chains_record (insn);
9945      vr4130_last_insn = insn;
9946      break;
9947    }
9948  return more;
9949}
9950
9951/* Implement TARGET_SCHED_ADJUST_COST.  We assume that anti and output
9952   dependencies have no cost.  */
9953
9954static int
9955mips_adjust_cost (rtx insn ATTRIBUTE_UNUSED, rtx link,
9956		  rtx dep ATTRIBUTE_UNUSED, int cost)
9957{
9958  if (REG_NOTE_KIND (link) != 0)
9959    return 0;
9960  return cost;
9961}
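
/* For example, an anti or output dependence between two multiplies is
   reported as free (cost 0), while a true read-after-write dependence
   keeps the COST value passed in.  */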
9962
9963/* Return the number of instructions that can be issued per cycle.  */
9964
9965static int
9966mips_issue_rate (void)
9967{
9968  switch (mips_tune)
9969    {
9970    case PROCESSOR_R4130:
9971    case PROCESSOR_R5400:
9972    case PROCESSOR_R5500:
9973    case PROCESSOR_R7000:
9974    case PROCESSOR_R9000:
9975    case PROCESSOR_OCTEON:
9976      return 2;
9977
9978    case PROCESSOR_SB1:
9979    case PROCESSOR_SB1A:
9980      /* This is actually 4, but we get better performance if we claim 3.
9981	 This is partly because of unwanted speculative code motion with the
9982	 larger number, and partly because in most common cases we can't
9983	 reach the theoretical max of 4.  */
9984      return 3;
9985
9986    default:
9987      return 1;
9988    }
9989}
9990
9991/* Implement TARGET_SCHED_FIRST_CYCLE_MULTIPASS_DFA_LOOKAHEAD.  This should
9992   be as wide as the scheduling freedom in the DFA.  */
9993
9994static int
9995mips_multipass_dfa_lookahead (void)
9996{
9997  /* Can schedule up to 4 of the 6 function units in any one cycle.  */
9998  if (TUNE_SB1)
9999    return 4;
10000
10001  return 0;
10002}
10003
10004/* Implements a store data bypass check.  We need this because the cprestore
10005   pattern is type store, but defined using an UNSPEC.  This UNSPEC causes the
10006   default routine to abort.  We just return false for that case.  */
10007/* ??? Should try to give a better result here than assuming false.  */
10008
10009int
10010mips_store_data_bypass_p (rtx out_insn, rtx in_insn)
10011{
10012  if (GET_CODE (PATTERN (in_insn)) == UNSPEC_VOLATILE)
10013    return false;
10014
10015  return ! store_data_bypass_p (out_insn, in_insn);
10016}
10017
10018/* Given that we have an rtx of the form (prefetch ... WRITE LOCALITY),
10019   return the first operand of the associated "pref" or "prefx" insn.  */
10020
10021rtx
10022mips_prefetch_cookie (rtx write, rtx locality)
10023{
10024  /* store_streamed / load_streamed.  */
10025  if (INTVAL (locality) <= 0)
10026    return GEN_INT (INTVAL (write) + 4);
10027
10028  /* store / load.  */
10029  if (INTVAL (locality) <= 2)
10030    return write;
10031
10032  /* store_retained / load_retained.  */
10033  return GEN_INT (INTVAL (write) + 6);
10034}
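
/* For reference, the mapping implemented above (WRITE is 0 for a load
   prefetch and 1 for a store prefetch):

     locality 0       -> WRITE + 4   (load_streamed / store_streamed)
     locality 1 or 2  -> WRITE       (load / store)
     locality 3       -> WRITE + 6   (load_retained / store_retained)  */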
10035
10036/* MIPS builtin function support.  */
10037
10038struct builtin_description
10039{
10040  /* The code of the main .md file instruction.  See mips_builtin_type
10041     for more information.  */
10042  enum insn_code icode;
10043
10044  /* The floating-point comparison code to use with ICODE, if any.  */
10045  enum mips_fp_condition cond;
10046
10047  /* The name of the builtin function.  */
10048  const char *name;
10049
10050  /* Specifies how the function should be expanded.  */
10051  enum mips_builtin_type builtin_type;
10052
10053  /* The function's prototype.  */
10054  enum mips_function_type function_type;
10055
10056  /* The target flags required for this function.  */
10057  int target_flags;
10058};
10059
10060/* Define a MIPS_BUILTIN_DIRECT function for instruction CODE_FOR_mips_<INSN>.
10061   FUNCTION_TYPE and TARGET_FLAGS are builtin_description fields.  */
10062#define DIRECT_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)		\
10063  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
10064    MIPS_BUILTIN_DIRECT, FUNCTION_TYPE, TARGET_FLAGS }
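
/* As a sketch of the expansion, DIRECT_BUILTIN (pll_ps,
   MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT) yields:

     { CODE_FOR_mips_pll_ps, 0, "__builtin_mips_pll_ps",
       MIPS_BUILTIN_DIRECT, MIPS_V2SF_FTYPE_V2SF_V2SF,
       MASK_PAIRED_SINGLE_FLOAT }  */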
10065
10066/* Define __builtin_mips_<INSN>_<COND>_{s,d}, both of which require
10067   TARGET_FLAGS.  */
10068#define CMP_SCALAR_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10069  { CODE_FOR_mips_ ## INSN ## _cond_s, MIPS_FP_COND_ ## COND,		\
10070    "__builtin_mips_" #INSN "_" #COND "_s",				\
10071    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_SF_SF, TARGET_FLAGS },	\
10072  { CODE_FOR_mips_ ## INSN ## _cond_d, MIPS_FP_COND_ ## COND,		\
10073    "__builtin_mips_" #INSN "_" #COND "_d",				\
10074    MIPS_BUILTIN_CMP_SINGLE, MIPS_INT_FTYPE_DF_DF, TARGET_FLAGS }
10075
10076/* Define __builtin_mips_{any,all,upper,lower}_<INSN>_<COND>_ps.
10077   The lower and upper forms require TARGET_FLAGS while the any and all
10078   forms require MASK_MIPS3D.  */
10079#define CMP_PS_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10080  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10081    "__builtin_mips_any_" #INSN "_" #COND "_ps",			\
10082    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
10083  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10084    "__builtin_mips_all_" #INSN "_" #COND "_ps",			\
10085    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF, MASK_MIPS3D },	\
10086  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10087    "__builtin_mips_lower_" #INSN "_" #COND "_ps",			\
10088    MIPS_BUILTIN_CMP_LOWER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS },	\
10089  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10090    "__builtin_mips_upper_" #INSN "_" #COND "_ps",			\
10091    MIPS_BUILTIN_CMP_UPPER, MIPS_INT_FTYPE_V2SF_V2SF, TARGET_FLAGS }
10092
10093/* Define __builtin_mips_{any,all}_<INSN>_<COND>_4s.  The functions
10094   require MASK_MIPS3D.  */
10095#define CMP_4S_BUILTINS(INSN, COND)					\
10096  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
10097    "__builtin_mips_any_" #INSN "_" #COND "_4s",			\
10098    MIPS_BUILTIN_CMP_ANY, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10099    MASK_MIPS3D },							\
10100  { CODE_FOR_mips_ ## INSN ## _cond_4s, MIPS_FP_COND_ ## COND,		\
10101    "__builtin_mips_all_" #INSN "_" #COND "_4s",			\
10102    MIPS_BUILTIN_CMP_ALL, MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10103    MASK_MIPS3D }
10104
10105/* Define __builtin_mips_mov{t,f}_<INSN>_<COND>_ps.  The comparison
10106   instruction requires TARGET_FLAGS.  */
10107#define MOVTF_BUILTINS(INSN, COND, TARGET_FLAGS)			\
10108  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10109    "__builtin_mips_movt_" #INSN "_" #COND "_ps",			\
10110    MIPS_BUILTIN_MOVT, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10111    TARGET_FLAGS },							\
10112  { CODE_FOR_mips_ ## INSN ## _cond_ps, MIPS_FP_COND_ ## COND,		\
10113    "__builtin_mips_movf_" #INSN "_" #COND "_ps",			\
10114    MIPS_BUILTIN_MOVF, MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF,		\
10115    TARGET_FLAGS }
10116
10117/* Define all the builtins related to c.cond.fmt condition COND.  */
10118#define CMP_BUILTINS(COND)						\
10119  MOVTF_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
10120  MOVTF_BUILTINS (cabs, COND, MASK_MIPS3D),				\
10121  CMP_SCALAR_BUILTINS (cabs, COND, MASK_MIPS3D),			\
10122  CMP_PS_BUILTINS (c, COND, MASK_PAIRED_SINGLE_FLOAT),			\
10123  CMP_PS_BUILTINS (cabs, COND, MASK_MIPS3D),				\
10124  CMP_4S_BUILTINS (c, COND),						\
10125  CMP_4S_BUILTINS (cabs, COND)
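
/* Each CMP_BUILTINS (COND) invocation therefore contributes 18 entries:
   four movt/movf builtins (two each for c and cabs), two scalar cabs
   comparisons, eight paired-single comparisons and four four-register
   comparisons.  */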
10126
10127static const struct builtin_description mips_bdesc[] =
10128{
10129  DIRECT_BUILTIN (pll_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10130  DIRECT_BUILTIN (pul_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10131  DIRECT_BUILTIN (plu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10132  DIRECT_BUILTIN (puu_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10133  DIRECT_BUILTIN (cvt_ps_s, MIPS_V2SF_FTYPE_SF_SF, MASK_PAIRED_SINGLE_FLOAT),
10134  DIRECT_BUILTIN (cvt_s_pl, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10135  DIRECT_BUILTIN (cvt_s_pu, MIPS_SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10136  DIRECT_BUILTIN (abs_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT),
10137
10138  DIRECT_BUILTIN (alnv_ps, MIPS_V2SF_FTYPE_V2SF_V2SF_INT,
10139		  MASK_PAIRED_SINGLE_FLOAT),
10140  DIRECT_BUILTIN (addr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10141  DIRECT_BUILTIN (mulr_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10142  DIRECT_BUILTIN (cvt_pw_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10143  DIRECT_BUILTIN (cvt_ps_pw, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10144
10145  DIRECT_BUILTIN (recip1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10146  DIRECT_BUILTIN (recip1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10147  DIRECT_BUILTIN (recip1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10148  DIRECT_BUILTIN (recip2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10149  DIRECT_BUILTIN (recip2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10150  DIRECT_BUILTIN (recip2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10151
10152  DIRECT_BUILTIN (rsqrt1_s, MIPS_SF_FTYPE_SF, MASK_MIPS3D),
10153  DIRECT_BUILTIN (rsqrt1_d, MIPS_DF_FTYPE_DF, MASK_MIPS3D),
10154  DIRECT_BUILTIN (rsqrt1_ps, MIPS_V2SF_FTYPE_V2SF, MASK_MIPS3D),
10155  DIRECT_BUILTIN (rsqrt2_s, MIPS_SF_FTYPE_SF_SF, MASK_MIPS3D),
10156  DIRECT_BUILTIN (rsqrt2_d, MIPS_DF_FTYPE_DF_DF, MASK_MIPS3D),
10157  DIRECT_BUILTIN (rsqrt2_ps, MIPS_V2SF_FTYPE_V2SF_V2SF, MASK_MIPS3D),
10158
10159  MIPS_FP_CONDITIONS (CMP_BUILTINS)
10160};
10161
10162/* Builtin functions for the SB-1 processor.  */
10163
10164#define CODE_FOR_mips_sqrt_ps CODE_FOR_sqrtv2sf2
10165
10166static const struct builtin_description sb1_bdesc[] =
10167{
10168  DIRECT_BUILTIN (sqrt_ps, MIPS_V2SF_FTYPE_V2SF, MASK_PAIRED_SINGLE_FLOAT)
10169};
10170
10171/* Builtin functions for DSP ASE.  */
10172
10173#define CODE_FOR_mips_addq_ph CODE_FOR_addv2hi3
10174#define CODE_FOR_mips_addu_qb CODE_FOR_addv4qi3
10175#define CODE_FOR_mips_subq_ph CODE_FOR_subv2hi3
10176#define CODE_FOR_mips_subu_qb CODE_FOR_subv4qi3
10177
10178/* Define a MIPS_BUILTIN_DIRECT_NO_TARGET function for instruction
10179   CODE_FOR_mips_<INSN>.  FUNCTION_TYPE and TARGET_FLAGS are
10180   builtin_description fields.  */
10181#define DIRECT_NO_TARGET_BUILTIN(INSN, FUNCTION_TYPE, TARGET_FLAGS)	\
10182  { CODE_FOR_mips_ ## INSN, 0, "__builtin_mips_" #INSN,			\
10183    MIPS_BUILTIN_DIRECT_NO_TARGET, FUNCTION_TYPE, TARGET_FLAGS }
10184
10185/* Define __builtin_mips_bposge<VALUE>.  <VALUE> is 32 for the MIPS32 DSP
10186   branch instruction.  TARGET_FLAGS is a builtin_description field.  */
10187#define BPOSGE_BUILTIN(VALUE, TARGET_FLAGS)				\
10188  { CODE_FOR_mips_bposge, 0, "__builtin_mips_bposge" #VALUE,		\
10189    MIPS_BUILTIN_BPOSGE ## VALUE, MIPS_SI_FTYPE_VOID, TARGET_FLAGS }
10190
10191static const struct builtin_description dsp_bdesc[] =
10192{
10193  DIRECT_BUILTIN (addq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10194  DIRECT_BUILTIN (addq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10195  DIRECT_BUILTIN (addq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10196  DIRECT_BUILTIN (addu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10197  DIRECT_BUILTIN (addu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10198  DIRECT_BUILTIN (subq_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10199  DIRECT_BUILTIN (subq_s_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10200  DIRECT_BUILTIN (subq_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10201  DIRECT_BUILTIN (subu_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10202  DIRECT_BUILTIN (subu_s_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10203  DIRECT_BUILTIN (addsc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10204  DIRECT_BUILTIN (addwc, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10205  DIRECT_BUILTIN (modsub, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10206  DIRECT_BUILTIN (raddu_w_qb, MIPS_SI_FTYPE_V4QI, MASK_DSP),
10207  DIRECT_BUILTIN (absq_s_ph, MIPS_V2HI_FTYPE_V2HI, MASK_DSP),
10208  DIRECT_BUILTIN (absq_s_w, MIPS_SI_FTYPE_SI, MASK_DSP),
10209  DIRECT_BUILTIN (precrq_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10210  DIRECT_BUILTIN (precrq_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10211  DIRECT_BUILTIN (precrq_rs_ph_w, MIPS_V2HI_FTYPE_SI_SI, MASK_DSP),
10212  DIRECT_BUILTIN (precrqu_s_qb_ph, MIPS_V4QI_FTYPE_V2HI_V2HI, MASK_DSP),
10213  DIRECT_BUILTIN (preceq_w_phl, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10214  DIRECT_BUILTIN (preceq_w_phr, MIPS_SI_FTYPE_V2HI, MASK_DSP),
10215  DIRECT_BUILTIN (precequ_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10216  DIRECT_BUILTIN (precequ_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10217  DIRECT_BUILTIN (precequ_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10218  DIRECT_BUILTIN (precequ_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10219  DIRECT_BUILTIN (preceu_ph_qbl, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10220  DIRECT_BUILTIN (preceu_ph_qbr, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10221  DIRECT_BUILTIN (preceu_ph_qbla, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10222  DIRECT_BUILTIN (preceu_ph_qbra, MIPS_V2HI_FTYPE_V4QI, MASK_DSP),
10223  DIRECT_BUILTIN (shll_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10224  DIRECT_BUILTIN (shll_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10225  DIRECT_BUILTIN (shll_s_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10226  DIRECT_BUILTIN (shll_s_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10227  DIRECT_BUILTIN (shrl_qb, MIPS_V4QI_FTYPE_V4QI_SI, MASK_DSP),
10228  DIRECT_BUILTIN (shra_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10229  DIRECT_BUILTIN (shra_r_ph, MIPS_V2HI_FTYPE_V2HI_SI, MASK_DSP),
10230  DIRECT_BUILTIN (shra_r_w, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10231  DIRECT_BUILTIN (muleu_s_ph_qbl, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10232  DIRECT_BUILTIN (muleu_s_ph_qbr, MIPS_V2HI_FTYPE_V4QI_V2HI, MASK_DSP),
10233  DIRECT_BUILTIN (mulq_rs_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10234  DIRECT_BUILTIN (muleq_s_w_phl, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10235  DIRECT_BUILTIN (muleq_s_w_phr, MIPS_SI_FTYPE_V2HI_V2HI, MASK_DSP),
10236  DIRECT_BUILTIN (dpau_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10237  DIRECT_BUILTIN (dpau_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10238  DIRECT_BUILTIN (dpsu_h_qbl, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10239  DIRECT_BUILTIN (dpsu_h_qbr, MIPS_DI_FTYPE_DI_V4QI_V4QI, MASK_DSP),
10240  DIRECT_BUILTIN (dpaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10241  DIRECT_BUILTIN (dpsq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10242  DIRECT_BUILTIN (mulsaq_s_w_ph, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10243  DIRECT_BUILTIN (dpaq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10244  DIRECT_BUILTIN (dpsq_sa_l_w, MIPS_DI_FTYPE_DI_SI_SI, MASK_DSP),
10245  DIRECT_BUILTIN (maq_s_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10246  DIRECT_BUILTIN (maq_s_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10247  DIRECT_BUILTIN (maq_sa_w_phl, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10248  DIRECT_BUILTIN (maq_sa_w_phr, MIPS_DI_FTYPE_DI_V2HI_V2HI, MASK_DSP),
10249  DIRECT_BUILTIN (bitrev, MIPS_SI_FTYPE_SI, MASK_DSP),
10250  DIRECT_BUILTIN (insv, MIPS_SI_FTYPE_SI_SI, MASK_DSP),
10251  DIRECT_BUILTIN (repl_qb, MIPS_V4QI_FTYPE_SI, MASK_DSP),
10252  DIRECT_BUILTIN (repl_ph, MIPS_V2HI_FTYPE_SI, MASK_DSP),
10253  DIRECT_NO_TARGET_BUILTIN (cmpu_eq_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10254  DIRECT_NO_TARGET_BUILTIN (cmpu_lt_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10255  DIRECT_NO_TARGET_BUILTIN (cmpu_le_qb, MIPS_VOID_FTYPE_V4QI_V4QI, MASK_DSP),
10256  DIRECT_BUILTIN (cmpgu_eq_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10257  DIRECT_BUILTIN (cmpgu_lt_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10258  DIRECT_BUILTIN (cmpgu_le_qb, MIPS_SI_FTYPE_V4QI_V4QI, MASK_DSP),
10259  DIRECT_NO_TARGET_BUILTIN (cmp_eq_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10260  DIRECT_NO_TARGET_BUILTIN (cmp_lt_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10261  DIRECT_NO_TARGET_BUILTIN (cmp_le_ph, MIPS_VOID_FTYPE_V2HI_V2HI, MASK_DSP),
10262  DIRECT_BUILTIN (pick_qb, MIPS_V4QI_FTYPE_V4QI_V4QI, MASK_DSP),
10263  DIRECT_BUILTIN (pick_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10264  DIRECT_BUILTIN (packrl_ph, MIPS_V2HI_FTYPE_V2HI_V2HI, MASK_DSP),
10265  DIRECT_BUILTIN (extr_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10266  DIRECT_BUILTIN (extr_r_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10267  DIRECT_BUILTIN (extr_rs_w, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10268  DIRECT_BUILTIN (extr_s_h, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10269  DIRECT_BUILTIN (extp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10270  DIRECT_BUILTIN (extpdp, MIPS_SI_FTYPE_DI_SI, MASK_DSP),
10271  DIRECT_BUILTIN (shilo, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10272  DIRECT_BUILTIN (mthlip, MIPS_DI_FTYPE_DI_SI, MASK_DSP),
10273  DIRECT_NO_TARGET_BUILTIN (wrdsp, MIPS_VOID_FTYPE_SI_SI, MASK_DSP),
10274  DIRECT_BUILTIN (rddsp, MIPS_SI_FTYPE_SI, MASK_DSP),
10275  DIRECT_BUILTIN (lbux, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10276  DIRECT_BUILTIN (lhx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10277  DIRECT_BUILTIN (lwx, MIPS_SI_FTYPE_PTR_SI, MASK_DSP),
10278  BPOSGE_BUILTIN (32, MASK_DSP)
10279};
10280
10281/* This structure helps map builtin function codes onto the bdesc
10282   arrays above.  */
10283
10284struct bdesc_map
10285{
10286  /* The builtin function table that this entry describes.  */
10287  const struct builtin_description *bdesc;
10288
10289  /* The number of entries in the builtin function table.  */
10290  unsigned int size;
10291
10292  /* The target processor that supports these builtin functions.
10293     PROCESSOR_MAX means we enable them for all processors.  */
10294  enum processor_type proc;
10295};
10296
10297static const struct bdesc_map bdesc_arrays[] =
10298{
10299  { mips_bdesc, ARRAY_SIZE (mips_bdesc), PROCESSOR_MAX },
10300  { sb1_bdesc, ARRAY_SIZE (sb1_bdesc), PROCESSOR_SB1 },
10301  { dsp_bdesc, ARRAY_SIZE (dsp_bdesc), PROCESSOR_MAX }
10302};
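
/* Builtin function codes index the concatenation of these arrays in
   order: codes below ARRAY_SIZE (mips_bdesc) select mips_bdesc entries,
   the next ARRAY_SIZE (sb1_bdesc) codes select sb1_bdesc entries, and so
   on.  mips_expand_builtin and mips_init_builtins below both rely on
   this layout.  */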
10303
10304/* Take the head of argument list *ARGLIST and convert it into a form
10305   suitable for input operand OP of instruction ICODE.  Return the value
10306   and point *ARGLIST at the next element of the list.  */
10307
10308static rtx
10309mips_prepare_builtin_arg (enum insn_code icode,
10310			  unsigned int op, tree *arglist)
10311{
10312  rtx value;
10313  enum machine_mode mode;
10314
10315  value = expand_normal (TREE_VALUE (*arglist));
10316  mode = insn_data[icode].operand[op].mode;
10317  if (!insn_data[icode].operand[op].predicate (value, mode))
10318    {
10319      value = copy_to_mode_reg (mode, value);
10320      /* Check the predicate again.  */
10321      if (!insn_data[icode].operand[op].predicate (value, mode))
10322	{
10323	  error ("invalid argument to builtin function");
10324	  return const0_rtx;
10325	}
10326    }
10327
10328  *arglist = TREE_CHAIN (*arglist);
10329  return value;
10330}
10331
10332/* Return an rtx suitable for output operand OP of instruction ICODE.
10333   If TARGET is non-null, try to use it where possible.  */
10334
10335static rtx
10336mips_prepare_builtin_target (enum insn_code icode, unsigned int op, rtx target)
10337{
10338  enum machine_mode mode;
10339
10340  mode = insn_data[icode].operand[op].mode;
10341  if (target == 0 || !insn_data[icode].operand[op].predicate (target, mode))
10342    target = gen_reg_rtx (mode);
10343
10344  return target;
10345}
10346
10347/* Expand builtin functions.  This is called from TARGET_EXPAND_BUILTIN.  */
10348
10349rtx
10350mips_expand_builtin (tree exp, rtx target, rtx subtarget ATTRIBUTE_UNUSED,
10351		     enum machine_mode mode ATTRIBUTE_UNUSED,
10352		     int ignore ATTRIBUTE_UNUSED)
10353{
10354  enum insn_code icode;
10355  enum mips_builtin_type type;
10356  tree fndecl, arglist;
10357  unsigned int fcode;
10358  const struct builtin_description *bdesc;
10359  const struct bdesc_map *m;
10360
10361  fndecl = TREE_OPERAND (TREE_OPERAND (exp, 0), 0);
10362  arglist = TREE_OPERAND (exp, 1);
10363  fcode = DECL_FUNCTION_CODE (fndecl);
10364
10365  bdesc = NULL;
10366  for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10367    {
10368      if (fcode < m->size)
10369	{
10370	  bdesc = m->bdesc;
10371	  icode = bdesc[fcode].icode;
10372	  type = bdesc[fcode].builtin_type;
10373	  break;
10374	}
10375      fcode -= m->size;
10376    }
10377  if (bdesc == NULL)
10378    return 0;
10379
10380  switch (type)
10381    {
10382    case MIPS_BUILTIN_DIRECT:
10383      return mips_expand_builtin_direct (icode, target, arglist, true);
10384
10385    case MIPS_BUILTIN_DIRECT_NO_TARGET:
10386      return mips_expand_builtin_direct (icode, target, arglist, false);
10387
10388    case MIPS_BUILTIN_MOVT:
10389    case MIPS_BUILTIN_MOVF:
10390      return mips_expand_builtin_movtf (type, icode, bdesc[fcode].cond,
10391					target, arglist);
10392
10393    case MIPS_BUILTIN_CMP_ANY:
10394    case MIPS_BUILTIN_CMP_ALL:
10395    case MIPS_BUILTIN_CMP_UPPER:
10396    case MIPS_BUILTIN_CMP_LOWER:
10397    case MIPS_BUILTIN_CMP_SINGLE:
10398      return mips_expand_builtin_compare (type, icode, bdesc[fcode].cond,
10399					  target, arglist);
10400
10401    case MIPS_BUILTIN_BPOSGE32:
10402      return mips_expand_builtin_bposge (type, target);
10403
10404    default:
10405      return 0;
10406    }
10407}
10408
10409/* Initialize builtin functions.  This is called from TARGET_INIT_BUILTINS.  */
10410
10411void
10412mips_init_builtins (void)
10413{
10414  const struct builtin_description *d;
10415  const struct bdesc_map *m;
10416  tree types[(int) MIPS_MAX_FTYPE_MAX];
10417  tree V2SF_type_node;
10418  tree V2HI_type_node;
10419  tree V4QI_type_node;
10420  unsigned int offset;
10421
10422  /* We only provide builtins for -mpaired-single, -mips3d and -mdsp.  */
10423  if (!TARGET_PAIRED_SINGLE_FLOAT && !TARGET_DSP)
10424    return;
10425
10426  if (TARGET_PAIRED_SINGLE_FLOAT)
10427    {
10428      V2SF_type_node = build_vector_type_for_mode (float_type_node, V2SFmode);
10429
10430      types[MIPS_V2SF_FTYPE_V2SF]
10431	= build_function_type_list (V2SF_type_node, V2SF_type_node, NULL_TREE);
10432
10433      types[MIPS_V2SF_FTYPE_V2SF_V2SF]
10434	= build_function_type_list (V2SF_type_node,
10435				    V2SF_type_node, V2SF_type_node, NULL_TREE);
10436
10437      types[MIPS_V2SF_FTYPE_V2SF_V2SF_INT]
10438	= build_function_type_list (V2SF_type_node,
10439				    V2SF_type_node, V2SF_type_node,
10440				    integer_type_node, NULL_TREE);
10441
10442      types[MIPS_V2SF_FTYPE_V2SF_V2SF_V2SF_V2SF]
10443	= build_function_type_list (V2SF_type_node,
10444				    V2SF_type_node, V2SF_type_node,
10445				    V2SF_type_node, V2SF_type_node, NULL_TREE);
10446
10447      types[MIPS_V2SF_FTYPE_SF_SF]
10448	= build_function_type_list (V2SF_type_node,
10449				    float_type_node, float_type_node, NULL_TREE);
10450
10451      types[MIPS_INT_FTYPE_V2SF_V2SF]
10452	= build_function_type_list (integer_type_node,
10453				    V2SF_type_node, V2SF_type_node, NULL_TREE);
10454
10455      types[MIPS_INT_FTYPE_V2SF_V2SF_V2SF_V2SF]
10456	= build_function_type_list (integer_type_node,
10457				    V2SF_type_node, V2SF_type_node,
10458				    V2SF_type_node, V2SF_type_node, NULL_TREE);
10459
10460      types[MIPS_INT_FTYPE_SF_SF]
10461	= build_function_type_list (integer_type_node,
10462				    float_type_node, float_type_node, NULL_TREE);
10463
10464      types[MIPS_INT_FTYPE_DF_DF]
10465	= build_function_type_list (integer_type_node,
10466				    double_type_node, double_type_node, NULL_TREE);
10467
10468      types[MIPS_SF_FTYPE_V2SF]
10469	= build_function_type_list (float_type_node, V2SF_type_node, NULL_TREE);
10470
10471      types[MIPS_SF_FTYPE_SF]
10472	= build_function_type_list (float_type_node,
10473				    float_type_node, NULL_TREE);
10474
10475      types[MIPS_SF_FTYPE_SF_SF]
10476	= build_function_type_list (float_type_node,
10477				    float_type_node, float_type_node, NULL_TREE);
10478
10479      types[MIPS_DF_FTYPE_DF]
10480	= build_function_type_list (double_type_node,
10481				    double_type_node, NULL_TREE);
10482
10483      types[MIPS_DF_FTYPE_DF_DF]
10484	= build_function_type_list (double_type_node,
10485				    double_type_node, double_type_node, NULL_TREE);
10486    }
10487
10488  if (TARGET_DSP)
10489    {
10490      V2HI_type_node = build_vector_type_for_mode (intHI_type_node, V2HImode);
10491      V4QI_type_node = build_vector_type_for_mode (intQI_type_node, V4QImode);
10492
10493      types[MIPS_V2HI_FTYPE_V2HI_V2HI]
10494	= build_function_type_list (V2HI_type_node,
10495				    V2HI_type_node, V2HI_type_node,
10496				    NULL_TREE);
10497
10498      types[MIPS_SI_FTYPE_SI_SI]
10499	= build_function_type_list (intSI_type_node,
10500				    intSI_type_node, intSI_type_node,
10501				    NULL_TREE);
10502
10503      types[MIPS_V4QI_FTYPE_V4QI_V4QI]
10504	= build_function_type_list (V4QI_type_node,
10505				    V4QI_type_node, V4QI_type_node,
10506				    NULL_TREE);
10507
10508      types[MIPS_SI_FTYPE_V4QI]
10509	= build_function_type_list (intSI_type_node,
10510				    V4QI_type_node,
10511				    NULL_TREE);
10512
10513      types[MIPS_V2HI_FTYPE_V2HI]
10514	= build_function_type_list (V2HI_type_node,
10515				    V2HI_type_node,
10516				    NULL_TREE);
10517
10518      types[MIPS_SI_FTYPE_SI]
10519	= build_function_type_list (intSI_type_node,
10520				    intSI_type_node,
10521				    NULL_TREE);
10522
10523      types[MIPS_V4QI_FTYPE_V2HI_V2HI]
10524	= build_function_type_list (V4QI_type_node,
10525				    V2HI_type_node, V2HI_type_node,
10526				    NULL_TREE);
10527
10528      types[MIPS_V2HI_FTYPE_SI_SI]
10529	= build_function_type_list (V2HI_type_node,
10530				    intSI_type_node, intSI_type_node,
10531				    NULL_TREE);
10532
10533      types[MIPS_SI_FTYPE_V2HI]
10534	= build_function_type_list (intSI_type_node,
10535				    V2HI_type_node,
10536				    NULL_TREE);
10537
10538      types[MIPS_V2HI_FTYPE_V4QI]
10539	= build_function_type_list (V2HI_type_node,
10540				    V4QI_type_node,
10541				    NULL_TREE);
10542
10543      types[MIPS_V4QI_FTYPE_V4QI_SI]
10544	= build_function_type_list (V4QI_type_node,
10545				    V4QI_type_node, intSI_type_node,
10546				    NULL_TREE);
10547
10548      types[MIPS_V2HI_FTYPE_V2HI_SI]
10549	= build_function_type_list (V2HI_type_node,
10550				    V2HI_type_node, intSI_type_node,
10551				    NULL_TREE);
10552
10553      types[MIPS_V2HI_FTYPE_V4QI_V2HI]
10554	= build_function_type_list (V2HI_type_node,
10555				    V4QI_type_node, V2HI_type_node,
10556				    NULL_TREE);
10557
10558      types[MIPS_SI_FTYPE_V2HI_V2HI]
10559	= build_function_type_list (intSI_type_node,
10560				    V2HI_type_node, V2HI_type_node,
10561				    NULL_TREE);
10562
10563      types[MIPS_DI_FTYPE_DI_V4QI_V4QI]
10564	= build_function_type_list (intDI_type_node,
10565				    intDI_type_node, V4QI_type_node, V4QI_type_node,
10566				    NULL_TREE);
10567
10568      types[MIPS_DI_FTYPE_DI_V2HI_V2HI]
10569	= build_function_type_list (intDI_type_node,
10570				    intDI_type_node, V2HI_type_node, V2HI_type_node,
10571				    NULL_TREE);
10572
10573      types[MIPS_DI_FTYPE_DI_SI_SI]
10574	= build_function_type_list (intDI_type_node,
10575				    intDI_type_node, intSI_type_node, intSI_type_node,
10576				    NULL_TREE);
10577
10578      types[MIPS_V4QI_FTYPE_SI]
10579	= build_function_type_list (V4QI_type_node,
10580				    intSI_type_node,
10581				    NULL_TREE);
10582
10583      types[MIPS_V2HI_FTYPE_SI]
10584	= build_function_type_list (V2HI_type_node,
10585				    intSI_type_node,
10586				    NULL_TREE);
10587
10588      types[MIPS_VOID_FTYPE_V4QI_V4QI]
10589	= build_function_type_list (void_type_node,
10590				    V4QI_type_node, V4QI_type_node,
10591				    NULL_TREE);
10592
10593      types[MIPS_SI_FTYPE_V4QI_V4QI]
10594	= build_function_type_list (intSI_type_node,
10595				    V4QI_type_node, V4QI_type_node,
10596				    NULL_TREE);
10597
10598      types[MIPS_VOID_FTYPE_V2HI_V2HI]
10599	= build_function_type_list (void_type_node,
10600				    V2HI_type_node, V2HI_type_node,
10601				    NULL_TREE);
10602
10603      types[MIPS_SI_FTYPE_DI_SI]
10604	= build_function_type_list (intSI_type_node,
10605				    intDI_type_node, intSI_type_node,
10606				    NULL_TREE);
10607
10608      types[MIPS_DI_FTYPE_DI_SI]
10609	= build_function_type_list (intDI_type_node,
10610				    intDI_type_node, intSI_type_node,
10611				    NULL_TREE);
10612
10613      types[MIPS_VOID_FTYPE_SI_SI]
10614	= build_function_type_list (void_type_node,
10615				    intSI_type_node, intSI_type_node,
10616				    NULL_TREE);
10617
10618      types[MIPS_SI_FTYPE_PTR_SI]
10619	= build_function_type_list (intSI_type_node,
10620				    ptr_type_node, intSI_type_node,
10621				    NULL_TREE);
10622
10623      types[MIPS_SI_FTYPE_VOID]
10624	= build_function_type (intSI_type_node, void_list_node);
10625    }
10626
10627  /* Iterate through all of the bdesc arrays, initializing all of the
10628     builtin functions.  */
10629
10630  offset = 0;
10631  for (m = bdesc_arrays; m < &bdesc_arrays[ARRAY_SIZE (bdesc_arrays)]; m++)
10632    {
10633      if (m->proc == PROCESSOR_MAX || (m->proc == mips_arch))
10634	for (d = m->bdesc; d < &m->bdesc[m->size]; d++)
10635	  if ((d->target_flags & target_flags) == d->target_flags)
10636	    lang_hooks.builtin_function (d->name, types[d->function_type],
10637					 d - m->bdesc + offset,
10638					 BUILT_IN_MD, NULL, NULL);
10639      offset += m->size;
10640    }
10641}
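
/* As a concrete example of the loop above: when MASK_PAIRED_SINGLE_FLOAT
   is set in target_flags, the pll_ps entry of mips_bdesc registers
   __builtin_mips_pll_ps with the MIPS_V2SF_FTYPE_V2SF_V2SF prototype
   built earlier, using a function code equal to its index in mips_bdesc
   (offset 0, since mips_bdesc is the first array).  */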
10642
10643/* Expand a MIPS_BUILTIN_DIRECT function.  ICODE is the code of the
10644   .md pattern and ARGLIST is the list of function arguments.  TARGET,
10645   if nonnull, suggests a good place to put the result.
10646   HAS_TARGET indicates the function must return something.  */
10647
10648static rtx
10649mips_expand_builtin_direct (enum insn_code icode, rtx target, tree arglist,
10650			    bool has_target)
10651{
10652  rtx ops[MAX_RECOG_OPERANDS];
10653  int i = 0;
10654
10655  if (has_target)
10656    {
10657      /* Save the target in ops[0].  */
10658      ops[0] = mips_prepare_builtin_target (icode, 0, target);
10659      i = 1;
10660    }
10661
10662  /* ARGLIST may run out before we reach the last operand because some
10663     instructions have extra clobber registers.  */
10664  for (; i < insn_data[icode].n_operands && arglist != 0; i++)
10665    ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10666
10667  switch (i)
10668    {
10669    case 2:
10670      emit_insn (GEN_FCN (icode) (ops[0], ops[1]));
10671      break;
10672
10673    case 3:
10674      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2]));
10675      break;
10676
10677    case 4:
10678      emit_insn (GEN_FCN (icode) (ops[0], ops[1], ops[2], ops[3]));
10679      break;
10680
10681    default:
10682      gcc_unreachable ();
10683    }
10684  return target;
10685}
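
/* For instance, a two-argument direct builtin such as __builtin_mips_pll_ps
   maps its arguments to operands 1 and 2 and its result to operand 0, so
   the loop above leaves I == 3 and the "case 3" branch emits the insn.
   A no-target builtin such as __builtin_mips_wrdsp instead starts at
   operand 0, treating every operand as an argument.  (Operand counts are
   illustrative; the .md patterns are authoritative.)  */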
10686
10687/* Expand a __builtin_mips_movt_*_ps() or __builtin_mips_movf_*_ps()
10688   function (TYPE says which).  ARGLIST is the list of arguments to the
10689   function, ICODE is the instruction that should be used to compare
10690   the first two arguments, and COND is the condition it should test.
10691   TARGET, if nonnull, suggests a good place to put the result.  */
10692
10693static rtx
10694mips_expand_builtin_movtf (enum mips_builtin_type type,
10695			   enum insn_code icode, enum mips_fp_condition cond,
10696			   rtx target, tree arglist)
10697{
10698  rtx cmp_result, op0, op1;
10699
10700  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10701  op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10702  op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10703  emit_insn (GEN_FCN (icode) (cmp_result, op0, op1, GEN_INT (cond)));
10704
10705  icode = CODE_FOR_mips_cond_move_tf_ps;
10706  target = mips_prepare_builtin_target (icode, 0, target);
10707  if (type == MIPS_BUILTIN_MOVT)
10708    {
10709      op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10710      op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10711    }
10712  else
10713    {
10714      op0 = mips_prepare_builtin_arg (icode, 1, &arglist);
10715      op1 = mips_prepare_builtin_arg (icode, 2, &arglist);
10716    }
10717  emit_insn (gen_mips_cond_move_tf_ps (target, op0, op1, cmp_result));
10718  return target;
10719}
10720
10721/* Move VALUE_IF_TRUE into TARGET if CONDITION is true; move VALUE_IF_FALSE
10722   into TARGET otherwise.  Return TARGET.  */
10723
10724static rtx
10725mips_builtin_branch_and_move (rtx condition, rtx target,
10726			      rtx value_if_true, rtx value_if_false)
10727{
10728  rtx true_label, done_label;
10729
10730  true_label = gen_label_rtx ();
10731  done_label = gen_label_rtx ();
10732
10733  /* First assume that CONDITION is false.  */
10734  emit_move_insn (target, value_if_false);
10735
10736  /* Branch to TRUE_LABEL if CONDITION is true and DONE_LABEL otherwise.  */
10737  emit_jump_insn (gen_condjump (condition, true_label));
10738  emit_jump_insn (gen_jump (done_label));
10739  emit_barrier ();
10740
10741  /* Fix TARGET if CONDITION is true.  */
10742  emit_label (true_label);
10743  emit_move_insn (target, value_if_true);
10744
10745  emit_label (done_label);
10746  return target;
10747}
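
/* Schematically, the sequence emitted above is:

        (set TARGET VALUE_IF_FALSE)
        branch to TRUE_LABEL if CONDITION
        jump DONE_LABEL
     TRUE_LABEL:
        (set TARGET VALUE_IF_TRUE)
     DONE_LABEL:  */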
10748
10749/* Expand a comparison builtin of type BUILTIN_TYPE.  ICODE is the code
10750   of the comparison instruction and COND is the condition it should test.
10751   ARGLIST is the list of function arguments and TARGET, if nonnull,
10752   suggests a good place to put the boolean result.  */
10753
10754static rtx
10755mips_expand_builtin_compare (enum mips_builtin_type builtin_type,
10756			     enum insn_code icode, enum mips_fp_condition cond,
10757			     rtx target, tree arglist)
10758{
10759  rtx offset, condition, cmp_result, ops[MAX_RECOG_OPERANDS];
10760  int i;
10761
10762  if (target == 0 || GET_MODE (target) != SImode)
10763    target = gen_reg_rtx (SImode);
10764
10765  /* Prepare the operands to the comparison.  */
10766  cmp_result = mips_prepare_builtin_target (icode, 0, 0);
10767  for (i = 1; i < insn_data[icode].n_operands - 1; i++)
10768    ops[i] = mips_prepare_builtin_arg (icode, i, &arglist);
10769
10770  switch (insn_data[icode].n_operands)
10771    {
10772    case 4:
10773      emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2], GEN_INT (cond)));
10774      break;
10775
10776    case 6:
10777      emit_insn (GEN_FCN (icode) (cmp_result, ops[1], ops[2],
10778				  ops[3], ops[4], GEN_INT (cond)));
10779      break;
10780
10781    default:
10782      gcc_unreachable ();
10783    }
10784
10785  /* If the comparison sets more than one register, we define the result
10786     to be 0 if all registers are false and -1 if all registers are true.
10787     The value of the complete result is indeterminate otherwise.  */
10788  switch (builtin_type)
10789    {
10790    case MIPS_BUILTIN_CMP_ALL:
10791      condition = gen_rtx_NE (VOIDmode, cmp_result, constm1_rtx);
10792      return mips_builtin_branch_and_move (condition, target,
10793					   const0_rtx, const1_rtx);
10794
10795    case MIPS_BUILTIN_CMP_UPPER:
10796    case MIPS_BUILTIN_CMP_LOWER:
10797      offset = GEN_INT (builtin_type == MIPS_BUILTIN_CMP_UPPER);
10798      condition = gen_single_cc (cmp_result, offset);
10799      return mips_builtin_branch_and_move (condition, target,
10800					   const1_rtx, const0_rtx);
10801
10802    default:
10803      condition = gen_rtx_NE (VOIDmode, cmp_result, const0_rtx);
10804      return mips_builtin_branch_and_move (condition, target,
10805					   const1_rtx, const0_rtx);
10806    }
10807}
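
/* In the switch on n_operands above, the four-operand form corresponds to
   the single-comparison builtins (result, two sources and the condition),
   while the six-operand form is used by the _4s builtins, which compare
   two pairs of V2SF arguments at once.  */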
10808
10809/* Expand a bposge builtin of type BUILTIN_TYPE.  TARGET, if nonnull,
10810   suggests a good place to put the boolean result.  */
10811
10812static rtx
10813mips_expand_builtin_bposge (enum mips_builtin_type builtin_type, rtx target)
10814{
10815  rtx condition, cmp_result;
10816  int cmp_value;
10817
10818  if (target == 0 || GET_MODE (target) != SImode)
10819    target = gen_reg_rtx (SImode);
10820
10821  cmp_result = gen_rtx_REG (CCDSPmode, CCDSP_PO_REGNUM);
10822
10823  if (builtin_type == MIPS_BUILTIN_BPOSGE32)
10824    cmp_value = 32;
10825  else
10826    gcc_unreachable ();
10827
10828  condition = gen_rtx_GE (VOIDmode, cmp_result, GEN_INT (cmp_value));
10829  return mips_builtin_branch_and_move (condition, target,
10830				       const1_rtx, const0_rtx);
10831}
10832
10833/* Set SYMBOL_REF_FLAGS for the SYMBOL_REF inside RTL, which belongs to DECL.
10834   FIRST is true if this is the first time handling this decl.  */
10835
10836static void
10837mips_encode_section_info (tree decl, rtx rtl, int first)
10838{
10839  default_encode_section_info (decl, rtl, first);
10840
10841  if (TREE_CODE (decl) == FUNCTION_DECL
10842      && lookup_attribute ("long_call", TYPE_ATTRIBUTES (TREE_TYPE (decl))))
10843    {
10844      rtx symbol = XEXP (rtl, 0);
10845      SYMBOL_REF_FLAGS (symbol) |= SYMBOL_FLAG_LONG_CALL;
10846    }
10847}
10848
10849/* Implement TARGET_EXTRA_LIVE_ON_ENTRY.  PIC_FUNCTION_ADDR_REGNUM is live
10850   on entry to a function when generating -mshared abicalls code.  */
10851
10852static void
10853mips_extra_live_on_entry (bitmap regs)
10854{
10855  if (TARGET_ABICALLS && !TARGET_ABSOLUTE_ABICALLS)
10856    bitmap_set_bit (regs, PIC_FUNCTION_ADDR_REGNUM);
10857}
10858
10859/* SImode values are represented as sign-extended to DImode.  */
10860
10861int
10862mips_mode_rep_extended (enum machine_mode mode, enum machine_mode mode_rep)
10863{
10864  if (TARGET_64BIT && mode == SImode && mode_rep == DImode)
10865    return SIGN_EXTEND;
10866
10867  return UNKNOWN;
10868}
10869
10870#include "gt-mips.h"
10871