1;; Predicate definitions for IA-32 and x86-64.
2;; Copyright (C) 2004-2020 Free Software Foundation, Inc.
3;;
4;; This file is part of GCC.
5;;
6;; GCC is free software; you can redistribute it and/or modify
7;; it under the terms of the GNU General Public License as published by
8;; the Free Software Foundation; either version 3, or (at your option)
9;; any later version.
10;;
11;; GCC is distributed in the hope that it will be useful,
12;; but WITHOUT ANY WARRANTY; without even the implied warranty of
13;; MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
14;; GNU General Public License for more details.
15;;
16;; You should have received a copy of the GNU General Public License
17;; along with GCC; see the file COPYING3.  If not see
18;; <http://www.gnu.org/licenses/>.
19
;; Return true if OP is either an i387 or SSE fp register.
(define_predicate "any_fp_register_operand"
  (and (match_code "reg")
       (match_test "ANY_FP_REGNO_P (REGNO (op))")))

;; Return true if OP is an i387 fp register.
(define_predicate "fp_register_operand"
  (and (match_code "reg")
       (match_test "STACK_REGNO_P (REGNO (op))")))

;; True if the operand is a GENERAL class register.
(define_predicate "general_reg_operand"
  (and (match_code "reg")
       (match_test "GENERAL_REGNO_P (REGNO (op))")))

;; True if the operand is a nonimmediate operand with GENERAL class register.
(define_predicate "nonimmediate_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "nonimmediate_operand")))

;; True if the operand is a general operand with GENERAL class register.
(define_predicate "general_gr_operand"
  (if_then_else (match_code "reg")
    (match_test "GENERAL_REGNO_P (REGNO (op))")
    (match_operand 0 "general_operand")))

;; True if the operand is an MMX register.
(define_predicate "mmx_reg_operand"
  (and (match_code "reg")
       (match_test "MMX_REGNO_P (REGNO (op))")))

;; Match register operands, but include memory operands for
;; !TARGET_MMX_WITH_SSE.
(define_predicate "register_mmxmem_operand"
  (ior (match_operand 0 "register_operand")
       (and (not (match_test "TARGET_MMX_WITH_SSE"))
	    (match_operand 0 "memory_operand"))))
58
;; True if the operand is an SSE register.
(define_predicate "sse_reg_operand"
  (and (match_code "reg")
       (match_test "SSE_REGNO_P (REGNO (op))")))

;; Return true if op is a QImode register.
(define_predicate "any_QIreg_operand"
  (and (match_code "reg")
       (match_test "ANY_QI_REGNO_P (REGNO (op))")))

;; Return true if op is one of QImode registers: %[abcd][hl].
(define_predicate "QIreg_operand"
  (and (match_code "reg")
       (match_test "QI_REGNO_P (REGNO (op))")))

;; Return true if op is a QImode register operand other than %[abcd][hl].
;; Only valid for 64-bit targets.
(define_predicate "ext_QIreg_operand"
  (and (match_test "TARGET_64BIT")
       (match_code "reg")
       (not (match_test "QI_REGNO_P (REGNO (op))"))))

;; Return true if op is the AX register.
(define_predicate "ax_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == AX_REG")))

;; Return true if op is the flags register.
(define_predicate "flags_reg_operand"
  (and (match_code "reg")
       (match_test "REGNO (op) == FLAGS_REG")))

;; Match a DI, SI or HImode register for a zero_extract.
;; DImode is accepted only on 64-bit targets.
(define_special_predicate "ext_register_operand"
  (and (match_operand 0 "register_operand")
       (ior (and (match_test "TARGET_64BIT")
		 (match_test "GET_MODE (op) == DImode"))
	    (match_test "GET_MODE (op) == SImode")
	    (match_test "GET_MODE (op) == HImode"))))

;; Match a DI, SI, HI or QImode nonimmediate_operand.
;; DImode is accepted only on 64-bit targets.
(define_special_predicate "int_nonimmediate_operand"
  (and (match_operand 0 "nonimmediate_operand")
       (ior (and (match_test "TARGET_64BIT")
		 (match_test "GET_MODE (op) == DImode"))
	    (match_test "GET_MODE (op) == SImode")
	    (match_test "GET_MODE (op) == HImode")
	    (match_test "GET_MODE (op) == QImode"))))
106
;; Match register operands, but include memory operands for TARGET_SSE_MATH.
(define_predicate "register_ssemem_operand"
  (if_then_else
    (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Match nonimmediate operands, but exclude memory operands
;; for TARGET_SSE_MATH if TARGET_MIX_SSE_I387 is not enabled.
(define_predicate "nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; The above predicate, suitable for x87 arithmetic operators.
(define_predicate "x87nonimm_ssenomem_operand"
  (if_then_else
    (and (match_test "SSE_FLOAT_MODE_P (mode) && TARGET_SSE_MATH")
	 (not (match_test "TARGET_MIX_SSE_I387 && X87_ENABLE_ARITH (mode)")))
    (match_operand 0 "register_operand")
    (match_operand 0 "nonimmediate_operand")))

;; Match register operands, but include memory operands for TARGET_SSE4_1.
(define_predicate "register_sse4nonimm_operand"
  (if_then_else (match_test "TARGET_SSE4_1")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; Return true if VALUE is a symbol reference.
(define_predicate "symbol_operand"
  (match_code "symbol_ref"))
140
;; Return true if VALUE can be stored in a sign extended immediate field.
(define_predicate "x86_64_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  if (!TARGET_64BIT)
    return immediate_operand (op, mode);

  switch (GET_CODE (op))
    {
    case CONST_INT:
      {
	/* Accept only values that fit in a sign-extended 32-bit field.  */
        HOST_WIDE_INT val = INTVAL (op);
        return trunc_int_for_mode (val, SImode) == val;
      }
    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit;
	 in CM_SMALL_PIC model we know it fits if it is local to the shared
	 library.  Don't count TLS SYMBOL_REFs here, since they should fit
	 only if inside of UNSPEC handled below.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_KERNEL
	      || (ix86_cmodel == CM_MEDIUM && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return (ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM
	      || ix86_cmodel == CM_KERNEL);

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == UNSPEC)
	switch (XINT (XEXP (op, 0), 1))
	  {
	  case UNSPEC_GOTPCREL:
	  case UNSPEC_DTPOFF:
	  case UNSPEC_GOTNTPOFF:
	  case UNSPEC_NTPOFF:
	    return true;
	  default:
	    break;
	  }

      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE && GET_CODE (op1) != UNSPEC)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  /* The offset itself must fit in a sign-extended 32-bit field.  */
	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
	        return false;

	      /* For CM_SMALL assume that latest object is 16MB before
		 end of 31bits boundary.  We may also accept pretty
		 large negative constants knowing that all objects are
		 in the positive half of address space.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset < 16*1024*1024)
		return true;
	      /* For CM_KERNEL we know that all objects reside in the
		 negative half of 32bits address space.  We may not
		 accept negative offsets, since they may be just off
		 and we may accept pretty large positive ones.  */
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset < 16*1024*1024)
		return true;
	      if (ix86_cmodel == CM_KERNEL
		  && offset > 0)
		return true;
	      break;

	    case UNSPEC:
	      switch (XINT (op1, 1))
		{
		case UNSPEC_DTPOFF:
		case UNSPEC_NTPOFF:
		  return true;
		}
	      break;

	    default:
	      break;
	    }
	}
      break;

      default:
	gcc_unreachable ();
    }

  return false;
})
266
;; Return true if VALUE can be stored in the zero extended immediate field.
(define_predicate "x86_64_zext_immediate_operand"
  (match_code "const_int,symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      /* Accept only values representable in an unsigned 32-bit field.  */
      return !(INTVAL (op) & ~(HOST_WIDE_INT) 0xffffffff);

    case SYMBOL_REF:
      /* TLS symbols are not constant.  */
      if (SYMBOL_REF_TLS_MODEL (op))
	return false;

      /* Load the external function address via the GOT slot.  */
      if (ix86_force_load_from_GOT_p (op))
	return false;

      /* For certain code models, the symbolic references are known to fit.  */
      return (ix86_cmodel == CM_SMALL
	      || (ix86_cmodel == CM_MEDIUM
		  && !SYMBOL_REF_FAR_ADDR_P (op)));

    case LABEL_REF:
      /* For certain code models, the code is near as well.  */
      return ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM;

    case CONST:
      /* We also may accept the offsetted memory references in certain
	 special cases.  */
      if (GET_CODE (XEXP (op, 0)) == PLUS)
	{
	  rtx op1 = XEXP (XEXP (op, 0), 0);
	  rtx op2 = XEXP (XEXP (op, 0), 1);

	  if (ix86_cmodel == CM_LARGE)
	    return false;
	  if (!CONST_INT_P (op2))
	    return false;

	  /* The offset itself must fit in a sign-extended 32-bit field.  */
	  HOST_WIDE_INT offset = INTVAL (op2);
	  if (trunc_int_for_mode (offset, SImode) != offset)
	    return false;

	  switch (GET_CODE (op1))
	    {
	    case SYMBOL_REF:
	      /* TLS symbols are not constant.  */
	      if (SYMBOL_REF_TLS_MODEL (op1))
		return false;

	      /* Load the external function address via the GOT slot.  */
	      if (ix86_force_load_from_GOT_p (op1))
	        return false;

	      /* For small code model we may accept pretty large positive
		 offsets, since one bit is available for free.  Negative
		 offsets are limited by the size of NULL pointer area
		 specified by the ABI.  */
	      if ((ix86_cmodel == CM_SMALL
		   || (ix86_cmodel == CM_MEDIUM
		       && !SYMBOL_REF_FAR_ADDR_P (op1)))
		  && offset > -0x10000)
		return true;
	      /* ??? For the kernel, we may accept adjustment of
		 -0x10000000, since we know that it will just convert
		 negative address space to positive, but perhaps this
		 is not worthwhile.  */
	      break;

	    case LABEL_REF:
	      /* These conditions are similar to SYMBOL_REF ones, just the
		 constraints for code models differ.  */
	      if ((ix86_cmodel == CM_SMALL || ix86_cmodel == CM_MEDIUM)
		  && offset > -0x10000)
		return true;
	      break;

	    default:
	      return false;
	    }
	}
      break;

    default:
      gcc_unreachable ();
    }
  return false;
})
356
;; Return true if VALUE is a constant integer whose low and high words satisfy
;; x86_64_immediate_operand.
(define_predicate "x86_64_hilo_int_operand"
  (match_code "const_int,const_wide_int")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      return x86_64_immediate_operand (op, mode);

    case CONST_WIDE_INT:
      /* Check each of the two HOST_WIDE_INT elements separately.  */
      gcc_assert (CONST_WIDE_INT_NUNITS (op) == 2);
      return (x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op, 0)),
					DImode)
	      && x86_64_immediate_operand (GEN_INT (CONST_WIDE_INT_ELT (op,
									1)),
					   DImode));

    default:
      gcc_unreachable ();
    }
})

;; Return true if VALUE is a constant integer whose value is
;; x86_64_immediate_operand value zero extended from word mode to mode.
(define_predicate "x86_64_dwzext_immediate_operand"
  (match_code "const_int,const_wide_int")
{
  switch (GET_CODE (op))
    {
    case CONST_INT:
      if (!TARGET_64BIT)
	return UINTVAL (op) <= HOST_WIDE_INT_UC (0xffffffff);
      return UINTVAL (op) <= HOST_WIDE_INT_UC (0x7fffffff);

    case CONST_WIDE_INT:
      if (!TARGET_64BIT)
	return false;
      /* High word must be zero; low word must fit in a sign-extended
	 32-bit immediate.  */
      return (CONST_WIDE_INT_NUNITS (op) == 2
	      && CONST_WIDE_INT_ELT (op, 1) == 0
	      && (trunc_int_for_mode (CONST_WIDE_INT_ELT (op, 0), SImode)
		  == (HOST_WIDE_INT) CONST_WIDE_INT_ELT (op, 0)));

    default:
      gcc_unreachable ();
    }
})

;; Return true if size of VALUE can be stored in a sign
;; extended immediate field.
(define_predicate "x86_64_immediate_size_operand"
  (and (match_code "symbol_ref")
       (ior (not (match_test "TARGET_64BIT"))
	    (match_test "ix86_cmodel == CM_SMALL")
	    (match_test "ix86_cmodel == CM_KERNEL"))))

;; Return true if OP is general operand representable on x86_64.
(define_predicate "x86_64_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP's both words are general operands representable
;; on x86_64.
(define_predicate "x86_64_hilo_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_hilo_int_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is non-VOIDmode general operand representable
;; on x86_64.  This predicate is used in sign-extending conversion
;; operations that require non-VOIDmode immediate operands.
(define_predicate "x86_64_sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "x86_64_general_operand")))
434
;; Return true if OP is non-VOIDmode general operand.  This predicate
;; is used in sign-extending conversion operations that require
;; non-VOIDmode immediate operands.
(define_predicate "sext_operand"
  (and (match_test "GET_MODE (op) != VOIDmode")
       (match_operand 0 "general_operand")))

;; Return true if OP is representable on x86_64 as zero-extended operand.
;; This predicate is used in zero-extending conversion operations that
;; require non-VOIDmode immediate operands.
(define_predicate "x86_64_zext_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (and (match_operand 0 "x86_64_zext_immediate_operand")
	      (match_test "GET_MODE (op) != VOIDmode")))
    (match_operand 0 "nonimmediate_operand")))

;; Return true if OP is general operand representable on x86_64
;; as either sign extended or zero extended constant.
(define_predicate "x86_64_szext_general_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "nonimmediate_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "general_operand")))

;; Return true if OP is nonmemory operand representable on x86_64.
(define_predicate "x86_64_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))

;; Return true if OP is nonmemory operand representable on x86_64
;; as either sign extended or zero extended constant.
(define_predicate "x86_64_szext_nonmemory_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (ior (match_operand 0 "register_operand")
	 (match_operand 0 "x86_64_immediate_operand")
	 (match_operand 0 "x86_64_zext_immediate_operand"))
    (match_operand 0 "nonmemory_operand")))
475
;; Return true when operand is PIC expression that can be computed by lea
;; operation.
(define_predicate "pic_32bit_operand"
  (match_code "const,symbol_ref,label_ref")
{
  if (!flag_pic)
    return false;

  /* Rule out relocations that translate into 64bit constants.  */
  if (TARGET_64BIT && GET_CODE (op) == CONST)
    {
      op = XEXP (op, 0);
      /* Strip an optional integer offset before looking at the UNSPEC.  */
      if (GET_CODE (op) == PLUS && CONST_INT_P (XEXP (op, 1)))
	op = XEXP (op, 0);
      if (GET_CODE (op) == UNSPEC
	  && (XINT (op, 1) == UNSPEC_GOTOFF
	      || XINT (op, 1) == UNSPEC_GOT))
	return false;
    }

  return symbolic_operand (op, mode);
})

;; Return true if OP is nonmemory operand acceptable by movabs patterns.
(define_predicate "x86_64_movabs_operand"
  (and (match_operand 0 "nonmemory_operand")
       (not (match_operand 0 "pic_32bit_operand"))))

;; Return true if OP is either a symbol reference or a sum of a symbol
;; reference and a constant.
(define_predicate "symbolic_operand"
  (match_code "symbol_ref,label_ref,const")
{
  switch (GET_CODE (op))
    {
    case SYMBOL_REF:
    case LABEL_REF:
      return true;

    case CONST:
      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF
	  || (GET_CODE (op) == UNSPEC
	      && (XINT (op, 1) == UNSPEC_GOT
		  || XINT (op, 1) == UNSPEC_GOTOFF
		  || XINT (op, 1) == UNSPEC_PCREL
		  || XINT (op, 1) == UNSPEC_GOTPCREL)))
	return true;
      if (GET_CODE (op) != PLUS
	  || !CONST_INT_P (XEXP (op, 1)))
	return false;

      op = XEXP (op, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      /* Only @GOTOFF gets offsets.  */
      if (GET_CODE (op) != UNSPEC
	  || XINT (op, 1) != UNSPEC_GOTOFF)
	return false;

      op = XVECEXP (op, 0, 0);
      if (GET_CODE (op) == SYMBOL_REF
	  || GET_CODE (op) == LABEL_REF)
	return true;
      return false;

    default:
      gcc_unreachable ();
    }
})
548
;; Return true if OP is a symbolic operand that resolves locally.
(define_predicate "local_symbolic_operand"
  (match_code "const,label_ref,symbol_ref")
{
  /* Strip an optional (const (plus SYM INT)) wrapper.  */
  if (GET_CODE (op) == CONST
      && GET_CODE (XEXP (op, 0)) == PLUS
      && CONST_INT_P (XEXP (XEXP (op, 0), 1)))
    op = XEXP (XEXP (op, 0), 0);

  if (GET_CODE (op) == LABEL_REF)
    return true;

  if (GET_CODE (op) != SYMBOL_REF)
    return false;

  if (SYMBOL_REF_TLS_MODEL (op))
    return false;

  /* Dll-imported symbols are always external.  */
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  if (SYMBOL_REF_LOCAL_P (op))
    return true;

  /* There is, however, a not insubstantial body of code in the rest of
     the compiler that assumes it can just stick the results of
     ASM_GENERATE_INTERNAL_LABEL in a symbol_ref and have done.  */
  /* ??? This is a hack.  Should update the body of the compiler to
     always create a DECL and invoke targetm.encode_section_info.  */
  if (strncmp (XSTR (op, 0), internal_label_prefix,
	       internal_label_prefix_len) == 0)
    return true;

  return false;
})

;; Test for a legitimate @GOTOFF operand.
;;
;; VxWorks does not impose a fixed gap between segments; the run-time
;; gap can be different from the object-file gap.  We therefore can't
;; use @GOTOFF unless we are absolutely sure that the symbol is in the
;; same segment as the GOT.  Unfortunately, the flexibility of linker
;; scripts means that we can't be sure of that in general, so assume
;; that @GOTOFF is never valid on VxWorks.
(define_predicate "gotoff_operand"
  (and (not (match_test "TARGET_VXWORKS_RTP"))
       (match_operand 0 "local_symbolic_operand")))

;; Test for various thread-local symbols.
(define_special_predicate "tls_symbolic_operand"
  (and (match_code "symbol_ref")
       (match_test "SYMBOL_REF_TLS_MODEL (op)")))

(define_special_predicate "tls_modbase_operand"
  (and (match_code "symbol_ref")
       (match_test "op == ix86_tls_module_base ()")))

;; True if OP is an insn pattern that ix86_tls_address_pattern_p
;; recognizes as referencing a TLS address.
(define_predicate "tls_address_pattern"
  (and (match_code "set,parallel,unspec,unspec_volatile")
       (match_test "ix86_tls_address_pattern_p (op)")))
609
;; Test for a pc-relative call operand
(define_predicate "constant_call_address_operand"
  (match_code "symbol_ref")
{
  if (ix86_cmodel == CM_LARGE || ix86_cmodel == CM_LARGE_PIC
      || flag_force_indirect_call)
    return false;
  if (TARGET_DLLIMPORT_DECL_ATTRIBUTES && SYMBOL_REF_DLLIMPORT_P (op))
    return false;
  return true;
})

;; P6 processors will jump to the address after the decrement when %esp
;; is used as a call operand, so they will execute return address as a code.
;; See Pentium Pro errata 70, Pentium 2 errata A33 and Pentium 3 errata E17.

(define_predicate "call_register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);

  /* See the errata note above: disallow %esp on 32-bit targets.  */
  if (!TARGET_64BIT && op == stack_pointer_rtx)
    return false;

  return register_no_elim_operand (op, mode);
})

;; True for any non-virtual or eliminable register.  Used in places where
;; instantiation of such a register may cause the pattern to not be recognized.
(define_predicate "register_no_elim_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  return !(op == arg_pointer_rtx
	   || op == frame_pointer_rtx
	   || IN_RANGE (REGNO (op),
			FIRST_PSEUDO_REGISTER, LAST_VIRTUAL_REGISTER));
})

;; Similarly, but include the stack pointer.  This is used to prevent esp
;; from being used as an index reg.
(define_predicate "index_register_operand"
  (match_operand 0 "register_operand")
{
  if (SUBREG_P (op))
    op = SUBREG_REG (op);
  if (reload_completed)
    return REG_OK_FOR_INDEX_STRICT_P (op);
  else
    return REG_OK_FOR_INDEX_NONSTRICT_P (op);
})
663
;; Return false if this is any eliminable register.  Otherwise general_operand.
(define_predicate "general_no_elim_operand"
  (if_then_else (match_code "reg,subreg")
    (match_operand 0 "register_no_elim_operand")
    (match_operand 0 "general_operand")))

;; Return false if this is any eliminable register.  Otherwise
;; register_operand or a constant.
(define_predicate "nonmemory_no_elim_operand"
  (ior (match_operand 0 "register_no_elim_operand")
       (match_operand 0 "immediate_operand")))

;; Test for a valid operand for indirect branch.
(define_predicate "indirect_branch_operand"
  (ior (match_operand 0 "register_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (not (match_test "TARGET_X32"))
	    (match_operand 0 "memory_operand"))))

;; Return true if OP is a memory operand that can be used in sibcalls.
;; Since sibcall never returns, we can only use call-clobbered register
;; as GOT base.  Allow GOT slot here only with pseudo register as GOT
;; base.  Properly handle sibcall over GOT slot with *sibcall_GOT_32
;; and *sibcall_value_GOT_32 patterns.
(define_predicate "sibcall_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  if (CONSTANT_P (op))
    return true;
  if (GET_CODE (op) == PLUS && REG_P (XEXP (op, 0)))
    {
      int regno = REGNO (XEXP (op, 0));
      if (!HARD_REGISTER_NUM_P (regno) || call_used_or_fixed_reg_p (regno))
	{
	  op = XEXP (op, 1);
	  if (GOT32_symbol_operand (op, VOIDmode))
	    return true;
	}
    }
  return false;
})

;; Return true if OP is a GOT memory operand.
(define_predicate "GOT_memory_operand"
  (match_operand 0 "memory_operand")
{
  op = XEXP (op, 0);
  return (GET_CODE (op) == CONST
	  && GET_CODE (XEXP (op, 0)) == UNSPEC
	  && XINT (XEXP (op, 0), 1) == UNSPEC_GOTPCREL);
})

;; Test for a valid operand for a call instruction.
;; Allow constant call address operands in Pmode only.
(define_special_predicate "call_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "call_register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Similarly, but for tail calls, in which we cannot allow memory references.
(define_special_predicate "sibcall_insn_operand"
  (ior (match_test "constant_call_address_operand
		     (op, mode == VOIDmode ? mode : Pmode)")
       (match_operand 0 "register_no_elim_operand")
       (and (not (match_test "TARGET_INDIRECT_BRANCH_REGISTER"))
	    (ior (and (not (match_test "TARGET_X32"))
		      (match_operand 0 "sibcall_memory_operand"))
		 (and (match_test "TARGET_X32 && Pmode == DImode")
		      (match_operand 0 "GOT_memory_operand"))))))

;; Return true if OP is a 32-bit GOT symbol operand.
(define_predicate "GOT32_symbol_operand"
  (match_test "GET_CODE (op) == CONST
               && GET_CODE (XEXP (op, 0)) == UNSPEC
               && XINT (XEXP (op, 0), 1) == UNSPEC_GOT"))
745
;; Match exactly zero.
(define_predicate "const0_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST0_RTX (mode);
})

;; Match one or a vector with all elements equal to one.
(define_predicate "const1_operand"
  (match_code "const_int,const_double,const_vector")
{
  if (mode == VOIDmode)
    mode = GET_MODE (op);
  return op == CONST1_RTX (mode);
})

;; Match exactly -1.
(define_predicate "constm1_operand"
  (and (match_code "const_int")
       (match_test "op == constm1_rtx")))

;; Match exactly eight.
(define_predicate "const8_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 8")))

;; Match exactly 128.
(define_predicate "const128_operand"
  (and (match_code "const_int")
       (match_test "INTVAL (op) == 128")))

;; Match exactly 0x0FFFFFFFF in anddi as a zero-extension operation
(define_predicate "const_32bit_mask"
  (and (match_code "const_int")
       (match_test "trunc_int_for_mode (INTVAL (op), DImode)
		    == (HOST_WIDE_INT) 0xffffffff")))

;; Match 2, 4, or 8.  Used for leal multiplicands.
(define_predicate "const248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 4 || i == 8;
})

;; Match 1, 2, or 3.  Used for lea shift amounts.
(define_predicate "const123_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 3;
})

;; Match 2, 3, 6, or 7
(define_predicate "const2367_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 2 || i == 3 || i == 6 || i == 7;
})

;; Match 1, 2, 4, or 8
(define_predicate "const1248_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 1 || i == 2 || i == 4 || i == 8;
})

;; Match 3, 5, or 9.  Used for leal multiplicands.
(define_predicate "const359_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 3 || i == 5 || i == 9;
})

;; Match 4 or 8 to 11.  Used for embedded rounding.
(define_predicate "const_4_or_8_to_11_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || (i >= 8 && i <= 11);
})

;; Match 4 or 8. Used for SAE.
(define_predicate "const48_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT i = INTVAL (op);
  return i == 4 || i == 8;
})
840
;; Match 0 or 1.
(define_predicate "const_0_to_1_operand"
  (and (match_code "const_int")
       (ior (match_test "op == const0_rtx")
	    (match_test "op == const1_rtx"))))

;; Match 0 to 3.
(define_predicate "const_0_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 3)")))

;; Match 0 to 4.
(define_predicate "const_0_to_4_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 4)")))

;; Match 0 to 5.
(define_predicate "const_0_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 5)")))

;; Match 0 to 7.
(define_predicate "const_0_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 7)")))

;; Match 0 to 15.
(define_predicate "const_0_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 15)")))

;; Match 0 to 31.
(define_predicate "const_0_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 31)")))

;; Match 0 to 63.
(define_predicate "const_0_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 63)")))

;; Match 0 to 255.
(define_predicate "const_0_to_255_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 0, 255)")))

;; Match (0 to 255) * 8
(define_predicate "const_0_to_255_mul_8_operand"
  (match_code "const_int")
{
  unsigned HOST_WIDE_INT val = INTVAL (op);
  return val <= 255*8 && val % 8 == 0;
})

;; Return true if OP is CONST_INT >= 1 and <= 31 (a valid operand
;; for shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 31)")))

;; Return true if OP is CONST_INT >= 1 and <= 63 (a valid operand
;; for 64bit shift & compare patterns, as shifting by 0 does not change flags).
(define_predicate "const_1_to_63_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 1, 63)")))

;; Match 2 or 3.
(define_predicate "const_2_to_3_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 2, 3)")))

;; Match 4 to 5.
(define_predicate "const_4_to_5_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 5)")))

;; Match 4 to 7.
(define_predicate "const_4_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 4, 7)")))

;; Match 6 to 7.
(define_predicate "const_6_to_7_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 6, 7)")))

;; Match 8 to 9.
(define_predicate "const_8_to_9_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 9)")))

;; Match 8 to 11.
(define_predicate "const_8_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 11)")))

;; Match 8 to 15.
(define_predicate "const_8_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 8, 15)")))

;; Match 10 to 11.
(define_predicate "const_10_to_11_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 10, 11)")))

;; Match 12 to 13.
(define_predicate "const_12_to_13_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 13)")))

;; Match 12 to 15.
(define_predicate "const_12_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 12, 15)")))

;; Match 14 to 15.
(define_predicate "const_14_to_15_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 14, 15)")))

;; Match 16 to 19.
(define_predicate "const_16_to_19_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 19)")))

;; Match 16 to 31.
(define_predicate "const_16_to_31_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 16, 31)")))

;; Match 20 to 23.
(define_predicate "const_20_to_23_operand"
  (and (match_code "const_int")
       (match_test "IN_RANGE (INTVAL (op), 20, 23)")))
976
977;; Match 24 to 27.
978(define_predicate "const_24_to_27_operand"
979  (and (match_code "const_int")
980       (match_test "IN_RANGE (INTVAL (op), 24, 27)")))
981
982;; Match 28 to 31.
983(define_predicate "const_28_to_31_operand"
984  (and (match_code "const_int")
985       (match_test "IN_RANGE (INTVAL (op), 28, 31)")))
986
;; True if this is a constant appropriate for an increment or decrement,
;; i.e. exactly 1 or -1.
(define_predicate "incdec_operand"
  (match_code "const_int")
{
  /* On Pentium4, the inc and dec operations cause an extra dependency on
     flag registers, since the carry flag is not set.  Reject inc/dec when
     the tuning says not to use them, unless we are optimizing for size.  */
  if (!TARGET_USE_INCDEC && !optimize_insn_for_size_p ())
    return false;
  return op == const1_rtx || op == constm1_rtx;
})
997
;; True for registers, or the constants 1 or -1.  Used to optimize
;; double-word shifts.
(define_predicate "reg_or_pm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (ior (match_test "op == const1_rtx")
		 (match_test "op == constm1_rtx")))))
1004
;; True if OP is acceptable as operand of DImode shift expander:
;; any nonimmediate operand on 64-bit targets, otherwise a register only.
(define_predicate "shiftdi_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "register_operand")))

;; True if OP is acceptable as the shifted input of the DImode ashift
;; expander: any nonimmediate operand on 64-bit targets, otherwise a
;; register or the constants 1 / -1.
(define_predicate "ashldi_input_operand"
  (if_then_else (match_test "TARGET_64BIT")
    (match_operand 0 "nonimmediate_operand")
    (match_operand 0 "reg_or_pm1_operand")))
1015
;; Return true if OP is a vector load from the constant pool with just
;; the first element nonzero.
(define_predicate "zero_extended_scalar_load_operand"
  (match_code "mem")
{
  unsigned n_elts;
  op = avoid_constant_pool_reference (op);

  if (GET_CODE (op) != CONST_VECTOR)
    return false;

  n_elts = CONST_VECTOR_NUNITS (op);

  /* Require elements 1 .. n_elts-1 to all be zero; note that element 0
     itself is not examined, so it may be zero as well.  */
  for (n_elts--; n_elts > 0; n_elts--)
    {
      rtx elt = CONST_VECTOR_ELT (op, n_elts);
      if (elt != CONST0_RTX (GET_MODE_INNER (GET_MODE (op))))
	return false;
    }
  return true;
})
1037
;; Return true if operand is an integral-mode vector constant that is
;; all ones.
(define_predicate "vector_all_ones_operand"
  (and (match_code "const_vector")
       (match_test "INTEGRAL_MODE_P (GET_MODE (op))")
       (match_test "op == CONSTM1_RTX (GET_MODE (op))")))
1043
; Return true when OP is acceptable as a vector memory operand.
; Only AVX can handle a misaligned memory operand; otherwise the MEM
; must be at least as aligned as its mode requires.
(define_predicate "vector_memory_operand"
  (and (match_operand 0 "memory_operand")
       (ior (match_test "TARGET_AVX")
	    (match_test "MEM_ALIGN (op) >= GET_MODE_ALIGNMENT (mode)"))))

; Return true when OP is a register_operand or vector_memory_operand.
(define_predicate "vector_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "vector_memory_operand")))
1055
;; Return true when OP is either a nonimmediate operand, or any
;; CONST_VECTOR.
(define_predicate "nonimmediate_or_const_vector_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_code "const_vector")))

;; Return true when OP is a nonimmediate operand or a standard SSE
;; constant (as determined by standard_sse_constant_p).
(define_predicate "nonimmediate_or_sse_const_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_test "standard_sse_constant_p (op, mode)")))

;; Return true if OP is a register or a zero constant.
(define_predicate "reg_or_0_operand"
  (ior (match_operand 0 "register_operand")
       (match_operand 0 "const0_operand")))

; Return true when OP is a nonimmediate operand or a zero constant.
(define_predicate "nonimm_or_0_operand"
  (ior (match_operand 0 "nonimmediate_operand")
       (match_operand 0 "const0_operand")))
1076
;; Return true for a memory operand whose address does not mention any
;; extended (REX-encoded) register.
(define_predicate "norex_memory_operand"
  (and (match_operand 0 "memory_operand")
       (not (match_test "x86_extended_reg_mentioned_p (op)"))))

;; Return true for RTX codes that force SImode address.
(define_predicate "SImode_address_operand"
  (match_code "subreg,zero_extend,and"))
1084
;; Return true if op is a valid address for LEA, and does not contain
;; a segment override.  Defined as a special predicate to allow
;; mode-less const_int operands pass to address_operand.
(define_special_predicate "address_no_seg_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;

  /* Mode-less const_ints are let through; any other operand must have
     the expected mode when one is specified.  */
  if (!CONST_INT_P (op)
      && mode != VOIDmode
      && GET_MODE (op) != mode)
    return false;

  /* address_operand already accepted OP, so decomposition must succeed.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  return parts.seg == ADDR_SPACE_GENERIC;
})
1103
;; Return true if op is a valid base register, displacement or
;; sum of base register and displacement for VSIB addressing
;; (i.e. no index register and no segment override).
(define_predicate "vsib_address_operand"
  (match_test "address_operand (op, VOIDmode)")
{
  struct ix86_address parts;
  int ok;
  rtx disp;

  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);
  /* The index slot is needed for the vector index, and segment
     overrides are not supported.  */
  if (parts.index || parts.seg != ADDR_SPACE_GENERIC)
    return false;

  /* VSIB addressing doesn't support (%rip).  */
  if (parts.disp)
    {
      disp = parts.disp;
      if (GET_CODE (disp) == CONST)
	{
	  disp = XEXP (disp, 0);
	  if (GET_CODE (disp) == PLUS)
	    disp = XEXP (disp, 0);
	  /* Reject PC-relative UNSPEC displacements.  */
	  if (GET_CODE (disp) == UNSPEC)
	    switch (XINT (disp, 1))
	      {
	      case UNSPEC_GOTPCREL:
	      case UNSPEC_PCREL:
	      case UNSPEC_GOTNTPOFF:
		return false;
	      }
	}
      /* Symbolic displacements under 64-bit PIC would also end up
	 PC-relative.  */
      if (TARGET_64BIT
	  && flag_pic
	  && (GET_CODE (disp) == SYMBOL_REF
	      || GET_CODE (disp) == LABEL_REF))
	return false;
    }

  return true;
})
1145
;; Match any MEM; used as the operator wrapping a VSIB address.
(define_predicate "vsib_mem_operator"
  (match_code "mem"))
1148
;; Return true if the rtx is known to be at least 32 bits aligned.
(define_predicate "aligned_operand"
  (match_operand 0 "general_operand")
{
  struct ix86_address parts;
  int ok;

  /* Registers and immediate operands are always "aligned".  */
  if (!MEM_P (op))
    return true;

  /* All patterns using aligned_operand on memory operands end up
     in promoting memory operand to 64bit and thus causing memory mismatch.  */
  if (TARGET_MEMORY_MISMATCH_STALL && !optimize_insn_for_size_p ())
    return false;

  /* Don't even try to do any aligned optimizations with volatiles.  */
  if (MEM_VOLATILE_P (op))
    return false;

  /* MEM_ALIGN is measured in bits.  */
  if (MEM_ALIGN (op) >= 32)
    return true;

  op = XEXP (op, 0);

  /* Pushes and pops are only valid on the stack pointer.  */
  if (GET_CODE (op) == PRE_DEC
      || GET_CODE (op) == POST_INC)
    return true;

  /* Decode the address.  */
  ok = ix86_decompose_address (op, &parts);
  gcc_assert (ok);

  if (parts.base && SUBREG_P (parts.base))
    parts.base = SUBREG_REG (parts.base);
  if (parts.index && SUBREG_P (parts.index))
    parts.index = SUBREG_REG (parts.index);

  /* Look for some component that isn't known to be aligned.  */
  if (parts.index)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.index)) * parts.scale < 32)
	return false;
    }
  if (parts.base)
    {
      if (REGNO_POINTER_ALIGN (REGNO (parts.base)) < 32)
	return false;
    }
  if (parts.disp)
    {
      /* The displacement must be a multiple of 4 bytes.  */
      if (!CONST_INT_P (parts.disp)
	  || (INTVAL (parts.disp) & 3))
	return false;
    }

  /* Didn't find one -- this must be an aligned address.  */
  return true;
})
1209
;; Return true if OP is a memory operand whose address contains a
;; displacement.
(define_predicate "memory_displacement_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  /* memory_operand already validated the address, so decomposition
     must succeed.  */
  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);
  return parts.disp != NULL_RTX;
})
1221
;; Return true if OP is a memory operand whose address consists of a
;; displacement only (no base or index register).  Never true for
;; 64-bit targets.
(define_predicate "memory_displacement_only_operand"
  (match_operand 0 "memory_operand")
{
  struct ix86_address parts;
  int ok;

  if (TARGET_64BIT)
    return false;

  ok = ix86_decompose_address (XEXP (op, 0), &parts);
  gcc_assert (ok);

  if (parts.base || parts.index)
    return false;

  return parts.disp != NULL_RTX;
})
1240
;; Return true if OP is a memory operand that cannot be represented
;; by the modRM array, i.e. one with a nonzero address length.
(define_predicate "long_memory_operand"
  (and (match_operand 0 "memory_operand")
       (match_test "memory_address_length (op, false)")))
1246
;; Return true if OP is a comparison operator that can be issued by fcmov.
(define_predicate "fcmov_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  /* FP comparisons are first mapped to the equivalent integer
     condition code.  */
  if (inmode == CCFPmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* i387 supports just a limited set of condition codes.  */
  switch (code)
    {
    case LTU: case GTU: case LEU: case GEU:
      /* Unsigned comparisons need a flags mode that provides carry.  */
      if (inmode == CCmode || inmode == CCFPmode || inmode == CCCmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
    case EQ: case NE:
      return true;
    default:
      return false;
    }
})
1274
;; Return true if OP is a comparison that can be used in the CMPSS/CMPPS insns.
;; The first set are supported directly; the second set (AVX only) can't be
;; done with full IEEE support, i.e. NaNs.

(define_predicate "sse_comparison_operator"
  (ior (match_code "eq,ne,lt,le,unordered,unge,ungt,ordered")
       (and (match_test "TARGET_AVX")
	    (match_code "ge,gt,uneq,unle,unlt,ltgt"))))

;; Match any signed integer comparison (plus equality).
(define_predicate "ix86_comparison_int_operator"
  (match_code "ne,eq,ge,gt,le,lt"))

;; Match any unsigned integer comparison (plus equality).
(define_predicate "ix86_comparison_uns_operator"
  (match_code "ne,eq,geu,gtu,leu,ltu"))

;; Match the equality comparisons usable after a bit-test (bt) insn.
(define_predicate "bt_comparison_operator"
  (match_code "ne,eq"))
1292
;; Return true if OP is a valid comparison operator in valid mode,
;; i.e. the flags mode of its first operand supports the condition code.
(define_predicate "ix86_comparison_operator"
  (match_operand 0 "comparison_operator")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  if (inmode == CCFPmode)
    return ix86_trivial_fp_comparison_operator (op, mode);

  /* Check which condition codes the flags mode provides.  */
  switch (code)
    {
    case EQ: case NE:
      if (inmode == CCGZmode)
	return false;
      return true;
    case GE: case LT:
      if (inmode == CCmode || inmode == CCGCmode
	  || inmode == CCGOCmode || inmode == CCNOmode || inmode == CCGZmode)
	return true;
      return false;
    case GEU: case LTU:
      if (inmode == CCGZmode)
	return true;
      /* FALLTHRU */
    case GTU: case LEU:
      if (inmode == CCmode || inmode == CCCmode || inmode == CCGZmode)
	return true;
      return false;
    case ORDERED: case UNORDERED:
      if (inmode == CCmode)
	return true;
      return false;
    case GT: case LE:
      if (inmode == CCmode || inmode == CCGCmode || inmode == CCNOmode)
	return true;
      return false;
    default:
      return false;
    }
})
1334
;; Return true if OP is a valid comparison operator
;; testing carry flag to be set.
(define_predicate "ix86_carry_flag_operator"
  (match_code "ltu,lt,unlt,gtu,gt,ungt,le,unle,ge,unge,ltgt,uneq")
{
  machine_mode inmode = GET_MODE (XEXP (op, 0));
  enum rtx_code code = GET_CODE (op);

  /* Map an FP comparison to the equivalent integer condition code
     first.  */
  if (inmode == CCFPmode)
    {
      if (!ix86_trivial_fp_comparison_operator (op, mode))
	return false;
      code = ix86_fp_compare_code_to_integer (code);
    }
  /* In CCCmode both LTU and GTU test the carry flag.  */
  else if (inmode == CCCmode)
   return code == LTU || code == GTU;
  else if (inmode != CCmode)
    return false;

  /* In CCmode only LTU does.  */
  return code == LTU;
})
1356
;; Return true if this comparison only requires testing one flag bit.
(define_predicate "ix86_trivial_fp_comparison_operator"
  (match_code "gt,ge,unlt,unle,uneq,ltgt,ordered,unordered"))

;; Return true if we know how to do this comparison.  Others require
;; testing more than one flag bit, and we let the generic middle-end
;; code do that.  With IX86_FPCMP_ARITH any comparison is accepted.
(define_predicate "ix86_fp_comparison_operator"
  (if_then_else (match_test "ix86_fp_comparison_strategy (GET_CODE (op))
                             == IX86_FPCMP_ARITH")
               (match_operand 0 "comparison_operator")
               (match_operand 0 "ix86_trivial_fp_comparison_operator")))
1369
;; Nearly general operand, but accept any const_double, since we wish
;; to be able to drop them into memory rather than have them get pulled
;; into registers.
(define_predicate "cmp_fp_expander_operand"
  (ior (match_code "const_double")
       (match_operand 0 "general_operand")))

;; Return true if this is a valid binary floating-point operation
;; (add, subtract, multiply or divide).
(define_predicate "binary_fp_operator"
  (match_code "plus,minus,mult,div"))

;; Return true if this is a multiply operation.
(define_predicate "mult_operator"
  (match_code "mult"))

;; Return true if this is a division operation.
(define_predicate "div_operator"
  (match_code "div"))

;; Return true if this is a plus, minus, and, ior or xor operation.
(define_predicate "plusminuslogic_operator"
  (match_code "plus,minus,and,ior,xor"))
1392
;; Return true for any code satisfying ARITHMETIC_P.
(define_predicate "arith_or_logical_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax,compare,minus,div,
	       mod,udiv,umod,ashift,rotate,ashiftrt,lshiftrt,rotatert"))

;; Return true for any code satisfying COMMUTATIVE_P.
(define_predicate "commutative_operator"
  (match_code "plus,mult,and,ior,xor,smin,smax,umin,umax"))

;; Return true if OP is a binary operator that can be promoted to wider
;; mode.  Multiplication qualifies only when the tuning enables HImode
;; imul promotion.
(define_predicate "promotable_binary_operator"
  (ior (match_code "plus,minus,and,ior,xor,ashift")
       (and (match_code "mult")
	    (match_test "TARGET_TUNE_PROMOTE_HIMODE_IMUL"))))

;; Match a COMPARE rtx.
(define_predicate "compare_operator"
  (match_code "compare"))
1410
;; Return true if OP is a memory operand, aligned to
;; less than its natural alignment (MEM_ALIGN and mode size in bits).
(define_predicate "misaligned_operand"
  (and (match_code "mem")
       (match_test "MEM_ALIGN (op) < GET_MODE_BITSIZE (mode)")))
1416
;; Return true if OP is a vzeroall operation, known to be a PARALLEL.
(define_predicate "vzeroall_operation"
  (match_code "parallel")
{
  unsigned i, nregs = TARGET_64BIT ? 16 : 8;

  /* Expect one leading element plus one clearing SET per SSE register.  */
  if ((unsigned) XVECLEN (op, 0) != 1 + nregs)
    return false;

  for (i = 0; i < nregs; i++)
    {
      rtx elt = XVECEXP (op, 0, i+1);

      /* Each element must set SSE register I to a V8SImode zero.  */
      if (GET_CODE (elt) != SET
	  || GET_CODE (SET_DEST (elt)) != REG
	  || GET_MODE (SET_DEST (elt)) != V8SImode
	  || REGNO (SET_DEST (elt)) != GET_SSE_REGNO (i)
	  || SET_SRC (elt) != CONST0_RTX (V8SImode))
	return false;
    }
  return true;
})
1439
;; Return true if OP is a vzeroall pattern (a PARALLEL containing an
;; UNSPECV_VZEROALL unspec_volatile).
(define_predicate "vzeroall_pattern"
  (and (match_code "parallel")
       (match_code "unspec_volatile" "a")
       (match_test "XINT (XVECEXP (op, 0, 0), 1) == UNSPECV_VZEROALL")))

;; Return true if OP is a vzeroupper pattern (a PARALLEL containing an
;; UNSPECV_VZEROUPPER unspec_volatile).
(define_predicate "vzeroupper_pattern"
  (and (match_code "parallel")
       (match_code "unspec_volatile" "a")
       (match_test "XINT (XVECEXP (op, 0, 0), 1) == UNSPECV_VZEROUPPER")))
1451
;; Return true if OP is an addsub vec_merge operation, i.e. a vec_merge
;; of a PLUS/MINUS pair whose mask selects the MINUS result in even and
;; the PLUS result in odd elements.
(define_predicate "addsub_vm_operator"
  (match_code "vec_merge")
{
  rtx op0, op1;
  int swapped;
  HOST_WIDE_INT mask;
  int nunits, elt;

  op0 = XEXP (op, 0);
  op1 = XEXP (op, 1);

  /* Sanity check: the caller must supply a PLUS/MINUS pair in some
     order; anything else is a bug.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = 0;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = 1;
  else
    gcc_unreachable ();

  mask = INTVAL (XEXP (op, 2));
  nunits = GET_MODE_NUNITS (mode);

  for (elt = 0; elt < nunits; elt++)
    {
      /* bit clear: take from op0, set: take from op1  */
      int bit = !(mask & (HOST_WIDE_INT_1U << elt));

      if (bit != ((elt & 1) ^ swapped))
	return false;
    }

  return true;
})
1486
;; Return true if OP is an addsub vec_select/vec_concat operation:
;; a vec_select over the concatenation of a PLUS/MINUS pair whose
;; selection vector interleaves the two halves appropriately.
(define_predicate "addsub_vs_operator"
  (and (match_code "vec_select")
       (match_code "vec_concat" "0"))
{
  rtx op0, op1;
  bool swapped;
  int nunits, elt;

  op0 = XEXP (XEXP (op, 0), 0);
  op1 = XEXP (XEXP (op, 0), 1);

  /* Sanity check: the caller must supply a PLUS/MINUS pair in some
     order; anything else is a bug.  */
  if (GET_CODE (op0) == MINUS && GET_CODE (op1) == PLUS)
    swapped = false;
  else if (GET_CODE (op0) == PLUS && GET_CODE (op1) == MINUS)
    swapped = true;
  else
    gcc_unreachable ();

  nunits = GET_MODE_NUNITS (mode);
  if (XVECLEN (XEXP (op, 1), 0) != nunits)
    return false;

  /* We already checked that permutation is suitable for addsub,
     so only look at the first element of the parallel.  */
  elt = INTVAL (XVECEXP (XEXP (op, 1), 0, 0));

  return elt == (swapped ? nunits : 0);
})
1517
;; Return true if OP is a parallel for an addsub vec_select.
(define_predicate "addsub_vs_parallel"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int nelt = XVECLEN (op, 0);
  int elt, i;

  if (nelt < 2)
    return false;

  /* Check that the permutation is suitable for addsub.
     For example, { 0 9 2 11 4 13 6 15 } or { 8 1 10 3 12 5 14 7 }.
     The first element determines which of the two forms to expect.  */
  elt = INTVAL (XVECEXP (op, 0, 0));
  if (elt == 0)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (i + (i & 1) * nelt))
	  return false;
    }
  else if (elt == nelt)
    {
      for (i = 1; i < nelt; ++i)
	if (INTVAL (XVECEXP (op, 0, i)) != (elt + i - (i & 1) * nelt))
	  return false;
    }
  else
    return false;

  return true;
})
1549
;; Return true if OP is a parallel for a vbroadcast permute, i.e. all
;; selection indices are identical.
(define_predicate "avx_vbroadcast_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  rtx elt = XVECEXP (op, 0, 0);
  int i, nelt = XVECLEN (op, 0);

  /* Don't bother checking there are the right number of operands,
     merely that they're all identical.  */
  for (i = 1; i < nelt; ++i)
    if (XVECEXP (op, 0, i) != elt)
      return false;
  return true;
})
1565
;; Return true if OP is a parallel for a palignr permute, i.e. a
;; rotation of the element indices.
(define_predicate "palignr_operand"
  (and (match_code "parallel")
       (match_code "const_int" "a"))
{
  int elt = INTVAL (XVECEXP (op, 0, 0));
  int i, nelt = XVECLEN (op, 0);

  /* Check that an order in the permutation is suitable for palignr.
     For example, {5 6 7 0 1 2 3 4} is "palignr 5, xmm, xmm".  */
  for (i = 1; i < nelt; ++i)
    if (INTVAL (XVECEXP (op, 0, i)) != ((elt + i) % nelt))
      return false;
  return true;
})
1581
;; Return true if OP is a proper third operand to vpblendw256, i.e. a
;; 16-bit mask whose low and high bytes are equal.
(define_predicate "avx2_pblendw_operand"
  (match_code "const_int")
{
  HOST_WIDE_INT val = INTVAL (op);
  HOST_WIDE_INT low = val & 0xff;
  return val == ((low << 8) | low);
})
1590
;; Return true if OP is a vector_operand or CONST_VECTOR.
(define_predicate "general_vector_operand"
  (ior (match_operand 0 "vector_operand")
       (match_code "const_vector")))

;; Return true if OP is either the -1 constant or stored in a register.
(define_predicate "register_or_constm1_operand"
  (ior (match_operand 0 "register_operand")
       (and (match_code "const_int")
	    (match_test "op == constm1_rtx"))))
1601
;; Return true if the vector ends with between 12 and 18 register saves
;; (SETs of a memory destination from a register source) using RAX as
;; the base address.
(define_predicate "save_multiple"
  (match_code "parallel")
{
  const unsigned len = XVECLEN (op, 0);
  unsigned i;

  /* Starting from end of vector, count register saves.  */
  for (i = 0; i < len; ++i)
    {
      rtx src, dest, addr;
      rtx e = XVECEXP (op, 0, len - 1 - i);

      if (GET_CODE (e) != SET)
	break;

      src  = SET_SRC (e);
      dest = SET_DEST (e);

      if (!REG_P (src) || !MEM_P (dest))
	break;

      addr = XEXP (dest, 0);

      /* Good if dest address is in RAX.  */
      if (REG_P (addr) && REGNO (addr) == AX_REG)
	continue;

      /* Good if dest address is offset of RAX.  */
      if (GET_CODE (addr) == PLUS
	  && REG_P (XEXP (addr, 0))
	  && REGNO (XEXP (addr, 0)) == AX_REG)
	continue;

      break;
    }
  return (i >= 12 && i <= 18);
})
1641
1642
;; Return true if the vector ends with between 12 and 18 register loads
;; (SETs of a register destination from a memory source) using RSI as
;; the base address.
(define_predicate "restore_multiple"
  (match_code "parallel")
{
  const unsigned len = XVECLEN (op, 0);
  unsigned i;

  /* Starting from end of vector, count register restores.  */
  for (i = 0; i < len; ++i)
    {
      rtx src, dest, addr;
      rtx e = XVECEXP (op, 0, len - 1 - i);

      if (GET_CODE (e) != SET)
	break;

      src  = SET_SRC (e);
      dest = SET_DEST (e);

      if (!MEM_P (src) || !REG_P (dest))
	break;

      addr = XEXP (src, 0);

      /* Good if src address is in RSI.  */
      if (REG_P (addr) && REGNO (addr) == SI_REG)
	continue;

      /* Good if src address is offset of RSI.  */
      if (GET_CODE (addr) == PLUS
	  && REG_P (XEXP (addr, 0))
	  && REGNO (XEXP (addr, 0)) == SI_REG)
	continue;

      break;
    }
  return (i >= 12 && i <= 18);
})
1682