Lines matching refs:shl
(only in /freebsd-13-stable/contrib/llvm-project/llvm/lib/CodeGen/SelectionDAG/)

2530   // fold (add x, shl(0 - y, n)) -> sub(x, shl(y, n))
3610 // mul x, (2^N + 1) --> add (shl x, N), x
3611 // mul x, (2^N - 1) --> sub (shl x, N), x
3642 // (mul (shl X, c1), c2) -> (mul X, c2 << c1)
3651 // Change (mul (shl X, C), Y) -> (shl (mul X, Y), C) when the shift has one
3656 // Check for both (mul (shl X, C), Y) and (mul Y, (shl X, C)).
4051 // fold (udiv x, (shl c, y)) -> x >>u (log2(c)+y) iff c is power of 2
4120 // fold (urem x, (shl pow2, y)) -> (and x, (add (shl pow2, y), -1))
5382 SplatValue |= SplatValue.shl(SplatBitSize);
5523 // fold (and (or (srl N, 8), (shl N, 8)), 0xffff) -> (srl (bswap N), const)
5552 // Recognize (and (shl a, 8), 0xff00), (and (srl a, 8), 0xff)
5596 // Look for (shl (and a, 0xff), 8), (srl (and a, 0xff00), 8)
5761 // (or (and (shl (A, 8)), 0xff00ff00), (and (srl (A, 8)), 0x00ff00ff))
6063 // Recognize halfword bswaps as (bswap + rotl 16) or (bswap + shl 16)
6127 /// Match "(X shl/srl V1) & V2" where V2 may not be present.
6139 /// from a shl/srl/mul/udiv. This is meant to handle cases where
6147 /// expands (add v v) -> (shl v 1)
6150 /// expands (mul v c0) -> (shl (mul v c1) c3)
6152 /// (or (udiv v c0) (shl (udiv v c1) c2)):
6155 /// (or (shl v c0) (srl (shl v c1) c2)):
6156 /// expands (shl v c0) -> (shl (shl v c1) c3)
6158 /// (or (srl v c0) (shl (srl v c1) c2)):
6179 // (add v v) -> (shl v 1)
6394 // fold (or (shl x, (*ext y)),
6398 // fold (or (shl x, (*ext (sub 32, y))),
6424 // fold (or (shl x0, (*ext y)),
6428 // fold (or (shl x0, (*ext (sub 32, y))),
6448 // fold (or (shl x0, y), (srl (srl x1, 1), (xor y, 31)))
6457 // fold (or (shl (shl x0, 1), (xor y, 31)), (srl x1, y))
6466 // fold (or (shl (add x0, x0), (xor y, 31)), (srl x1, y))
6468 // TODO: Should add(x,x) -> shl(x,1) be a general DAG canonicalization?
6507 // Match "(X shl/srl V1) & V2" where V2 may not be present.
6520 // InstCombine may have combined a constant shl, srl, mul, or udiv with one
6525 // can be broken down (ie if InstCombine merged two shl or srl ops into a
6552 // Canonicalize shl to left side in a shl/srl pair.
6565 // fold (or (shl x, C1), (srl x, C2)) -> (rotl x, C1)
6566 // fold (or (shl x, C1), (srl x, C2)) -> (rotr x, C2)
6567 // fold (or (shl x, C1), (srl y, C2)) -> (fshl x, y, C1)
6568 // fold (or (shl x, C1), (srl y, C2)) -> (fshr x, y, C2)
7017 /// t32: i32 = shl t29, Constant:i8<8>
7424 Ones = N0Opcode == ISD::SHL ? Ones.shl(ShiftAmt) : Ones.lshr(ShiftAmt);
7456 // fold (xor (shl 1, x), -1) -> (rotl ~1, x)
7459 // i16 shl == 1 << 14 == 16384 == 0b0100000000000000
7567 /// We are looking for: (shift being one of shl/sra/srl)
7604 return SDValue(); // only shl(add) not sr[al](add).
7753 // (shl (and (setcc) N01CV) N1CV) -> (and (setcc) N01CV<<N1CV)
7773 // fold (shl c1, c2) -> c1<<c2
7780 // if (shl x, c) is known to be zero, return 0
7785 // fold (shl x, (trunc (and y, c))) -> (shl x, (and (trunc y), (trunc c))).
7795 // fold (shl (shl x, c1), c2) -> 0 or (shl x, (add c1, c2))
7821 // fold (shl (ext (shl x, c1)), c2) -> (shl (ext x), (add c1, c2))
7867 // fold (shl (zext (srl x, C)), C) -> (zext (shl (srl x, C), C))
7893 // fold (shl (sr[la] exact X, C1), C2) -> (shl X, (C2-C1)) if C1 <= C2
7894 // fold (shl (sr[la] exact X, C1), C2) -> (sr[la] X, (C2-C1)) if C1 > C2
7910 // fold (shl (srl x, c1), c2) -> (and (shl x, (sub c2, c1)), MASK) or
7942 // fold (shl (sra x, c1), c1) -> (and x, (shl -1, c1))
7951 // fold (shl (add x, c1), c2) -> (add (shl x, c2), c1 << c2)
7952 // fold (shl (or x, c1), c2) -> (or (shl x, c2), c1 << c2)
7967 // fold (shl (mul x, c1), c2) -> (mul x, c1 << c2)
7980 // Fold (shl (vscale * C0), C1) to (vscale * (C0 << C1)).
8093 // fold (sra (shl x, c1), c1) -> sext_inreg for some c1 and target supports
8136 // fold (sra (shl X, m), (sub result_size, n))
8137 // -> (sign_extend (trunc (shl X, (sub (sub result_size, n), m)))) for
8139 // If truncate is free for the target sext(shl) is likely to result in better
8177 // sra (add (shl X, N1C), AddC), N1C -->
8352 // fold (srl (shl x, c), c) -> (and x, cst2)
8353 // TODO - (srl (shl x, c1), c2).
8525 // fold fshl(N0, undef_or_zero, C) -> shl(N0, C)
8526 // fold fshr(N0, undef_or_zero, C) -> shl(N0, BW-C)
8577 // fold fshl(N0, undef_or_zero, N2) -> shl(N0, N2)
9755 // fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
9756 // (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
9772 // shl/shr
10465 // fold (zext (and/or/xor (shl/shr (load x), cst), cst)) ->
10466 // (and/or/xor (shl/shr (zextload x), (zext cst)), (zext cst))
10519 // (zext (shl (zext x), cst)) -> (shl (zext x), cst)
10527 // If the original shl may be shifting out bits, do not perform this
10993 // before, so we need to shl the loaded data into the correct position in the
11258 // trunc (shl x, K) -> shl (trunc x), K => K < VT.getScalarSizeInBits()
11325 // For example "trunc (or (shl x, 8), y)" -> trunc y
17066 // "truncstore (or (shl x, 8), y), i8" -> "truncstore y, i8"
17233 /// (shl (zext I to i64), 32)), addr) -->
21309 // fold (select_cc seteq (and x, y), 0, 0, A) -> (and (shr (shl x)) A)
21341 // fold select C, 16, 0 -> shl C, 4
21383 // shl setcc result by log2 n2c
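
The matches at lines 3610-3611 above describe the multiply strength-reduction folds. As a minimal sketch of the underlying identities (plain standalone C++, not DAGCombiner code; the bit width and test values are arbitrary choices for illustration):

    #include <cassert>
    #include <cstdint>

    int main() {
      // mul x, (2^N + 1) --> add (shl x, N), x
      // mul x, (2^N - 1) --> sub (shl x, N), x
      // Both identities hold modulo 2^32, so unsigned wraparound is harmless.
      for (uint32_t x : {0u, 1u, 7u, 0xDEADBEEFu}) {
        for (unsigned n = 1; n < 32; ++n) {
          uint32_t k = uint32_t(1) << n;
          assert(x * (k + 1) == (x << n) + x);
          assert(x * (k - 1) == (x << n) - x);
        }
      }
      return 0;
    }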
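
Similarly, the folds listed at lines 6565-6566 recognize an or of a left shift and a right shift of the same value as a rotate when the shift amounts sum to the bit width. A small standalone check of that equivalence (again ordinary C++, with the rotate written out by hand rather than taken from any library):

    #include <cassert>
    #include <cstdint>

    // Rotate left by c, assuming 0 < c < 32 so neither shift is undefined.
    static uint32_t rotl32(uint32_t x, unsigned c) {
      return (x << c) | (x >> (32 - c));
    }

    int main() {
      // (or (shl x, C1), (srl x, C2)) -> (rotl x, C1) when C1 + C2 == 32.
      uint32_t x = 0x12345678u;
      for (unsigned c1 = 1; c1 < 32; ++c1) {
        unsigned c2 = 32 - c1;
        assert(((x << c1) | (x >> c2)) == rotl32(x, c1));
      }
      return 0;
    }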
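
Finally, the fold at line 8352, (srl (shl x, c), c) -> (and x, cst2), rests on the fact that shifting left and then logically right by the same amount simply clears the high c bits, i.e. masks with ~0 >> c. A short standalone check of that identity:

    #include <cassert>
    #include <cstdint>

    int main() {
      // (srl (shl x, c), c) == (and x, mask) with mask = ~0u >> c.
      for (uint32_t x : {0u, 0xFFFFFFFFu, 0xCAFEBABEu}) {
        for (unsigned c = 1; c < 32; ++c)
          assert(((x << c) >> c) == (x & (0xFFFFFFFFu >> c)));
      }
      return 0;
    }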