Lines Matching +defs:label +defs:offset

250 /* The offset in bytes of the piece from the start of the type.  */
251 poly_uint64_pod offset;
1877 rtx offset = gen_int_mode (pieces[i].offset, Pmode);
1878 RTVEC_ELT (rtxes, i) = gen_rtx_EXPR_LIST (VOIDmode, reg, offset);
1973 p.offset += i * element_bytes;
2004 the field offset using poly_ints. */
2009 whose offset isn't byte-aligned. */
2025 p.offset += bytepos;
2050 && known_eq (prev.offset + GET_MODE_SIZE (prev.mode), p.offset)
2148 strip_offset_and_salt (rtx addr, poly_int64 *offset)
2150 return strip_salt (strip_offset (addr, offset));
3150 poly_int64 offset;
3151 addr = strip_offset_and_salt (addr, &offset);
4148 aarch64_output_sve_scalar_inc_dec (rtx offset)
4150 poly_int64 offset_value = rtx_to_poly_int64 (offset);
4189 aarch64_output_sve_addvl_addpl (rtx offset)
4192 poly_int64 offset_value = rtx_to_poly_int64 (offset);
4414 aarch64_add_offset_1_temporaries (HOST_WIDE_INT offset)
4416 return absu_hwi (offset) < 0x1000000 ? 0 : 1;
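
A note on the bound at 4416: 0x1000000 is 2^24. Assuming the standard A64 encoding of a 12-bit ADD/SUB immediate, optionally shifted left by 12, two plain add instructions reach at most

    0xfff + (0xfff << 12) = 0xffffff = 2^24 - 1

so any larger magnitude needs one temporary register in which to build the constant first.
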
4437 rtx src, HOST_WIDE_INT offset, rtx temp1,
4443 unsigned HOST_WIDE_INT moffset = absu_hwi (offset);
4459 insn = emit_insn (gen_add3_insn (dest, src, GEN_INT (offset)));
4467 a) the offset cannot be loaded by a 16-bit move or
4475 low_off = offset < 0 ? -low_off : low_off;
4478 insn = emit_insn (gen_add2_insn (dest, GEN_INT (offset - low_off)));
4490 insn = emit_insn (offset < 0
4496 rtx adj = plus_constant (mode, src, offset);
4506 aarch64_offset_temporaries (bool add_p, poly_int64 offset)
4509 if (add_p && aarch64_sve_addvl_addpl_immediate_p (offset))
4513 HOST_WIDE_INT factor = offset.coeffs[1];
4514 HOST_WIDE_INT constant = offset.coeffs[0] - factor;
4525 constant part of the offset. */
4541 poly_int64 offset;
4542 if (!poly_int_rtx_p (x, &offset))
4544 return aarch64_offset_temporaries (true, offset);
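
The factor/constant split at 4513-4514 follows from poly_int semantics. Assuming the aarch64 convention that a poly_int64 evaluates to coeffs[0] + coeffs[1] * (VQ - 1), where VQ is the runtime number of 128-bit quadwords per SVE vector and coeffs[0] is the value at the minimum VQ = 1:

    offset = coeffs[0] + coeffs[1] * (VQ - 1)
           = (coeffs[0] - coeffs[1]) + coeffs[1] * VQ
           =  constant               + factor    * VQ

"factor" is the part that scales with the vector length and "constant" is VL-independent; the two parts are costed for temporaries separately.
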
4567 poly_int64 offset, rtx temp1, rtx temp2,
4578 if (src != const0_rtx && aarch64_sve_addvl_addpl_immediate_p (offset))
4580 rtx offset_rtx = gen_int_mode (offset, mode);
4590 HOST_WIDE_INT factor = offset.coeffs[1];
4591 HOST_WIDE_INT constant = offset.coeffs[0] - factor;
4734 /* Like aarch64_add_offset, but the offset is given as an rtx rather
5322 poly_int64 offset;
5326 /* If we have (const (plus symbol offset)), separate out the offset
5328 rtx base = strip_offset (imm, &offset);
5330 /* We must always add an offset involving VL separately, rather than
5332 if (!offset.is_constant (&const_offset))
5339 if (base == const0_rtx && aarch64_sve_cnt_immediate_p (offset))
5356 aarch64_add_offset (int_mode, dest, base, offset,
5360 aarch64_add_offset (int_mode, dest, base, offset,
5503 poly_int64 offset;
5504 rtx base = strip_offset_and_salt (addr, &offset);
5511 addr = plus_constant (Pmode, addr, offset);
5886 rtx offset = gen_int_mode (i * GET_MODE_SIZE (ag_mode), Pmode);
5887 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, offset);
6196 rtx offset = gen_int_mode
6198 tmp = gen_rtx_EXPR_LIST (VOIDmode, tmp, offset);
6762 /* Emit loop start label. */
6829 poly_int64 offset = 0;
6907 offset range. These saves happen below the hard frame pointer. */
6911 frame.reg_offset[regno] = offset;
6912 offset += BYTES_PER_SVE_PRED;
6915 if (maybe_ne (offset, 0))
6918 the offset of the vector register save slots need to be a multiple
6929 if (last_fp_reg == (int) INVALID_REGNUM && offset.is_constant ())
6930 offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
6933 if (known_le (offset, vector_save_size))
6934 offset = vector_save_size;
6935 else if (known_le (offset, vector_save_size * 2))
6936 offset = vector_save_size * 2;
6947 frame.reg_offset[regno] = offset;
6948 offset += vector_save_size;
6951 /* OFFSET is now the offset of the hard frame pointer from the bottom
6953 bool saves_below_hard_fp_p = maybe_ne (offset, 0);
6954 frame.below_hard_fp_saved_regs_size = offset;
6958 frame.reg_offset[R29_REGNUM] = offset;
6960 frame.reg_offset[R30_REGNUM] = offset + UNITS_PER_WORD;
6962 offset += 2 * UNITS_PER_WORD;
6968 frame.reg_offset[regno] = offset;
6973 offset += UNITS_PER_WORD;
6976 poly_int64 max_int_offset = offset;
6977 offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
6978 bool has_align_gap = maybe_ne (offset, max_int_offset);
6988 && multiple_p (offset, 16))
6994 frame.reg_offset[regno] = offset;
7000 offset += vector_save_size;
7003 offset = aligned_upper_bound (offset, STACK_BOUNDARY / BITS_PER_UNIT);
7005 frame.saved_regs_size = offset;
7007 poly_int64 varargs_and_saved_regs_size = offset + frame.saved_varargs_size;
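
Lines 6829-7007 accumulate frame.reg_offset[] from the bottom of the callee-save area upward. A rough sketch of the layout these lines imply (a summary, not a diagram from the source), for a function that saves SVE state:

    low addresses
      SVE predicate saves         (BYTES_PER_SVE_PRED each, from offset 0)
      SVE vector saves            (vector_save_size each, vector-aligned)
      ---- hard frame pointer ----  (saves_below_hard_fp_p iff anything below)
      x29/x30 pair                (2 * UNITS_PER_WORD, when a frame chain exists)
      remaining GP callee saves   (UNITS_PER_WORD each)
      FP/SIMD callee saves        (vector_save_size each, after re-alignment)
      ---- frame.saved_regs_size, STACK_BOUNDARY-aligned ----
    high addresses
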
7358 rtx &anchor_reg, poly_int64 &offset,
7361 if (maybe_ge (offset, 8 * GET_MODE_SIZE (mode)))
7363 /* This is the maximum valid offset of the anchor from the base.
7373 offset -= anchor_offset;
7389 rtx base, poly_int64 offset)
7392 plus_constant (Pmode, base, offset));
7397 to LIMIT to the stack at the location starting at offset START_OFFSET,
7416 poly_int64 offset;
7429 offset = start_offset + cfun->machine->frame.reg_offset[regno];
7431 poly_int64 sp_offset = offset;
7436 offset, ptrue);
7438 && (!offset.is_constant (&const_offset) || const_offset >= 512))
7455 offset -= fp_offset;
7457 mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
7470 offset += GET_MODE_SIZE (mode);
7471 mem2 = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
7507 up to and including LIMIT. Restore from the stack offset START_OFFSET,
7517 poly_int64 offset;
7537 offset = start_offset + cfun->machine->frame.reg_offset[regno];
7541 offset, ptrue);
7542 mem = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
7554 offset += GET_MODE_SIZE (mode);
7555 mem2 = gen_frame_mem (mode, plus_constant (Pmode, base_rtx, offset));
7576 offset_4bit_signed_scaled_p (machine_mode mode, poly_int64 offset)
7579 return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
7587 offset_6bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
7590 return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
7598 aarch64_offset_7bit_signed_scaled_p (machine_mode mode, poly_int64 offset)
7601 return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
7609 poly_int64 offset)
7612 return (offset.is_constant (&const_offset)
7620 offset_9bit_signed_scaled_p (machine_mode mode, poly_int64 offset)
7623 return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
7631 offset_12bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
7634 return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
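
Every offset_*_scaled_p helper in 7576-7634 has the same shape: the byte offset must be a compile-time constant multiple of the access size, and the multiple must fit the named immediate field. A minimal sketch with the 6-bit unsigned case filled in (the IN_RANGE bounds are inferred from the helper's name):

    static inline bool
    offset_6bit_unsigned_scaled_p (machine_mode mode, poly_int64 offset)
    {
      HOST_WIDE_INT multiple;
      /* Accept offset == GET_MODE_SIZE (mode) * multiple with
         0 <= multiple <= 63, i.e. an unsigned 6-bit field.  */
      return (constant_multiple_p (offset, GET_MODE_SIZE (mode), &multiple)
              && IN_RANGE (multiple, 0, 63));
    }

For DImode that accepts byte offsets 0, 8, ..., 504; the signed variants bound the multiple by e.g. -64..63 (7-bit) or -256..255 (9-bit) instead.
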
7659 poly_int64 offset = cfun->machine->frame.reg_offset[regno];
7665 && known_eq (offset, 0))
7668 /* Get the offset relative to the register we'll use. */
7670 offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
7672 offset += crtl->outgoing_args_size;
7677 ? offset_9bit_signed_scaled_p (mode, offset)
7678 : offset_12bit_unsigned_scaled_p (mode, offset))
7747 /* If there is a callee-save at an adjacent offset, add it too
7749 poly_int64 offset = cfun->machine->frame.reg_offset[regno];
7750 unsigned regno2 = multiple_p (offset, 16) ? regno + 1 : regno - 1;
7756 ? known_eq (offset + 8, offset2)
7757 : multiple_p (offset2, 16) && known_eq (offset2 + 8, offset))
7816 poly_int64 offset = cfun->machine->frame.reg_offset[regno];
7818 offset -= cfun->machine->frame.below_hard_fp_saved_regs_size;
7820 offset += crtl->outgoing_args_size;
7822 rtx addr = plus_constant (Pmode, ptr_reg, offset);
7844 /* The next register is not of the same class or its offset is not
7935 determining the probe offset for alloca. */
8174 /* If doing a small final adjustment, we always probe at offset 0.
8176 the final adjustment is smaller than the probing offset. */
8368 /* The offset of the frame chain record (if any) from the current SP. */
8373 /* The offset of the bottom of the save area from the current SP. */
8622 /* We need to unwind the stack by the offset computed by
8641 that must be applied after the frame has been destroyed. An extra label
8652 so the offset of LR is not known yet. Also optimizations will remove the
8654 base or offset for loading LR is different in many cases).
8999 poly_int64 offset;
9000 rtx base = strip_offset_and_salt (x, &offset);
9004 if (aarch64_classify_symbol (base, offset.to_constant ())
9094 /* Return true if address offset is a valid index. If it is, fill in INFO
9258 info->offset = index;
9304 poly_int64 offset;
9337 [Rn, #offset, MUL VL]. */
9358 info->offset = const0_rtx;
9369 && poly_int_rtx_p (op1, &offset))
9373 info->offset = op1;
9374 info->const_offset = offset;
9381 && poly_int_rtx_p (op1, &offset))
9385 info->offset = op1;
9386 info->const_offset = offset;
9391 X,X: 7-bit signed scaled offset
9392 Q: 9-bit signed offset
9393 We conservatively require an offset representable in either mode.
9398 return (aarch64_offset_7bit_signed_scaled_p (DImode, offset)
9399 && (aarch64_offset_9bit_signed_unscaled_p (mode, offset)
9400 || offset_12bit_unsigned_scaled_p (mode, offset)));
9402 /* A 7-bit offset check because OImode will emit an ldp/stp
9404 For ldp/stp instructions, the offset is scaled for the size of a
9407 return aarch64_offset_7bit_signed_scaled_p (TImode, offset);
9412 return (aarch64_offset_7bit_signed_scaled_p (TImode, offset)
9414 offset + 32)
9416 offset + 32)));
9421 return (aarch64_offset_7bit_signed_scaled_p (TImode, offset)
9423 offset + 32));
9425 /* Make "m" use the LD1 offset range for SVE data modes, so
9430 ? offset_4bit_signed_scaled_p (mode, offset)
9431 : offset_9bit_signed_scaled_p (mode, offset));
9435 poly_int64 end_offset = (offset
9439 ? offset_4bit_signed_scaled_p (mode, offset)
9440 : (offset_9bit_signed_scaled_p (SVE_BYTE_MODE, offset)
9446 return offset_9bit_signed_scaled_p (mode, offset);
9452 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
9454 return (aarch64_offset_9bit_signed_unscaled_p (mode, offset)
9455 || offset_12bit_unsigned_scaled_p (mode, offset));
9483 info->offset = NULL_RTX;
9491 && poly_int_rtx_p (XEXP (XEXP (x, 1), 1), &offset)
9495 info->offset = XEXP (XEXP (x, 1), 1);
9496 info->const_offset = offset;
9501 X,X: 7-bit signed scaled offset
9502 Q: 9-bit signed offset
9503 We conservatively require an offset representable in either mode.
9506 return (aarch64_offset_7bit_signed_scaled_p (mode, offset)
9507 && aarch64_offset_9bit_signed_unscaled_p (mode, offset));
9513 && aarch64_offset_7bit_signed_scaled_p (mode, offset));
9515 return aarch64_offset_9bit_signed_unscaled_p (mode, offset);
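
The dual checks at 9391-9400 and 9501-9507 are conservative because a 128-bit value may end up either in a Q register (LDR/STR with a 9-bit signed unscaled or 12-bit unsigned scaled immediate) or in an X,X pair (LDP/STP with a 7-bit signed immediate scaled by the access size), and register allocation has not happened yet. A worked instance (numbers chosen for illustration): offset 48 is a multiple of 16 with 48/16 = 3 in -64..63 and also lies in -256..255, so it passes both; offset 8 would suit an unscaled LDUR but is not a multiple of 16, so the X,X form fails and the address is rejected.
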
9530 poly_int64 offset;
9531 rtx sym = strip_offset_and_salt (x, &offset);
9542 info->offset = XEXP (x, 1);
9546 poly_int64 offset;
9548 rtx sym = strip_offset_and_salt (info->offset, &offset);
9550 && offset.is_constant (&const_offset)
9554 /* The symbol and offset must be aligned to the access size. */
9609 poly_int64 offset;
9610 x = strip_offset_and_salt (x, &offset);
9619 rtx offset;
9621 split_const (x, &x, &offset);
9622 return aarch64_classify_symbol (x, INTVAL (offset));
9659 /* A general SVE offset is A * VQ + B. Remove the A component from
9664 offset. Use 4KB range for 1- and 2-byte accesses and a 16KB
9668 scaled 7-bit and signed 9-bit offset. */
9679 /* Split the offset into second_offset and the rest. */
9709 /* Convert the "mul vl" multiplier into a byte offset. */
9714 /* Split the offset into second_offset and the rest. */
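
Lines 9659-9714 carry out the decomposition stated at 9659: a general SVE offset is A * VQ + B, and each part is legitimized on its own. A minimal sketch of the idea (variable names are illustrative, not from the source):

    poly_int64 offset = ...;
    HOST_WIDE_INT A = offset.coeffs[1];       /* per-VQ factor */
    HOST_WIDE_INT B = offset.coeffs[0] - A;   /* VL-independent part */
    /* Step 1: fold A * VQ into the base, e.g. via ADDVL/ADDPL or
       CNT[BHWD]-based arithmetic, splitting out second_offset.  */
    /* Step 2: add B using the normal immediate ranges (the 4KB/16KB
       split described at 9664).  */
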
10323 argument of X, specifying a relocation offset
10326 with a relocation offset if appropriate.
10345 poly_int64 offset;
10346 rtx base = strip_offset_and_salt (x, &offset);
10843 INTVAL (addr.offset));
10849 reg_names [REGNO (addr.offset)]);
10852 reg_names [REGNO (addr.offset)], addr.shift);
10858 REGNO (addr.offset) - R0_REGNUM);
10861 REGNO (addr.offset) - R0_REGNUM, addr.shift);
10867 REGNO (addr.offset) - R0_REGNUM);
10870 REGNO (addr.offset) - R0_REGNUM, addr.shift);
10892 INTVAL (addr.offset));
10896 INTVAL (addr.offset));
10905 output_addr_const (f, addr.offset);
11002 /* OFFSET is an address offset for mode MODE, which has SIZE bytes.
11003 If OFFSET is out of range, return an offset of an anchor point
11007 aarch64_anchor_offset (HOST_WIDE_INT offset, HOST_WIDE_INT size,
11012 return (offset + 0x400) & ~0x7f0;
11016 if (offset & (size - 1))
11020 return (offset + 512) & ~0x3ff;
11021 return (offset + 0x100) & ~0x1ff;
11025 if (IN_RANGE (offset, -256, 0))
11029 return (offset + 0x100) & ~0x1ff;
11031 /* Use 12-bit offset by access size. */
11032 return offset & (~0xfff * size);
11039 where mask is selected by alignment and size of the offset.
11040 We try to pick as large a range for the offset as possible to
11051 HOST_WIDE_INT offset = INTVAL (offset_rtx);
11087 HOST_WIDE_INT base_offset = aarch64_anchor_offset (offset, size,
11093 return plus_constant (Pmode, base, offset - base_offset);
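
A worked instance of aarch64_anchor_offset (11007-11032) feeding the rewrite at 11087-11093 (numbers chosen for illustration): an aligned DImode access (size 8) at offset 0x12340 is a multiple of 8, not in -256..0, and not TImode/TFmode, so the final return applies:

    0x12340 & (~0xfff * 8) = 0x12340 & ~0x7fff = 0x10000

aarch64_legitimize_address then materializes base + 0x10000 once and leaves 0x12340 - 0x10000 = 0x2340 on the access itself, which is a multiple of 8 and at most 0xfff * 8 = 0x7ff8, i.e. a legal 12-bit scaled LDR/STR immediate; nearby accesses can CSE the shared anchor.
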
11433 char label[100];
11468 ASM_GENERATE_INTERNAL_LABEL (label, "Lrtx", CODE_LABEL_NUMBER (operands[2]));
11470 "adr\t%%4, %s", targetm.strip_name_encoding (label));
11476 assemble_label (asm_out_file, label);
11537 HOST_WIDE_INT offset)
11542 if ((offset & 3) && aarch64_can_use_per_function_literal_pools_p ())
11835 followed by an immediate (possibly 0) offset. */
14538 FIXME: 16E should be 64-bit; we only support a 48-bit offset now. */
14680 "%<-mstack-protector-guard-offset=%s%>",
14688 error ("both %<-mstack-protector-guard-offset%> and "
14706 error ("%qs is not a valid offset in %qs", str,
14707 "-mstack-protector-guard-offset=");
16315 aarch64_classify_symbol (rtx x, HOST_WIDE_INT offset)
16348 /* When we retrieve symbol + offset address, we have to make sure
16349 the offset does not cause overflow of the final address. But
16352 symbol + offset is outside the addressable range of +/-1MB in the
16353 TINY code model. So we limit the maximum offset to +/-64KB and
16354 assume the offset to the symbol is not larger than +/-(1MB - 64KB).
16361 if (!(IN_RANGE (offset, -0x10000, 0x10000)
16362 || offset_within_block_p (x, offset)))
16368 /* Same reasoning as the tiny code model, but the offset cap here is
16369 1MB, allowing +/-3.9GB for the offset to the symbol. */
16373 if (!(IN_RANGE (offset, -0x100000, 0x100000)
16374 || offset_within_block_p (x, offset)))
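
The caps at 16361 and 16373 split the instruction's reach between the symbol distance and the constant addend. Assuming adr's +/-1MB range (tiny model) and adrp's +/-4GB range (small model):

    tiny : |offset| <= 0x10000  (64KB)  =>  symbol distance <= 1MB - 64KB
    small: |offset| <= 0x100000 (1MB)   =>  symbol distance <= 4GB - 1MB  (the ~3.9GB at 16369)

offset_within_block_p is the escape hatch: an offset that provably stays inside the symbol's own object yields an address the code model had to be able to reach anyway.
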
16418 poly_int64 offset;
16419 x = strip_offset_and_salt (x, &offset);
16464 poly_int64 offset;
16465 if (poly_int_rtx_p (x, &offset))
16466 return aarch64_offset_temporaries (false, offset) <= 1;
16468 /* If an offset is being added to something else, we need to allow the
16470 are no free temporaries for the offset. */
16471 x = strip_offset_and_salt (x, &offset);
16472 if (!offset.is_constant () && aarch64_offset_temporaries (true, offset) > 0)
16476 if (maybe_ne (offset, 0) && SYMBOL_REF_P (x) && SYMBOL_REF_ANCHOR_P (x))
16555 /* Tell tree-stdarg pass about our internal offset fields.
16558 offset fields in an irregular way. */
16656 /* Emit code to initialize GROFF, the offset from GRTOP of the
16662 /* Likewise emit code to initialize VROFF, the offset from FTOP
16986 /* Set OFF to the offset from virtual_incoming_args_rtx of
19616 /* Emit the patching area before the entry label, if any. */
20037 rtx_code_label *label;
20041 label = gen_label_rtx ();
20042 emit_label (label);
20101 gen_rtx_LABEL_REF (Pmode, label), pc_rtx);
20775 rtx offset;
20805 offset = GEN_INT (location);
20808 gen_rtvec (3, d->op0, d->op1, offset),
22174 /* If MEM is in the form of [base+offset], extract the two parts
22179 extract_base_offset_in_addr (rtx mem, rtx *base, rtx *offset)
22190 *offset = const0_rtx;
22198 *offset = XEXP (addr, 1);
22203 *offset = NULL_RTX;
22219 /* If INSN is a load or store of address in the form of [base+offset],
22224 fusion_load_store (rtx_insn *insn, rtx *base, rtx *offset)
22258 extract_base_offset_in_addr (src, base, offset);
22262 extract_base_offset_in_addr (dest, base, offset);
22267 if (*base == NULL_RTX || *offset == NULL_RTX)
22287 rtx base, offset;
22293 fusion = fusion_load_store (insn, &base, &offset);
22307 /* INSN with smaller offset goes first. */
22308 off_val = (int)(INTVAL (offset));
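
A minimal usage sketch of 22179-22267 (the driver code is illustrative; the two address shapes are the ones handled at 22190 and 22198):

    rtx base, offset;
    extract_base_offset_in_addr (mem, &base, &offset);
    /* (mem:DI (plus:DI (reg:DI x1) (const_int 8)))
         -> base = (reg:DI x1), offset = (const_int 8)
       (mem:DI (reg:DI x1))
         -> base = (reg:DI x1), offset = const0_rtx
       anything else -> both NULL_RTX, as at 22203.  */
    if (base != NULL_RTX && offset != NULL_RTX)
      {
        int off_val = (int) INTVAL (offset);
        /* Schedule the insn with the smaller offset first (22307-22308).  */
      }
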
22387 /* Check if the addresses are in the form of [base+offset]. */
22491 other pointing to a REG rtx containing an offset, compare the offsets
22496 1 iff offset (X) > offset (Y)
22497 0 iff offset (X) == offset (Y)
22498 -1 iff offset (X) < offset (Y) */
22526 them into ldp/stp by adjusting the offset. LOAD is true if they
22537 still pair them after adjusting the offset, like:
22553 rtx mem[num_insns], reg[num_insns], base[num_insns], offset[num_insns];
22588 /* Check if the addresses are in the form of [base+offset]. */
22589 extract_base_offset_in_addr (mem[i], base + i, offset + i);
22590 if (base[i] == NULL_RTX || offset[i] == NULL_RTX)
22623 offvals[i] = INTVAL (offset[i]);
22657 into LDP/STP after adjusting the offset. It depends on the fact
22703 /* Adjust offset so it can fit in LDP/STP instruction. */
22711 /* The base offset is optimally halfway between the two STP/LDP offsets. */
22715 /* However, due to issues with negative LDP/STP offset generation for
22724 /* Fix the offset, bearing in mind we want to make it bigger not
22731 /* Check if base offset is too big or too small. We can attempt to resolve
22737 /* We must still make sure that the base offset is aligned with respect
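
A worked instance of the adjustment at 22703-22737 (numbers chosen for illustration): four DImode stores at sp+1024, +1032, +1040 and +1048 all miss stp's scaled imm7 window of -512..504, so no pair can use sp directly. The optimal anchor sits half way through the run (22711); after the make-it-bigger and alignment fix-ups at 22724-22737, an anchor such as base = sp + 1024 gives relative offsets 0, 8, 16 and 24, and both pairs fit the window off a single new base register.
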
22968 int *offset)
22973 *offset = 1;
24110 /* Limit the maximum anchor offset to 4k-1, since that's the limit for a
24111 byte offset; we can do much more for larger data types, but have no way