MCAssembler.cpp revision 321369
//===- lib/MC/MCAssembler.cpp - Assembler Backend Implementation ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//

#include "llvm/MC/MCAssembler.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Twine.h"
#include "llvm/MC/MCAsmBackend.h"
#include "llvm/MC/MCAsmInfo.h"
#include "llvm/MC/MCAsmLayout.h"
#include "llvm/MC/MCCodeEmitter.h"
#include "llvm/MC/MCCodeView.h"
#include "llvm/MC/MCContext.h"
#include "llvm/MC/MCDwarf.h"
#include "llvm/MC/MCExpr.h"
#include "llvm/MC/MCFixup.h"
#include "llvm/MC/MCFixupKindInfo.h"
#include "llvm/MC/MCFragment.h"
#include "llvm/MC/MCInst.h"
#include "llvm/MC/MCObjectWriter.h"
#include "llvm/MC/MCSection.h"
#include "llvm/MC/MCSectionELF.h"
#include "llvm/MC/MCSymbol.h"
#include "llvm/MC/MCValue.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/LEB128.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include <cassert>
#include <cstdint>
#include <cstring>
#include <tuple>
#include <utility>

using namespace llvm;

#define DEBUG_TYPE "assembler"

namespace {
namespace stats {

STATISTIC(EmittedFragments, "Number of emitted assembler fragments - total");
STATISTIC(EmittedRelaxableFragments,
          "Number of emitted assembler fragments - relaxable");
STATISTIC(EmittedDataFragments,
          "Number of emitted assembler fragments - data");
STATISTIC(EmittedCompactEncodedInstFragments,
          "Number of emitted assembler fragments - compact encoded inst");
STATISTIC(EmittedAlignFragments,
          "Number of emitted assembler fragments - align");
STATISTIC(EmittedFillFragments,
          "Number of emitted assembler fragments - fill");
STATISTIC(EmittedOrgFragments,
          "Number of emitted assembler fragments - org");
STATISTIC(evaluateFixup, "Number of evaluated fixups");
STATISTIC(FragmentLayouts, "Number of fragment layouts");
STATISTIC(ObjectBytes, "Number of emitted object file bytes");
STATISTIC(RelaxationSteps, "Number of assembler layout and relaxation steps");
STATISTIC(RelaxedInstructions, "Number of relaxed instructions");

} // end namespace stats
} // end anonymous namespace

// FIXME FIXME FIXME: There are a number of places in this file where we
// convert what is a 64-bit assembler value used for computation into a value
// in the object file, which may truncate it. We should detect that truncation
// where invalid and report errors back.
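// (For example, a fixup whose computed value needs more than 32 bits but
// whose target field is only 4 bytes wide currently has its high bits
// dropped silently.)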
79 80/* *** */ 81 82MCAssembler::MCAssembler(MCContext &Context, MCAsmBackend &Backend, 83 MCCodeEmitter &Emitter, MCObjectWriter &Writer) 84 : Context(Context), Backend(Backend), Emitter(Emitter), Writer(Writer), 85 BundleAlignSize(0), RelaxAll(false), SubsectionsViaSymbols(false), 86 IncrementalLinkerCompatible(false), ELFHeaderEFlags(0) { 87 VersionMinInfo.Major = 0; // Major version == 0 for "none specified" 88} 89 90MCAssembler::~MCAssembler() = default; 91 92void MCAssembler::reset() { 93 Sections.clear(); 94 Symbols.clear(); 95 IndirectSymbols.clear(); 96 DataRegions.clear(); 97 LinkerOptions.clear(); 98 FileNames.clear(); 99 ThumbFuncs.clear(); 100 BundleAlignSize = 0; 101 RelaxAll = false; 102 SubsectionsViaSymbols = false; 103 IncrementalLinkerCompatible = false; 104 ELFHeaderEFlags = 0; 105 LOHContainer.reset(); 106 VersionMinInfo.Major = 0; 107 108 // reset objects owned by us 109 getBackend().reset(); 110 getEmitter().reset(); 111 getWriter().reset(); 112 getLOHContainer().reset(); 113} 114 115bool MCAssembler::registerSection(MCSection &Section) { 116 if (Section.isRegistered()) 117 return false; 118 Sections.push_back(&Section); 119 Section.setIsRegistered(true); 120 return true; 121} 122 123bool MCAssembler::isThumbFunc(const MCSymbol *Symbol) const { 124 if (ThumbFuncs.count(Symbol)) 125 return true; 126 127 if (!Symbol->isVariable()) 128 return false; 129 130 const MCExpr *Expr = Symbol->getVariableValue(); 131 132 MCValue V; 133 if (!Expr->evaluateAsRelocatable(V, nullptr, nullptr)) 134 return false; 135 136 if (V.getSymB() || V.getRefKind() != MCSymbolRefExpr::VK_None) 137 return false; 138 139 const MCSymbolRefExpr *Ref = V.getSymA(); 140 if (!Ref) 141 return false; 142 143 if (Ref->getKind() != MCSymbolRefExpr::VK_None) 144 return false; 145 146 const MCSymbol &Sym = Ref->getSymbol(); 147 if (!isThumbFunc(&Sym)) 148 return false; 149 150 ThumbFuncs.insert(Symbol); // Cache it. 151 return true; 152} 153 154bool MCAssembler::isSymbolLinkerVisible(const MCSymbol &Symbol) const { 155 // Non-temporary labels should always be visible to the linker. 156 if (!Symbol.isTemporary()) 157 return true; 158 159 // Absolute temporary labels are never visible. 160 if (!Symbol.isInSection()) 161 return false; 162 163 if (Symbol.isUsedInReloc()) 164 return true; 165 166 return false; 167} 168 169const MCSymbol *MCAssembler::getAtom(const MCSymbol &S) const { 170 // Linker visible symbols define atoms. 171 if (isSymbolLinkerVisible(S)) 172 return &S; 173 174 // Absolute and undefined symbols have no defining atom. 175 if (!S.isInSection()) 176 return nullptr; 177 178 // Non-linker visible symbols in sections which can't be atomized have no 179 // defining atom. 180 if (!getContext().getAsmInfo()->isSectionAtomizableBySymbols( 181 *S.getFragment()->getParent())) 182 return nullptr; 183 184 // Otherwise, return the atom for the containing fragment. 185 return S.getFragment()->getAtom(); 186} 187 188bool MCAssembler::evaluateFixup(const MCAsmLayout &Layout, 189 const MCFixup &Fixup, const MCFragment *DF, 190 MCValue &Target, uint64_t &Value) const { 191 ++stats::evaluateFixup; 192 193 // FIXME: This code has some duplication with recordRelocation. We should 194 // probably merge the two into a single callback that tries to evaluate a 195 // fixup and records a relocation if one is needed. 196 197 // On error claim to have completely evaluated the fixup, to prevent any 198 // further processing from being done. 
  const MCExpr *Expr = Fixup.getValue();
  MCContext &Ctx = getContext();
  Value = 0;
  if (!Expr->evaluateAsRelocatable(Target, &Layout, &Fixup)) {
    Ctx.reportError(Fixup.getLoc(), "expected relocatable expression");
    return true;
  }
  if (const MCSymbolRefExpr *RefB = Target.getSymB()) {
    if (RefB->getKind() != MCSymbolRefExpr::VK_None) {
      Ctx.reportError(Fixup.getLoc(),
                      "unsupported subtraction of qualified symbol");
      return true;
    }
  }

  bool IsPCRel = Backend.getFixupKindInfo(
    Fixup.getKind()).Flags & MCFixupKindInfo::FKF_IsPCRel;

  bool IsResolved;
  if (IsPCRel) {
    if (Target.getSymB()) {
      IsResolved = false;
    } else if (!Target.getSymA()) {
      IsResolved = false;
    } else {
      const MCSymbolRefExpr *A = Target.getSymA();
      const MCSymbol &SA = A->getSymbol();
      if (A->getKind() != MCSymbolRefExpr::VK_None || SA.isUndefined()) {
        IsResolved = false;
      } else {
        IsResolved = getWriter().isSymbolRefDifferenceFullyResolvedImpl(
            *this, SA, *DF, false, true);
      }
    }
  } else {
    IsResolved = Target.isAbsolute();
  }

  Value = Target.getConstant();

  if (const MCSymbolRefExpr *A = Target.getSymA()) {
    const MCSymbol &Sym = A->getSymbol();
    if (Sym.isDefined())
      Value += Layout.getSymbolOffset(Sym);
  }
  if (const MCSymbolRefExpr *B = Target.getSymB()) {
    const MCSymbol &Sym = B->getSymbol();
    if (Sym.isDefined())
      Value -= Layout.getSymbolOffset(Sym);
  }

  bool ShouldAlignPC = Backend.getFixupKindInfo(Fixup.getKind()).Flags &
                       MCFixupKindInfo::FKF_IsAlignedDownTo32Bits;
  assert((ShouldAlignPC ? IsPCRel : true) &&
         "FKF_IsAlignedDownTo32Bits is only allowed on PC-relative fixups!");

  if (IsPCRel) {
    uint32_t Offset = Layout.getFragmentOffset(DF) + Fixup.getOffset();

    // A number of ARM fixups in Thumb mode require that the effective PC
    // address be determined as the 32-bit aligned version of the actual
    // offset.
    if (ShouldAlignPC) Offset &= ~0x3;
    Value -= Offset;
  }

  // Let the backend force a relocation if needed.
  if (IsResolved && Backend.shouldForceRelocation(*this, Fixup, Target))
    IsResolved = false;

  return IsResolved;
}

uint64_t MCAssembler::computeFragmentSize(const MCAsmLayout &Layout,
                                          const MCFragment &F) const {
  switch (F.getKind()) {
  case MCFragment::FT_Data:
    return cast<MCDataFragment>(F).getContents().size();
  case MCFragment::FT_Relaxable:
    return cast<MCRelaxableFragment>(F).getContents().size();
  case MCFragment::FT_CompactEncodedInst:
    return cast<MCCompactEncodedInstFragment>(F).getContents().size();
  case MCFragment::FT_Fill:
    return cast<MCFillFragment>(F).getSize();

  case MCFragment::FT_LEB:
    return cast<MCLEBFragment>(F).getContents().size();

  case MCFragment::FT_SafeSEH:
    return 4;

  case MCFragment::FT_Align: {
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    unsigned Offset = Layout.getFragmentOffset(&AF);
    unsigned Size = OffsetToAlignment(Offset, AF.getAlignment());
    // If we are padding with nops, force the padding to be larger than the
    // minimum nop size.
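    // (Illustrative: OffsetToAlignment(13, 8) is 3, i.e. three bytes of
    // padding reach the next 8-byte boundary. On a target whose smallest nop
    // is wider than that, such a gap cannot be expressed as nops, so whole
    // alignment units are added below until the padding size is expressible.)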
    if (Size > 0 && AF.hasEmitNops()) {
      while (Size % getBackend().getMinimumNopSize())
        Size += AF.getAlignment();
    }
    if (Size > AF.getMaxBytesToEmit())
      return 0;
    return Size;
  }

  case MCFragment::FT_Org: {
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);
    MCValue Value;
    if (!OF.getOffset().evaluateAsValue(Value, Layout)) {
      getContext().reportError(OF.getLoc(),
                               "expected assembly-time absolute expression");
      return 0;
    }

    uint64_t FragmentOffset = Layout.getFragmentOffset(&OF);
    int64_t TargetLocation = Value.getConstant();
    if (const MCSymbolRefExpr *A = Value.getSymA()) {
      uint64_t Val;
      if (!Layout.getSymbolOffset(A->getSymbol(), Val)) {
        getContext().reportError(OF.getLoc(), "expected absolute expression");
        return 0;
      }
      TargetLocation += Val;
    }
    int64_t Size = TargetLocation - FragmentOffset;
    if (Size < 0 || Size >= 0x40000000) {
      getContext().reportError(
          OF.getLoc(), "invalid .org offset '" + Twine(TargetLocation) +
                           "' (at offset '" + Twine(FragmentOffset) + "')");
      return 0;
    }
    return Size;
  }

  case MCFragment::FT_Dwarf:
    return cast<MCDwarfLineAddrFragment>(F).getContents().size();
  case MCFragment::FT_DwarfFrame:
    return cast<MCDwarfCallFrameFragment>(F).getContents().size();
  case MCFragment::FT_CVInlineLines:
    return cast<MCCVInlineLineTableFragment>(F).getContents().size();
  case MCFragment::FT_CVDefRange:
    return cast<MCCVDefRangeFragment>(F).getContents().size();
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  llvm_unreachable("invalid fragment kind");
}

void MCAsmLayout::layoutFragment(MCFragment *F) {
  MCFragment *Prev = F->getPrevNode();

  // We should never try to recompute something which is valid.
  assert(!isFragmentValid(F) && "Attempt to recompute a valid fragment!");
  // We should never try to compute the fragment layout if its predecessor
  // isn't valid.
  assert((!Prev || isFragmentValid(Prev)) &&
         "Attempt to compute fragment before its predecessor!");

  ++stats::FragmentLayouts;

  // Compute fragment offset and size.
  if (Prev)
    F->Offset = Prev->Offset + getAssembler().computeFragmentSize(*this, *Prev);
  else
    F->Offset = 0;
  LastValidFragment[F->getParent()] = F;

  // If bundling is enabled and this fragment has instructions in it, it has to
  // obey the bundling restrictions. With padding, we'll have:
  //
  //
  //        BundlePadding
  //             |||
  // -------------------------------------
  //   Prev  |##########|       F        |
  // -------------------------------------
  //                    ^
  //                    |
  //                    F->Offset
  //
  // The fragment's offset will point to after the padding, and its computed
  // size won't include the padding.
  //
  // When the -mc-relax-all flag is used, we optimize bundling by writing the
  // padding directly into fragments when the instructions are emitted inside
  // the streamer. When the fragment is larger than the bundle size, we need to
  // ensure that it's bundle aligned. This means that if we end up with
  // multiple fragments, we must emit bundle padding between fragments.
  //
  // ".align N" is an example of a directive that introduces multiple
  // fragments. We could add a special case to handle ".align N" by emitting
  // within-fragment padding (which would produce less padding when N is less
  // than the bundle size), but for now we don't.
  //
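  // (Illustrative: with 16-byte bundles, a 5-byte fragment that would start
  // at offset 14 would cross the boundary at 16, so computeBundlePadding
  // returns 2 and the fragment is shifted to start at offset 16.)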
  if (Assembler.isBundlingEnabled() && F->hasInstructions()) {
    assert(isa<MCEncodedFragment>(F) &&
           "Only MCEncodedFragment implementations have instructions");
    uint64_t FSize = Assembler.computeFragmentSize(*this, *F);

    if (!Assembler.getRelaxAll() && FSize > Assembler.getBundleAlignSize())
      report_fatal_error("Fragment can't be larger than a bundle size");

    uint64_t RequiredBundlePadding = computeBundlePadding(Assembler, F,
                                                          F->Offset, FSize);
    if (RequiredBundlePadding > UINT8_MAX)
      report_fatal_error("Padding cannot exceed 255 bytes");
    F->setBundlePadding(static_cast<uint8_t>(RequiredBundlePadding));
    F->Offset += RequiredBundlePadding;
  }
}

void MCAssembler::registerSymbol(const MCSymbol &Symbol, bool *Created) {
  bool New = !Symbol.isRegistered();
  if (Created)
    *Created = New;
  if (New) {
    Symbol.setIsRegistered(true);
    Symbols.push_back(&Symbol);
  }
}

void MCAssembler::writeFragmentPadding(const MCFragment &F, uint64_t FSize,
                                       MCObjectWriter *OW) const {
  // Should NOP padding be written out before this fragment?
  unsigned BundlePadding = F.getBundlePadding();
  if (BundlePadding > 0) {
    assert(isBundlingEnabled() &&
           "Writing bundle padding with disabled bundling");
    assert(F.hasInstructions() &&
           "Writing bundle padding for a fragment without instructions");

    unsigned TotalLength = BundlePadding + static_cast<unsigned>(FSize);
    if (F.alignToBundleEnd() && TotalLength > getBundleAlignSize()) {
      // If the padding itself crosses a bundle boundary, it must be emitted
      // in 2 pieces, since even nop instructions must not cross boundaries.
      //
      //  v--------------v          <- BundleAlignSize
      //     v---------v            <- BundlePadding
      // ----------------------------
      // | Prev |####|####|    F    |
      // ----------------------------
      //        ^-------------------^ <- TotalLength
      unsigned DistanceToBoundary = TotalLength - getBundleAlignSize();
      if (!getBackend().writeNopData(DistanceToBoundary, OW))
        report_fatal_error("unable to write NOP sequence of " +
                           Twine(DistanceToBoundary) + " bytes");
      BundlePadding -= DistanceToBoundary;
    }
    if (!getBackend().writeNopData(BundlePadding, OW))
      report_fatal_error("unable to write NOP sequence of " +
                         Twine(BundlePadding) + " bytes");
  }
}
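// (Illustrative: with 16-byte bundles, a 12-byte fragment aligned to a bundle
// end and preceded by 9 bytes of padding gives TotalLength = 21. The padding
// is split as DistanceToBoundary = 21 - 16 = 5 nop bytes up to the boundary,
// then 4 more, so the fragment occupies the last 12 bytes of the next bundle.)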
/// \brief Write the fragment \p F to the output file.
static void writeFragment(const MCAssembler &Asm, const MCAsmLayout &Layout,
                          const MCFragment &F) {
  MCObjectWriter *OW = &Asm.getWriter();

  // FIXME: Embed in fragments instead?
  uint64_t FragmentSize = Asm.computeFragmentSize(Layout, F);

  Asm.writeFragmentPadding(F, FragmentSize, OW);

  // This variable (and its dummy usage) is to participate in the assert at
  // the end of the function.
  uint64_t Start = OW->getStream().tell();
  (void) Start;

  ++stats::EmittedFragments;

  switch (F.getKind()) {
  case MCFragment::FT_Align: {
    ++stats::EmittedAlignFragments;
    const MCAlignFragment &AF = cast<MCAlignFragment>(F);
    assert(AF.getValueSize() && "Invalid virtual align in concrete fragment!");

    uint64_t Count = FragmentSize / AF.getValueSize();

    // FIXME: This error shouldn't actually occur (the front end should emit
    // multiple .align directives to enforce the semantics it wants), but is
    // severe enough that we want to report it. How to handle this?
    if (Count * AF.getValueSize() != FragmentSize)
      report_fatal_error("undefined .align directive, value size '" +
                         Twine(AF.getValueSize()) +
                         "' is not a divisor of padding size '" +
                         Twine(FragmentSize) + "'");

    // See if we are aligning with nops; if so, do that first and try to fill
    // the Count bytes. If that did not fill any bytes, or there are bytes
    // left to fill, use the Value and ValueSize to fill the rest.
    // When aligning with nops, ask the target to emit the right data.
    if (AF.hasEmitNops()) {
      if (!Asm.getBackend().writeNopData(Count, OW))
        report_fatal_error("unable to write nop sequence of " +
                           Twine(Count) + " bytes");
      break;
    }

    // Otherwise, write out in multiples of the value size.
    for (uint64_t i = 0; i != Count; ++i) {
      switch (AF.getValueSize()) {
      default: llvm_unreachable("Invalid size!");
      case 1: OW->write8 (uint8_t (AF.getValue())); break;
      case 2: OW->write16(uint16_t(AF.getValue())); break;
      case 4: OW->write32(uint32_t(AF.getValue())); break;
      case 8: OW->write64(uint64_t(AF.getValue())); break;
      }
    }
    break;
  }

  case MCFragment::FT_Data:
    ++stats::EmittedDataFragments;
    OW->writeBytes(cast<MCDataFragment>(F).getContents());
    break;

  case MCFragment::FT_Relaxable:
    ++stats::EmittedRelaxableFragments;
    OW->writeBytes(cast<MCRelaxableFragment>(F).getContents());
    break;

  case MCFragment::FT_CompactEncodedInst:
    ++stats::EmittedCompactEncodedInstFragments;
    OW->writeBytes(cast<MCCompactEncodedInstFragment>(F).getContents());
    break;

  case MCFragment::FT_Fill: {
    ++stats::EmittedFillFragments;
    const MCFillFragment &FF = cast<MCFillFragment>(F);
    uint8_t V = FF.getValue();
    const unsigned MaxChunkSize = 16;
    char Data[MaxChunkSize];
    memcpy(Data, &V, 1);
    for (unsigned I = 1; I < MaxChunkSize; ++I)
      Data[I] = Data[0];

    uint64_t Size = FF.getSize();
    for (unsigned ChunkSize = MaxChunkSize; ChunkSize; ChunkSize /= 2) {
      StringRef Ref(Data, ChunkSize);
      for (uint64_t I = 0, E = Size / ChunkSize; I != E; ++I)
        OW->writeBytes(Ref);
      Size = Size % ChunkSize;
    }
    break;
  }

  case MCFragment::FT_LEB: {
    const MCLEBFragment &LF = cast<MCLEBFragment>(F);
    OW->writeBytes(LF.getContents());
    break;
  }

  case MCFragment::FT_SafeSEH: {
    const MCSafeSEHFragment &SF = cast<MCSafeSEHFragment>(F);
    OW->write32(SF.getSymbol()->getIndex());
    break;
  }

  case MCFragment::FT_Org: {
    ++stats::EmittedOrgFragments;
    const MCOrgFragment &OF = cast<MCOrgFragment>(F);

    for (uint64_t i = 0, e = FragmentSize; i != e; ++i)
      OW->write8(uint8_t(OF.getValue()));

    break;
  }
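  // The remaining fragment kinds hold bytes that were already encoded by the
  // relax* methods below, so emission is a plain copy of their contents.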
  case MCFragment::FT_Dwarf: {
    const MCDwarfLineAddrFragment &OF = cast<MCDwarfLineAddrFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_DwarfFrame: {
    const MCDwarfCallFrameFragment &CF = cast<MCDwarfCallFrameFragment>(F);
    OW->writeBytes(CF.getContents());
    break;
  }
  case MCFragment::FT_CVInlineLines: {
    const auto &OF = cast<MCCVInlineLineTableFragment>(F);
    OW->writeBytes(OF.getContents());
    break;
  }
  case MCFragment::FT_CVDefRange: {
    const auto &DRF = cast<MCCVDefRangeFragment>(F);
    OW->writeBytes(DRF.getContents());
    break;
  }
  case MCFragment::FT_Dummy:
    llvm_unreachable("Should not have been added");
  }

  assert(OW->getStream().tell() - Start == FragmentSize &&
         "The stream should advance by fragment size");
}

void MCAssembler::writeSectionData(const MCSection *Sec,
                                   const MCAsmLayout &Layout) const {
  // Ignore virtual sections.
  if (Sec->isVirtualSection()) {
    assert(Layout.getSectionFileSize(Sec) == 0 && "Invalid size for section!");

    // Check that contents are only things legal inside a virtual section.
    for (const MCFragment &F : *Sec) {
      switch (F.getKind()) {
      default: llvm_unreachable("Invalid fragment in virtual section!");
      case MCFragment::FT_Data: {
        // Check that we aren't trying to write non-zero contents (or fixups)
        // into a virtual section. This is to support clients which use
        // standard directives to fill the contents of virtual sections.
        const MCDataFragment &DF = cast<MCDataFragment>(F);
        if (DF.fixup_begin() != DF.fixup_end())
          report_fatal_error("cannot have fixups in virtual section!");
        for (unsigned i = 0, e = DF.getContents().size(); i != e; ++i)
          if (DF.getContents()[i]) {
            if (auto *ELFSec = dyn_cast<const MCSectionELF>(Sec))
              report_fatal_error("non-zero initializer found in section '" +
                                 ELFSec->getSectionName() + "'");
            else
              report_fatal_error("non-zero initializer found in virtual section");
          }
        break;
      }
      case MCFragment::FT_Align:
        // Check that we aren't trying to write a non-zero value into a virtual
        // section.
        assert((cast<MCAlignFragment>(F).getValueSize() == 0 ||
                cast<MCAlignFragment>(F).getValue() == 0) &&
               "Invalid align in virtual section!");
        break;
      case MCFragment::FT_Fill:
        assert((cast<MCFillFragment>(F).getValue() == 0) &&
               "Invalid fill in virtual section!");
        break;
      }
    }

    return;
  }

  uint64_t Start = getWriter().getStream().tell();
  (void)Start;

  for (const MCFragment &F : *Sec)
    writeFragment(*this, Layout, F);

  assert(getWriter().getStream().tell() - Start ==
         Layout.getSectionAddressSize(Sec));
}

std::tuple<MCValue, uint64_t, bool>
MCAssembler::handleFixup(const MCAsmLayout &Layout, MCFragment &F,
                         const MCFixup &Fixup) {
  // Evaluate the fixup.
  MCValue Target;
  uint64_t FixedValue;
  bool IsResolved = evaluateFixup(Layout, Fixup, &F, Target, FixedValue);
  if (!IsResolved) {
    // The fixup was unresolved, we need a relocation. Inform the object
    // writer of the relocation, and give it an opportunity to adjust the
    // fixup value if need be.
    getWriter().recordRelocation(*this, Layout, &F, Fixup, Target, FixedValue);
  }
  return std::make_tuple(Target, FixedValue, IsResolved);
}

void MCAssembler::layout(MCAsmLayout &Layout) {
  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - pre-layout\n--\n";
      dump(); });

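  // Informally, layout proceeds in phases: (1) give every section a fragment
  // and an ordinal, (2) assign layout order indices, (3) iterate layoutOnce
  // until no fragment changes size, (4) lower fragments in finishLayout,
  // (5) let the writer do post-layout binding, and (6) evaluate fixups,
  // recording relocations for the unresolved ones.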
  // Create dummy fragments and assign section ordinals.
  unsigned SectionIndex = 0;
  for (MCSection &Sec : *this) {
    // Create dummy fragments to eliminate any empty sections, this simplifies
    // layout.
    if (Sec.getFragmentList().empty())
      new MCDataFragment(&Sec);

    Sec.setOrdinal(SectionIndex++);
  }

  // Assign layout order indices to sections and fragments.
  for (unsigned i = 0, e = Layout.getSectionOrder().size(); i != e; ++i) {
    MCSection *Sec = Layout.getSectionOrder()[i];
    Sec->setLayoutOrder(i);

    unsigned FragmentIndex = 0;
    for (MCFragment &Frag : *Sec)
      Frag.setLayoutOrder(FragmentIndex++);
  }

  // Layout until everything fits.
  while (layoutOnce(Layout))
    if (getContext().hadError())
      return;

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - post-relaxation\n--\n";
      dump(); });

  // Finalize the layout, including fragment lowering.
  finishLayout(Layout);

  DEBUG_WITH_TYPE("mc-dump", {
      errs() << "assembler backend - final-layout\n--\n";
      dump(); });

  // Allow the object writer a chance to perform post-layout binding (for
  // example, to set the index fields in the symbol data).
  getWriter().executePostLayoutBinding(*this, Layout);

  // Evaluate and apply the fixups, generating relocation entries as necessary.
  for (MCSection &Sec : *this) {
    for (MCFragment &Frag : Sec) {
      // Data and relaxable fragments both have fixups. So only process
      // those here.
      // FIXME: Is there a better way to do this? MCEncodedFragmentWithFixups
      // being templated makes this tricky.
      if (isa<MCEncodedFragment>(&Frag) &&
          isa<MCCompactEncodedInstFragment>(&Frag))
        continue;
      if (!isa<MCEncodedFragment>(&Frag) && !isa<MCCVDefRangeFragment>(&Frag))
        continue;
      ArrayRef<MCFixup> Fixups;
      MutableArrayRef<char> Contents;
      if (auto *FragWithFixups = dyn_cast<MCDataFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups = dyn_cast<MCRelaxableFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else if (auto *FragWithFixups = dyn_cast<MCCVDefRangeFragment>(&Frag)) {
        Fixups = FragWithFixups->getFixups();
        Contents = FragWithFixups->getContents();
      } else
        llvm_unreachable("Unknown fragment with fixups!");
      for (const MCFixup &Fixup : Fixups) {
        uint64_t FixedValue;
        bool IsResolved;
        MCValue Target;
        std::tie(Target, FixedValue, IsResolved) =
            handleFixup(Layout, Frag, Fixup);
        getBackend().applyFixup(*this, Fixup, Target, Contents, FixedValue,
                                IsResolved);
      }
    }
  }
}

void MCAssembler::Finish() {
  // Create the layout object.
  MCAsmLayout Layout(*this);
  layout(Layout);

  raw_ostream &OS = getWriter().getStream();
  uint64_t StartOffset = OS.tell();

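  // From here on the output is pure emission: layout is frozen and all fixups
  // have either been applied or turned into relocations by layout() above.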
  // Write the object file.
  getWriter().writeObject(*this, Layout);

  stats::ObjectBytes += OS.tell() - StartOffset;
}

bool MCAssembler::fixupNeedsRelaxation(const MCFixup &Fixup,
                                       const MCRelaxableFragment *DF,
                                       const MCAsmLayout &Layout) const {
  MCValue Target;
  uint64_t Value;
  bool Resolved = evaluateFixup(Layout, Fixup, DF, Target, Value);
  if (Target.getSymA() &&
      Target.getSymA()->getKind() == MCSymbolRefExpr::VK_X86_ABS8 &&
      Fixup.getKind() == FK_Data_1)
    return false;
  return getBackend().fixupNeedsRelaxationAdvanced(Fixup, Resolved, Value, DF,
                                                   Layout);
}

bool MCAssembler::fragmentNeedsRelaxation(const MCRelaxableFragment *F,
                                          const MCAsmLayout &Layout) const {
  // If this inst doesn't ever need relaxation, ignore it. This occurs when we
  // are intentionally pushing out inst fragments, or when a previous
  // instruction was relaxed to one that doesn't need relaxation.
  if (!getBackend().mayNeedRelaxation(F->getInst()))
    return false;

  for (const MCFixup &Fixup : F->getFixups())
    if (fixupNeedsRelaxation(Fixup, F, Layout))
      return true;

  return false;
}

bool MCAssembler::relaxInstruction(MCAsmLayout &Layout,
                                   MCRelaxableFragment &F) {
  if (!fragmentNeedsRelaxation(&F, Layout))
    return false;

  ++stats::RelaxedInstructions;

  // FIXME-PERF: We could immediately lower out instructions if we can tell
  // they are fully resolved, to avoid retesting on later passes.

  // Relax the fragment.

  MCInst Relaxed;
  getBackend().relaxInstruction(F.getInst(), F.getSubtargetInfo(), Relaxed);

  // Encode the new instruction.
  //
  // FIXME-PERF: If it matters, we could let the target do this. It can
  // probably do so more efficiently in many cases.
  SmallVector<MCFixup, 4> Fixups;
  SmallString<256> Code;
  raw_svector_ostream VecOS(Code);
  getEmitter().encodeInstruction(Relaxed, VecOS, Fixups, F.getSubtargetInfo());

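  // (On x86, for example, this is where a short Jcc/JMP with an out-of-range
  // rel8 target becomes the longer rel32 form; the fragment grows and later
  // offsets are recomputed on the next layout pass.)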
  // Update the fragment.
  F.setInst(Relaxed);
  F.getContents() = Code;
  F.getFixups() = Fixups;

  return true;
}

bool MCAssembler::relaxLEB(MCAsmLayout &Layout, MCLEBFragment &LF) {
  uint64_t OldSize = LF.getContents().size();
  int64_t Value;
  bool Abs = LF.getValue().evaluateKnownAbsolute(Value, Layout);
  if (!Abs)
    report_fatal_error("sleb128 and uleb128 expressions must be absolute");
  SmallString<8> &Data = LF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  if (LF.isSigned())
    encodeSLEB128(Value, OSE);
  else
    encodeULEB128(Value, OSE);
  return OldSize != LF.getContents().size();
}

bool MCAssembler::relaxDwarfLineAddr(MCAsmLayout &Layout,
                                     MCDwarfLineAddrFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created a line delta with an invalid expression");
  (void) Abs;
  int64_t LineDelta;
  LineDelta = DF.getLineDelta();
  SmallString<8> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  MCDwarfLineAddr::Encode(Context, getDWARFLinetableParams(), LineDelta,
                          AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxDwarfCallFrameFragment(MCAsmLayout &Layout,
                                              MCDwarfCallFrameFragment &DF) {
  MCContext &Context = Layout.getAssembler().getContext();
  uint64_t OldSize = DF.getContents().size();
  int64_t AddrDelta;
  bool Abs = DF.getAddrDelta().evaluateKnownAbsolute(AddrDelta, Layout);
  assert(Abs && "We created call frame with an invalid expression");
  (void) Abs;
  SmallString<8> &Data = DF.getContents();
  Data.clear();
  raw_svector_ostream OSE(Data);
  MCDwarfFrameEmitter::EncodeAdvanceLoc(Context, AddrDelta, OSE);
  return OldSize != Data.size();
}

bool MCAssembler::relaxCVInlineLineTable(MCAsmLayout &Layout,
                                         MCCVInlineLineTableFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeInlineLineTable(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::relaxCVDefRange(MCAsmLayout &Layout,
                                  MCCVDefRangeFragment &F) {
  unsigned OldSize = F.getContents().size();
  getContext().getCVContext().encodeDefRange(Layout, F);
  return OldSize != F.getContents().size();
}

bool MCAssembler::layoutSectionOnce(MCAsmLayout &Layout, MCSection &Sec) {
  // Holds the first fragment which needed relaxing during this layout. It will
  // remain NULL if none were relaxed.
  // When a fragment is relaxed, all the fragments following it should get
  // invalidated because their offset is going to change.
  MCFragment *FirstRelaxedFragment = nullptr;

  // Attempt to relax all the fragments in the section.
  for (MCSection::iterator I = Sec.begin(), IE = Sec.end(); I != IE; ++I) {
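    // Only the fragment kinds handled below can change size after the initial
    // layout; everything else keeps the size computed by computeFragmentSize.
    // (Illustrative LEB example: uleb128(624485) is the three bytes
    // 0xE5 0x8E 0x26, but if relaxation moves the operands so the value drops
    // below 128, relaxLEB shrinks the fragment to a single byte.)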
    // Check if this is a fragment that needs relaxation.
    bool RelaxedFrag = false;
    switch (I->getKind()) {
    default:
      break;
    case MCFragment::FT_Relaxable:
      assert(!getRelaxAll() &&
             "Did not expect a MCRelaxableFragment in RelaxAll mode");
      RelaxedFrag = relaxInstruction(Layout, *cast<MCRelaxableFragment>(I));
      break;
    case MCFragment::FT_Dwarf:
      RelaxedFrag = relaxDwarfLineAddr(Layout,
                                       *cast<MCDwarfLineAddrFragment>(I));
      break;
    case MCFragment::FT_DwarfFrame:
      RelaxedFrag =
        relaxDwarfCallFrameFragment(Layout,
                                    *cast<MCDwarfCallFrameFragment>(I));
      break;
    case MCFragment::FT_LEB:
      RelaxedFrag = relaxLEB(Layout, *cast<MCLEBFragment>(I));
      break;
    case MCFragment::FT_CVInlineLines:
      RelaxedFrag =
          relaxCVInlineLineTable(Layout, *cast<MCCVInlineLineTableFragment>(I));
      break;
    case MCFragment::FT_CVDefRange:
      RelaxedFrag = relaxCVDefRange(Layout, *cast<MCCVDefRangeFragment>(I));
      break;
    }
    if (RelaxedFrag && !FirstRelaxedFragment)
      FirstRelaxedFragment = &*I;
  }
  if (FirstRelaxedFragment) {
    Layout.invalidateFragmentsFrom(FirstRelaxedFragment);
    return true;
  }
  return false;
}

bool MCAssembler::layoutOnce(MCAsmLayout &Layout) {
  ++stats::RelaxationSteps;

  bool WasRelaxed = false;
  for (iterator it = begin(), ie = end(); it != ie; ++it) {
    MCSection &Sec = *it;
    while (layoutSectionOnce(Layout, Sec))
      WasRelaxed = true;
  }

  return WasRelaxed;
}

void MCAssembler::finishLayout(MCAsmLayout &Layout) {
  // The layout is done. Mark every fragment as valid.
  for (unsigned int i = 0, n = Layout.getSectionOrder().size(); i != n; ++i) {
    MCSection &Section = *Layout.getSectionOrder()[i];
    Layout.getFragmentOffset(&*Section.rbegin());
    computeFragmentSize(Layout, *Section.rbegin());
  }
  getBackend().finishLayout(*this, Layout);
}