1//===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
2//
3//                     The LLVM Compiler Infrastructure
4//
5// This file is distributed under the University of Illinois Open Source
6// License. See LICENSE.TXT for details.
7//
8//===----------------------------------------------------------------------===//
9//
10// This file defines a MachineCodeEmitter object that is used by the JIT to
11// write machine code to memory and remember where relocatable values are.
12//
13//===----------------------------------------------------------------------===//
14
15#define DEBUG_TYPE "jit"
16#include "JIT.h"
17#include "llvm/ADT/DenseMap.h"
18#include "llvm/ADT/OwningPtr.h"
19#include "llvm/ADT/SmallPtrSet.h"
20#include "llvm/ADT/SmallVector.h"
21#include "llvm/ADT/Statistic.h"
22#include "llvm/ADT/ValueMap.h"
23#include "llvm/CodeGen/JITCodeEmitter.h"
24#include "llvm/CodeGen/MachineCodeInfo.h"
25#include "llvm/CodeGen/MachineConstantPool.h"
26#include "llvm/CodeGen/MachineFunction.h"
27#include "llvm/CodeGen/MachineJumpTableInfo.h"
28#include "llvm/CodeGen/MachineModuleInfo.h"
29#include "llvm/CodeGen/MachineRelocation.h"
30#include "llvm/DebugInfo.h"
31#include "llvm/ExecutionEngine/GenericValue.h"
32#include "llvm/ExecutionEngine/JITEventListener.h"
33#include "llvm/ExecutionEngine/JITMemoryManager.h"
34#include "llvm/IR/Constants.h"
35#include "llvm/IR/DataLayout.h"
36#include "llvm/IR/DerivedTypes.h"
37#include "llvm/IR/Module.h"
38#include "llvm/Support/Debug.h"
39#include "llvm/Support/Disassembler.h"
40#include "llvm/Support/ErrorHandling.h"
41#include "llvm/Support/ManagedStatic.h"
42#include "llvm/Support/Memory.h"
43#include "llvm/Support/MutexGuard.h"
44#include "llvm/Support/ValueHandle.h"
45#include "llvm/Support/raw_ostream.h"
46#include "llvm/Target/TargetInstrInfo.h"
47#include "llvm/Target/TargetJITInfo.h"
48#include "llvm/Target/TargetMachine.h"
49#include "llvm/Target/TargetOptions.h"
50#include <algorithm>
51#ifndef NDEBUG
52#include <iomanip>
53#endif
54using namespace llvm;
55
56STATISTIC(NumBytes, "Number of bytes of machine code compiled");
57STATISTIC(NumRelos, "Number of relocations applied");
58STATISTIC(NumRetries, "Number of retries with more memory");
59
60
61// A declaration may stop being a declaration once it's fully read from bitcode.
62// This function returns true if F is fully read and is still a declaration.
63static bool isNonGhostDeclaration(const Function *F) {
64  return F->isDeclaration() && !F->isMaterializable();
65}
66
67//===----------------------------------------------------------------------===//
68// JIT lazy compilation code.
69//
70namespace {
71  class JITEmitter;
72  class JITResolverState;
73
  /// NoRAUWValueMapConfig - ValueMap configuration that aborts if a
  /// replaceAllUsesWith is ever performed on a value the JIT has already
  /// emitted code for, since the emitted machine code cannot be rewritten
  /// to follow the replacement.
  template<typename ValueTy>
  struct NoRAUWValueMapConfig : public ValueMapConfig<ValueTy> {
    typedef JITResolverState *ExtraData;
    static void onRAUW(JITResolverState *, Value *Old, Value *New) {
      llvm_unreachable("The JIT doesn't know how to handle a"
                       " RAUW on a value it has emitted.");
    }
  };
82
  /// CallSiteValueMapConfig - Extends NoRAUWValueMapConfig so that when a
  /// Function is deleted, every lazy call site recorded for it is erased
  /// from the owning JITResolverState (see the out-of-line onDelete body).
  struct CallSiteValueMapConfig : public NoRAUWValueMapConfig<Function*> {
    typedef JITResolverState *ExtraData;
    static void onDelete(JITResolverState *JRS, Function *F);
  };
87
  /// JITResolverState - The mutable bookkeeping a JITResolver keeps about
  /// lazy stubs, call sites, and indirect symbols.  Accessors that take a
  /// MutexGuard assert the caller already holds the JIT lock; "Prelocked"
  /// methods document the same requirement in their name.
  class JITResolverState {
  public:
    typedef ValueMap<Function*, void*, NoRAUWValueMapConfig<Function*> >
      FunctionToLazyStubMapTy;
    typedef std::map<void*, AssertingVH<Function> > CallSiteToFunctionMapTy;
    typedef ValueMap<Function *, SmallPtrSet<void*, 1>,
                     CallSiteValueMapConfig> FunctionToCallSitesMapTy;
    typedef std::map<AssertingVH<GlobalValue>, void*> GlobalToIndirectSymMapTy;
  private:
    /// FunctionToLazyStubMap - Keep track of the lazy stub created for a
    /// particular function so that we can reuse them if necessary.
    FunctionToLazyStubMapTy FunctionToLazyStubMap;

    /// CallSiteToFunctionMap - Keep track of the function that each lazy call
    /// site corresponds to, and vice versa.
    CallSiteToFunctionMapTy CallSiteToFunctionMap;
    FunctionToCallSitesMapTy FunctionToCallSitesMap;

    /// GlobalToIndirectSymMap - Keep track of the indirect symbol created for a
    /// particular GlobalVariable so that we can reuse them if necessary.
    GlobalToIndirectSymMapTy GlobalToIndirectSymMap;

#ifndef NDEBUG
    /// Instance of the JIT this ResolverState serves.  Only kept in debug
    /// builds, where it backs the lock-ownership assertions below.
    JIT *TheJIT;
#endif

  public:
    /// Construct the state for the given JIT.  The ValueMap members get a
    /// back-pointer to this object so their onDelete/onRAUW callbacks can
    /// reach us.
    JITResolverState(JIT *jit) : FunctionToLazyStubMap(this),
                                 FunctionToCallSitesMap(this) {
#ifndef NDEBUG
      TheJIT = jit;
#endif
    }

    /// getFunctionToLazyStubMap - Return the function->lazy-stub map.
    /// Caller must hold the JIT lock (checked through the MutexGuard).
    FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
      const MutexGuard& locked) {
      assert(locked.holds(TheJIT->lock));
      return FunctionToLazyStubMap;
    }

    /// getGlobalToIndirectSymMap - Return the global->indirect-symbol map.
    /// Caller must hold the JIT lock (checked through the MutexGuard).
    GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& lck) {
      assert(lck.holds(TheJIT->lock));
      return GlobalToIndirectSymMap;
    }

    /// LookupFunctionFromCallSite - Find the (stub address, Function) pair
    /// for a call-site address that may point slightly past the stub itself.
    std::pair<void *, Function *> LookupFunctionFromCallSite(
        const MutexGuard &locked, void *CallSite) const {
      assert(locked.holds(TheJIT->lock));

      // The address given to us for the stub may not be exactly right, it
      // might be a little bit after the stub.  As such, use upper_bound to
      // find it.
      CallSiteToFunctionMapTy::const_iterator I =
        CallSiteToFunctionMap.upper_bound(CallSite);
      assert(I != CallSiteToFunctionMap.begin() &&
             "This is not a known call site!");
      --I;
      return *I;
    }

    /// AddCallSite - Record that CallSite is a lazy stub which, when hit,
    /// should compile F.  Asserts if the site was already registered.
    void AddCallSite(const MutexGuard &locked, void *CallSite, Function *F) {
      assert(locked.holds(TheJIT->lock));

      bool Inserted = CallSiteToFunctionMap.insert(
          std::make_pair(CallSite, F)).second;
      (void)Inserted;
      assert(Inserted && "Pair was already in CallSiteToFunctionMap");
      FunctionToCallSitesMap[F].insert(CallSite);
    }

    /// EraseAllCallSitesForPrelocked - Erase every call site recorded for F.
    /// Caller must already hold the JIT lock.
    void EraseAllCallSitesForPrelocked(Function *F);

    // Erases _all_ call sites regardless of their function.  This is used to
    // unregister the stub addresses from the StubToResolverMap in
    // ~JITResolver().
    void EraseAllCallSitesPrelocked();
  };
166
  /// JITResolver - Keep track of, and resolve, call sites for functions that
  /// have not yet been compiled.
  class JITResolver {
    typedef JITResolverState::FunctionToLazyStubMapTy FunctionToLazyStubMapTy;
    typedef JITResolverState::CallSiteToFunctionMapTy CallSiteToFunctionMapTy;
    typedef JITResolverState::GlobalToIndirectSymMapTy GlobalToIndirectSymMapTy;

    /// LazyResolverFn - The target lazy resolver function that we actually
    /// rewrite instructions to use.
    TargetJITInfo::LazyResolverFn LazyResolverFn;

    /// state - Stub/call-site/indirect-symbol bookkeeping, guarded by the
    /// JIT lock.
    JITResolverState state;

    /// ExternalFnToStubMap - This is the equivalent of FunctionToLazyStubMap
    /// for external functions.  TODO: Of course, external functions don't need
    /// a lazy stub.  It's actually here to make it more likely that far calls
    /// succeed, but no single stub can guarantee that.  I'll remove this in a
    /// subsequent checkin when I actually fix far calls.
    std::map<void*, void*> ExternalFnToStubMap;

    /// revGOTMap - map addresses to indexes in the GOT
    std::map<void*, unsigned> revGOTMap;
    /// nextGOTIndex - Last GOT index handed out.  getGOTIndexForAddr
    /// pre-increments this, so valid indexes start at 1 and 0 means "no
    /// slot assigned yet" in revGOTMap.
    unsigned nextGOTIndex;

    /// JE - Emitter used to codegen the stubs themselves.
    JITEmitter &JE;

    /// Instance of JIT corresponding to this Resolver.
    JIT *TheJIT;

  public:
    /// Construct a resolver for the given JIT, fetching the target's lazy
    /// resolver entry point (with JITCompilerFn as its callback) up front.
    explicit JITResolver(JIT &jit, JITEmitter &je)
      : state(&jit), nextGOTIndex(0), JE(je), TheJIT(&jit) {
      LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
    }

    ~JITResolver();

    /// getLazyFunctionStubIfAvailable - This returns a pointer to a function's
    /// lazy-compilation stub if it has already been created.
    void *getLazyFunctionStubIfAvailable(Function *F);

    /// getLazyFunctionStub - This returns a pointer to a function's
    /// lazy-compilation stub, creating one on demand as needed.
    void *getLazyFunctionStub(Function *F);

    /// getExternalFunctionStub - Return a stub for the function at the
    /// specified address, created lazily on demand.
    void *getExternalFunctionStub(void *FnAddr);

    /// getGlobalValueIndirectSym - Return an indirect symbol containing the
    /// specified GV address.
    void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);

    /// getGOTIndexForAddress - Return a new or existing index in the GOT for
    /// an address.  This function only manages slots, it does not manage the
    /// contents of the slots or the memory associated with the GOT.
    unsigned getGOTIndexForAddr(void *addr);

    /// JITCompilerFn - This function is called to resolve a stub to a compiled
    /// address.  If the LLVM Function corresponding to the stub has not yet
    /// been compiled, this function compiles it first.  Static because a stub
    /// can only supply its own address as context.
    static void *JITCompilerFn(void *Stub);
  };
230
231  class StubToResolverMapTy {
232    /// Map a stub address to a specific instance of a JITResolver so that
233    /// lazily-compiled functions can find the right resolver to use.
234    ///
235    /// Guarded by Lock.
236    std::map<void*, JITResolver*> Map;
237
238    /// Guards Map from concurrent accesses.
239    mutable sys::Mutex Lock;
240
241  public:
242    /// Registers a Stub to be resolved by Resolver.
243    void RegisterStubResolver(void *Stub, JITResolver *Resolver) {
244      MutexGuard guard(Lock);
245      Map.insert(std::make_pair(Stub, Resolver));
246    }
247    /// Unregisters the Stub when it's invalidated.
248    void UnregisterStubResolver(void *Stub) {
249      MutexGuard guard(Lock);
250      Map.erase(Stub);
251    }
252    /// Returns the JITResolver instance that owns the Stub.
253    JITResolver *getResolverFromStub(void *Stub) const {
254      MutexGuard guard(Lock);
255      // The address given to us for the stub may not be exactly right, it might
256      // be a little bit after the stub.  As such, use upper_bound to find it.
257      // This is the same trick as in LookupFunctionFromCallSite from
258      // JITResolverState.
259      std::map<void*, JITResolver*>::const_iterator I = Map.upper_bound(Stub);
260      assert(I != Map.begin() && "This is not a known stub!");
261      --I;
262      return I->second;
263    }
264    /// True if any stubs refer to the given resolver. Only used in an assert().
265    /// O(N)
266    bool ResolverHasStubs(JITResolver* Resolver) const {
267      MutexGuard guard(Lock);
268      for (std::map<void*, JITResolver*>::const_iterator I = Map.begin(),
269             E = Map.end(); I != E; ++I) {
270        if (I->second == Resolver)
271          return true;
272      }
273      return false;
274    }
275  };
  /// StubToResolverMap - The single process-wide stub registry.  This needs
  /// to be static so that a lazy call stub can access it with no context
  /// except the address of the stub; ManagedStatic gives it lazy
  /// construction and orderly shutdown.
  ManagedStatic<StubToResolverMapTy> StubToResolverMap;
279
  /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
  /// used to output functions to memory for execution.
  class JITEmitter : public JITCodeEmitter {
    /// MemMgr - Supplies the memory emitted code lives in.  Owned by this
    /// emitter (deleted in the destructor).
    JITMemoryManager *MemMgr;

    // When outputting a function stub in the context of some other function, we
    // save BufferBegin/BufferEnd/CurBufferPtr here.
    uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;

    // When reattempting to JIT a function after running out of space, we store
    // the estimated size of the function we're trying to JIT here, so we can
    // ask the memory manager for at least this much space.  When we
    // successfully emit the function, we reset this back to zero.
    uintptr_t SizeEstimate;

    /// Relocations - These are the relocations that the function needs, as
    /// emitted.
    std::vector<MachineRelocation> Relocations;

    /// MBBLocations - This vector is a mapping from MBB ID's to their address.
    /// It is filled in by the StartMachineBasicBlock callback and queried by
    /// the getMachineBasicBlockAddress callback.
    std::vector<uintptr_t> MBBLocations;

    /// ConstantPool - The constant pool for the current function.
    ///
    MachineConstantPool *ConstantPool;

    /// ConstantPoolBase - A pointer to the first entry in the constant pool.
    ///
    void *ConstantPoolBase;

    /// ConstPoolAddresses - Addresses of individual constant pool entries.
    ///
    SmallVector<uintptr_t, 8> ConstPoolAddresses;

    /// JumpTable - The jump tables for the current function.
    ///
    MachineJumpTableInfo *JumpTable;

    /// JumpTableBase - A pointer to the first entry in the jump table.
    ///
    void *JumpTableBase;

    /// Resolver - This contains info about the currently resolved functions.
    JITResolver Resolver;

    /// LabelLocations - This vector is a mapping from Label ID's to their
    /// address.
    DenseMap<MCSymbol*, uintptr_t> LabelLocations;

    /// MMI - Machine module info for exception information.
    MachineModuleInfo* MMI;

    // CurFn - The llvm function being emitted.  Only valid during
    // finishFunction().
    const Function *CurFn;

    /// Information about emitted code, which is passed to the
    /// JITEventListeners.  This is reset in startFunction and used in
    /// finishFunction.
    JITEvent_EmittedFunctionDetails EmissionDetails;

    /// EmittedCode - Records where a function's allocation, code start, and
    /// exception table landed, so the memory can be found and freed later.
    struct EmittedCode {
      void *FunctionBody;  // Beginning of the function's allocation.
      void *Code;  // The address the function's code actually starts at.
      void *ExceptionTable;
      EmittedCode() : FunctionBody(0), Code(0), ExceptionTable(0) {}
    };
    /// EmittedFunctionConfig - Lets EmittedFunctions react when an IR
    /// Function is deleted or RAUW'd (out-of-line bodies elsewhere).
    struct EmittedFunctionConfig : public ValueMapConfig<const Function*> {
      typedef JITEmitter *ExtraData;
      static void onDelete(JITEmitter *, const Function*);
      static void onRAUW(JITEmitter *, const Function*, const Function*);
    };
    ValueMap<const Function *, EmittedCode,
             EmittedFunctionConfig> EmittedFunctions;

    /// PrevDL - Last debug location seen by processDebugLoc; used to avoid
    /// recording duplicate line starts for the same location.
    DebugLoc PrevDL;

    /// Instance of the JIT
    JIT *TheJIT;

  public:
    /// Construct an emitter for the given JIT.  If JMM is null, a default
    /// memory manager is created; either way this emitter takes ownership.
    /// A GOT is allocated up front if the target's JITInfo requires one.
    JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
      : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
        EmittedFunctions(this), TheJIT(&jit) {
      MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
      if (jit.getJITInfo().needsGOT()) {
        MemMgr->AllocateGOT();
        DEBUG(dbgs() << "JIT is managing a GOT\n");
      }

    }
    ~JITEmitter() {
      delete MemMgr;
    }

    JITResolver &getJITResolver() { return Resolver; }

    virtual void startFunction(MachineFunction &F);
    virtual bool finishFunction(MachineFunction &F);

    void emitConstantPool(MachineConstantPool *MCP);
    void initJumpTableInfo(MachineJumpTableInfo *MJTI);
    void emitJumpTableInfo(MachineJumpTableInfo *MJTI);

    void startGVStub(const GlobalValue* GV,
                     unsigned StubSize, unsigned Alignment = 1);
    void startGVStub(void *Buffer, unsigned StubSize);
    void finishGVStub();
    virtual void *allocIndirectGV(const GlobalValue *GV,
                                  const uint8_t *Buffer, size_t Size,
                                  unsigned Alignment);

    /// allocateSpace - Reserves space in the current block if any, or
    /// allocate a new one of the given size.
    virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);

    /// allocateGlobal - Allocate memory for a global.  Unlike allocateSpace,
    /// this method does not allocate memory in the current output buffer,
    /// because a global may live longer than the current function.
    virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment);

    /// addRelocation - Queue a relocation to be resolved in finishFunction,
    /// once all addresses are known.
    virtual void addRelocation(const MachineRelocation &MR) {
      Relocations.push_back(MR);
    }

    /// StartMachineBasicBlock - Record the current PC as the start address
    /// of MBB, and publish it if the block's address is taken.
    virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
      // Resize to twice the needed length so later, higher-numbered blocks
      // rarely force another resize.
      if (MBBLocations.size() <= (unsigned)MBB->getNumber())
        MBBLocations.resize((MBB->getNumber()+1)*2);
      MBBLocations[MBB->getNumber()] = getCurrentPCValue();
      if (MBB->hasAddressTaken())
        TheJIT->addPointerToBasicBlock(MBB->getBasicBlock(),
                                       (void*)getCurrentPCValue());
      DEBUG(dbgs() << "JIT: Emitting BB" << MBB->getNumber() << " at ["
                   << (void*) getCurrentPCValue() << "]\n");
    }

    virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
    virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;

    /// getMachineBasicBlockAddress - Return the address recorded by
    /// StartMachineBasicBlock; asserts if the block was never emitted.
    virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
      assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
             MBBLocations[MBB->getNumber()] && "MBB not emitted!");
      return MBBLocations[MBB->getNumber()];
    }

    /// retryWithMoreMemory - Log a retry and deallocate all memory for the
    /// given function.  Increase the minimum allocation size so that we get
    /// more memory next time.
    void retryWithMoreMemory(MachineFunction &F);

    /// deallocateMemForFunction - Deallocate all memory for the specified
    /// function body.
    void deallocateMemForFunction(const Function *F);

    virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);

    /// emitLabel - Record the current PC as the address of Label.
    virtual void emitLabel(MCSymbol *Label) {
      LabelLocations[Label] = getCurrentPCValue();
    }

    virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
      return &LabelLocations;
    }

    /// getLabelAddress - Return the address recorded for Label by emitLabel;
    /// asserts if the label was never emitted.
    virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
      assert(LabelLocations.count(Label) && "Label not emitted!");
      return LabelLocations.find(Label)->second;
    }

    virtual void setModuleInfo(MachineModuleInfo* Info) {
      MMI = Info;
    }

  private:
    void *getPointerToGlobal(GlobalValue *GV, void *Reference,
                             bool MayNeedFarStub);
    void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
  };
460}
461
/// onDelete - ValueMap callback fired when a Function is destroyed; purge
/// every lazy call site recorded for it so no stale stub mapping survives
/// the function.
void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
  JRS->EraseAllCallSitesForPrelocked(F);
}
465
466void JITResolverState::EraseAllCallSitesForPrelocked(Function *F) {
467  FunctionToCallSitesMapTy::iterator F2C = FunctionToCallSitesMap.find(F);
468  if (F2C == FunctionToCallSitesMap.end())
469    return;
470  StubToResolverMapTy &S2RMap = *StubToResolverMap;
471  for (SmallPtrSet<void*, 1>::const_iterator I = F2C->second.begin(),
472         E = F2C->second.end(); I != E; ++I) {
473    S2RMap.UnregisterStubResolver(*I);
474    bool Erased = CallSiteToFunctionMap.erase(*I);
475    (void)Erased;
476    assert(Erased && "Missing call site->function mapping");
477  }
478  FunctionToCallSitesMap.erase(F2C);
479}
480
481void JITResolverState::EraseAllCallSitesPrelocked() {
482  StubToResolverMapTy &S2RMap = *StubToResolverMap;
483  for (CallSiteToFunctionMapTy::const_iterator
484         I = CallSiteToFunctionMap.begin(),
485         E = CallSiteToFunctionMap.end(); I != E; ++I) {
486    S2RMap.UnregisterStubResolver(I->first);
487  }
488  CallSiteToFunctionMap.clear();
489  FunctionToCallSitesMap.clear();
490}
491
/// ~JITResolver - Unregister all of this resolver's stubs from the global
/// StubToResolverMap, then verify (debug builds) that none remain.
JITResolver::~JITResolver() {
  // No need to lock because we're in the destructor, and state isn't shared.
  state.EraseAllCallSitesPrelocked();
  assert(!StubToResolverMap->ResolverHasStubs(this) &&
         "Resolver destroyed with stubs still alive.");
}
498
499/// getLazyFunctionStubIfAvailable - This returns a pointer to a function stub
500/// if it has already been created.
501void *JITResolver::getLazyFunctionStubIfAvailable(Function *F) {
502  MutexGuard locked(TheJIT->lock);
503
504  // If we already have a stub for this function, recycle it.
505  return state.getFunctionToLazyStubMap(locked).lookup(F);
506}
507
/// getLazyFunctionStub - This returns a pointer to a function's
/// lazy-compilation stub, creating one on demand as needed.  May return
/// null if F is a declaration that resolves to a null address (e.g. a weak
/// external).
void *JITResolver::getLazyFunctionStub(Function *F) {
  MutexGuard locked(TheJIT->lock);

  // If we already have a lazy stub for this function, recycle it.
  void *&Stub = state.getFunctionToLazyStubMap(locked)[F];
  if (Stub) return Stub;

  // Call the lazy resolver function if we are JIT'ing lazily.  Otherwise we
  // must resolve the symbol now.
  void *Actual = TheJIT->isCompilingLazily()
    ? (void *)(intptr_t)LazyResolverFn : (void *)0;

  // If this is an external declaration, attempt to resolve the address now
  // to place in the stub.
  if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage()) {
    Actual = TheJIT->getPointerToFunction(F);

    // If we resolved the symbol to a null address (eg. a weak external)
    // don't emit a stub. Return a null pointer to the application.
    if (!Actual) return 0;
  }

  // Ask the target how large a stub must be and how it must be aligned,
  // then open a stub buffer of that shape.
  TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
  JE.startGVStub(F, SL.Size, SL.Alignment);
  // Codegen a new stub, calling the lazy resolver or the actual address of the
  // external function, if it was resolved.
  Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, JE);
  JE.finishGVStub();

  if (Actual != (void*)(intptr_t)LazyResolverFn) {
    // If we are getting the stub for an external function, we really want the
    // address of the stub in the GlobalAddressMap for the JIT, not the address
    // of the external function.
    TheJIT->updateGlobalMapping(F, Stub);
  }

  DEBUG(dbgs() << "JIT: Lazy stub emitted at [" << Stub << "] for function '"
        << F->getName() << "'\n");

  if (TheJIT->isCompilingLazily()) {
    // Register this JITResolver as the one corresponding to this call site so
    // JITCompilerFn will be able to find it.
    StubToResolverMap->RegisterStubResolver(Stub, this);

    // Finally, keep track of the stub-to-Function mapping so that the
    // JITCompilerFn knows which function to compile!
    state.AddCallSite(locked, Stub, F);
  } else if (!Actual) {
    // If we are JIT'ing non-lazily but need to call a function that does not
    // exist yet, add it to the JIT's work list so that we can fill in the
    // stub address later.
    assert(!isNonGhostDeclaration(F) && !F->hasAvailableExternallyLinkage() &&
           "'Actual' should have been set above.");
    TheJIT->addPendingFunction(F);
  }

  return Stub;
}
568
569/// getGlobalValueIndirectSym - Return a lazy pointer containing the specified
570/// GV address.
571void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
572  MutexGuard locked(TheJIT->lock);
573
574  // If we already have a stub for this global variable, recycle it.
575  void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
576  if (IndirectSym) return IndirectSym;
577
578  // Otherwise, codegen a new indirect symbol.
579  IndirectSym = TheJIT->getJITInfo().emitGlobalValueIndirectSym(GV, GVAddress,
580                                                                JE);
581
582  DEBUG(dbgs() << "JIT: Indirect symbol emitted at [" << IndirectSym
583        << "] for GV '" << GV->getName() << "'\n");
584
585  return IndirectSym;
586}
587
588/// getExternalFunctionStub - Return a stub for the function at the
589/// specified address, created lazily on demand.
590void *JITResolver::getExternalFunctionStub(void *FnAddr) {
591  // If we already have a stub for this function, recycle it.
592  void *&Stub = ExternalFnToStubMap[FnAddr];
593  if (Stub) return Stub;
594
595  TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
596  JE.startGVStub(0, SL.Size, SL.Alignment);
597  Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE);
598  JE.finishGVStub();
599
600  DEBUG(dbgs() << "JIT: Stub emitted at [" << Stub
601               << "] for external function at '" << FnAddr << "'\n");
602  return Stub;
603}
604
605unsigned JITResolver::getGOTIndexForAddr(void* addr) {
606  unsigned idx = revGOTMap[addr];
607  if (!idx) {
608    idx = ++nextGOTIndex;
609    revGOTMap[addr] = idx;
610    DEBUG(dbgs() << "JIT: Adding GOT entry " << idx << " for addr ["
611                 << addr << "]\n");
612  }
613  return idx;
614}
615
/// JITCompilerFn - This function is called when a lazy compilation stub has
/// been entered.  It looks up which function this stub corresponds to, compiles
/// it if necessary, then returns the resultant function pointer.  It is
/// static (taking only the stub address) because that is all the stub can
/// supply; the owning resolver is recovered through StubToResolverMap.
void *JITResolver::JITCompilerFn(void *Stub) {
  JITResolver *JR = StubToResolverMap->getResolverFromStub(Stub);
  assert(JR && "Unable to find the corresponding JITResolver to the call site");

  Function* F = 0;
  void* ActualPtr = 0;

  {
    // Only lock for getting the Function. The call getPointerToFunction made
    // in this function might trigger function materializing, which requires
    // JIT lock to be unlocked.
    MutexGuard locked(JR->TheJIT->lock);

    // The address given to us for the stub may not be exactly right, it might
    // be a little bit after the stub.  As such, use upper_bound to find it.
    std::pair<void*, Function*> I =
      JR->state.LookupFunctionFromCallSite(locked, Stub);
    F = I.second;
    ActualPtr = I.first;
  }

  // If we have already code generated the function, just return the address.
  // Note: checked outside the lock; see the comment below about why call
  // sites are never removed.
  void *Result = JR->TheJIT->getPointerToGlobalIfAvailable(F);

  if (!Result) {
    // Otherwise we don't have it, do lazy compilation now.

    // If lazy compilation is disabled, emit a useful error message and abort.
    if (!JR->TheJIT->isCompilingLazily()) {
      report_fatal_error("LLVM JIT requested to do lazy compilation of"
                         " function '"
                        + F->getName() + "' when lazy compiles are disabled!");
    }

    DEBUG(dbgs() << "JIT: Lazily resolving function '" << F->getName()
          << "' In stub ptr = " << Stub << " actual ptr = "
          << ActualPtr << "\n");
    (void)ActualPtr;

    Result = JR->TheJIT->getPointerToFunction(F);
  }

  // Reacquire the lock to update the GOT map.
  MutexGuard locked(JR->TheJIT->lock);

  // We might like to remove the call site from the CallSiteToFunction map, but
  // we can't do that! Multiple threads could be stuck, waiting to acquire the
  // lock above. As soon as the 1st function finishes compiling the function,
  // the next one will be released, and needs to be able to find the function it
  // needs to call.

  // FIXME: We could rewrite all references to this stub if we knew them.

  // What we will do is set the compiled function address to map to the
  // same GOT entry as the stub so that later clients may update the GOT
  // if they see it still using the stub address.
  // Note: this is done so the Resolver doesn't have to manage GOT memory
  // Do this without allocating map space if the target isn't using a GOT
  if(JR->revGOTMap.find(Stub) != JR->revGOTMap.end())
    JR->revGOTMap[Result] = JR->revGOTMap[Stub];

  return Result;
}
682
683//===----------------------------------------------------------------------===//
684// JITEmitter code.
685//
/// getPointerToGlobal - Resolve a reference to global value V made from the
/// code location Reference.  Depending on what V is and what has been
/// emitted so far, this returns the global variable's storage, the
/// aliasee's address, an existing stub, already-emitted code, or a fresh
/// lazy stub.  When MayNeedFarStub is true, the direct-pointer shortcuts
/// are skipped because the target may not reach the address from Reference.
void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
                                     bool MayNeedFarStub) {
  if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
    return TheJIT->getOrEmitGlobalVariable(GV);

  if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
    return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));

  // If we have already compiled the function, return a pointer to its body.
  Function *F = cast<Function>(V);

  void *FnStub = Resolver.getLazyFunctionStubIfAvailable(F);
  if (FnStub) {
    // Return the function stub if it's already created.  We do this first so
    // that we're returning the same address for the function as any previous
    // call.  TODO: Yes, this is wrong. The lazy stub isn't guaranteed to be
    // close enough to call.
    return FnStub;
  }

  // If we know the target can handle arbitrary-distance calls, try to
  // return a direct pointer.
  if (!MayNeedFarStub) {
    // If we have code, go ahead and return that.
    void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
    if (ResultPtr) return ResultPtr;

    // If this is an external function pointer, we can force the JIT to
    // 'compile' it, which really just adds it to the map.
    if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage())
      return TheJIT->getPointerToFunction(F);
  }

  // Otherwise, we may need to emit a stub, and, conservatively, we always do
  // so.  Note that it's possible to return null from getLazyFunctionStub in the
  // case of a weak extern that fails to resolve.
  return Resolver.getLazyFunctionStub(F);
}
724
725void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference) {
726  // Make sure GV is emitted first, and create a stub containing the fully
727  // resolved address.
728  void *GVAddress = getPointerToGlobal(V, Reference, false);
729  void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress);
730  return StubAddr;
731}
732
733void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
734  if (DL.isUnknown()) return;
735  if (!BeforePrintingInsn) return;
736
737  const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
738
739  if (DL.getScope(Context) != 0 && PrevDL != DL) {
740    JITEvent_EmittedFunctionDetails::LineStart NextLine;
741    NextLine.Address = getCurrentPCValue();
742    NextLine.Loc = DL;
743    EmissionDetails.LineStarts.push_back(NextLine);
744  }
745
746  PrevDL = DL;
747}
748
749static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
750                                           const DataLayout *TD) {
751  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
752  if (Constants.empty()) return 0;
753
754  unsigned Size = 0;
755  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
756    MachineConstantPoolEntry CPE = Constants[i];
757    unsigned AlignMask = CPE.getAlignment() - 1;
758    Size = (Size + AlignMask) & ~AlignMask;
759    Type *Ty = CPE.getType();
760    Size += TD->getTypeAllocSize(Ty);
761  }
762  return Size;
763}
764
/// startFunction - Begin emitting machine code for F: obtain a writable
/// memory block from the memory manager, lay out the constant pool and any
/// jump tables, and record where the function's code will start.
void JITEmitter::startFunction(MachineFunction &F) {
  DEBUG(dbgs() << "JIT: Starting CodeGen of Function "
        << F.getName() << "\n");

  uintptr_t ActualSize = 0;
  // Set the memory writable, if it's not already
  MemMgr->setMemoryWritable();

  if (SizeEstimate > 0) {
    // SizeEstimate will be non-zero on reallocation attempts.
    ActualSize = SizeEstimate;
  }

  BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
                                                         ActualSize);
  BufferEnd = BufferBegin+ActualSize;
  EmittedFunctions[F.getFunction()].FunctionBody = BufferBegin;

  // Ensure the constant pool/jump table info is at least 16-byte aligned.
  emitAlignment(16);

  emitConstantPool(F.getConstantPool());
  if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    initJumpTableInfo(MJTI);

  // About to start emitting the machine code for the function.  Align to at
  // least 8 bytes, or more if the IR function requests it.
  emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
  TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);
  EmittedFunctions[F.getFunction()].Code = CurBufferPtr;

  // Clear per-function state left over from any previous emission.
  MBBLocations.clear();

  EmissionDetails.MF = &F;
  EmissionDetails.LineStarts.clear();
}
800
/// finishFunction - Resolve relocations, finalize jump tables and the GOT,
/// and publish the emitted code to listeners.  Returns true to request a
/// retry (with a larger buffer) after running out of space, false on success.
bool JITEmitter::finishFunction(MachineFunction &F) {
  // CurBufferPtr == BufferEnd is the out-of-space sentinel set by the
  // JITCodeEmitter when an emit overflowed the buffer.
  if (CurBufferPtr == BufferEnd) {
    // We must call endFunctionBody before retrying, because
    // deallocateMemForFunction requires it.
    MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
    retryWithMoreMemory(F);
    return true;
  }

  if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
    emitJumpTableInfo(MJTI);

  // FnStart is the start of the text, not the start of the constant pool and
  // other per-function data.
  uint8_t *FnStart =
    (uint8_t *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());

  // FnEnd is the end of the function's machine code.
  uint8_t *FnEnd = CurBufferPtr;

  if (!Relocations.empty()) {
    CurFn = F.getFunction();
    NumRelos += Relocations.size();

    // Resolve the relocations to concrete pointers.
    for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
      MachineRelocation &MR = Relocations[i];
      void *ResultPtr = 0;
      if (!MR.letTargetResolve()) {
        if (MR.isExternalSymbol()) {
          ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
                                                        false);
          DEBUG(dbgs() << "JIT: Map \'" << MR.getExternalSymbol() << "\' to ["
                       << ResultPtr << "]\n");

          // If the target REALLY wants a stub for this function, emit it now.
          if (MR.mayNeedFarStub()) {
            ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
          }
        } else if (MR.isGlobalValue()) {
          ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
                                         BufferBegin+MR.getMachineCodeOffset(),
                                         MR.mayNeedFarStub());
        } else if (MR.isIndirectSymbol()) {
          ResultPtr = getPointerToGVIndirectSym(
              MR.getGlobalValue(), BufferBegin+MR.getMachineCodeOffset());
        } else if (MR.isBasicBlock()) {
          ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
        } else if (MR.isConstantPoolIndex()) {
          ResultPtr =
            (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
        } else {
          assert(MR.isJumpTableIndex());
          ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
        }

        MR.setResultPointer(ResultPtr);
      }

      // if we are managing the GOT and the relocation wants an index,
      // give it one
      if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
        unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
        MR.setGOTIndex(idx);
        if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
          DEBUG(dbgs() << "JIT: GOT was out of date for " << ResultPtr
                       << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
                       << "\n");
          ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
        }
      }
    }

    CurFn = 0;
    // Hand the resolved relocations to the target to patch the buffer.
    TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
                                  Relocations.size(), MemMgr->getGOTBase());
  }

  // Update the GOT entry for F to point to the new code.
  if (MemMgr->isManagingGOT()) {
    unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
    if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
      DEBUG(dbgs() << "JIT: GOT was out of date for " << (void*)BufferBegin
                   << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
                   << "\n");
      ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
    }
  }

  // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
  // global variables that were referenced in the relocations.
  MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);

  if (CurBufferPtr == BufferEnd) {
    // Relocation handling itself overflowed the buffer; retry with more room.
    retryWithMoreMemory(F);
    return true;
  } else {
    // Now that we've succeeded in emitting the function, reset the
    // SizeEstimate back down to zero.
    SizeEstimate = 0;
  }

  BufferBegin = CurBufferPtr = 0;
  NumBytes += FnEnd-FnStart;

  // Invalidate the icache if necessary.
  sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);

  TheJIT->NotifyFunctionEmitted(*F.getFunction(), FnStart, FnEnd-FnStart,
                                EmissionDetails);

  // Reset the previous debug location.
  PrevDL = DebugLoc();

  DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart
        << "] Function: " << F.getName()
        << ": " << (FnEnd-FnStart) << " bytes of text, "
        << Relocations.size() << " relocations\n");

  Relocations.clear();
  ConstPoolAddresses.clear();

  // Mark code region readable and executable if it's not so already.
  MemMgr->setMemoryExecutable();

  // Debug-only dump: disassemble if a disassembler is built in, otherwise
  // print the raw bytes four words per line.
  DEBUG({
      if (sys::hasDisassembler()) {
        dbgs() << "JIT: Disassembled code:\n";
        dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
                                         (uintptr_t)FnStart);
      } else {
        dbgs() << "JIT: Binary code:\n";
        uint8_t* q = FnStart;
        for (int i = 0; q < FnEnd; q += 4, ++i) {
          if (i == 4)
            i = 0;
          if (i == 0)
            dbgs() << "JIT: " << (long)(q - FnStart) << ": ";
          bool Done = false;
          for (int j = 3; j >= 0; --j) {
            if (q + j >= FnEnd)
              Done = true;
            else
              dbgs() << (unsigned short)q[j];
          }
          if (Done)
            break;
          dbgs() << ' ';
          if (i == 3)
            dbgs() << '\n';
        }
        dbgs()<< '\n';
      }
    });

  if (MMI)
    MMI->EndFunction();

  return false;
}
961
962void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
963  DEBUG(dbgs() << "JIT: Ran out of space for native code.  Reattempting.\n");
964  Relocations.clear();  // Clear the old relocations or we'll reapply them.
965  ConstPoolAddresses.clear();
966  ++NumRetries;
967  deallocateMemForFunction(F.getFunction());
968  // Try again with at least twice as much free space.
969  SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
970
971  for (MachineFunction::iterator MBB = F.begin(), E = F.end(); MBB != E; ++MBB){
972    if (MBB->hasAddressTaken())
973      TheJIT->clearPointerToBasicBlock(MBB->getBasicBlock());
974  }
975}
976
977/// deallocateMemForFunction - Deallocate all memory for the specified
978/// function body.  Also drop any references the function has to stubs.
979/// May be called while the Function is being destroyed inside ~Value().
980void JITEmitter::deallocateMemForFunction(const Function *F) {
981  ValueMap<const Function *, EmittedCode, EmittedFunctionConfig>::iterator
982    Emitted = EmittedFunctions.find(F);
983  if (Emitted != EmittedFunctions.end()) {
984    MemMgr->deallocateFunctionBody(Emitted->second.FunctionBody);
985    TheJIT->NotifyFreeingMachineCode(Emitted->second.Code);
986
987    EmittedFunctions.erase(Emitted);
988  }
989}
990
991
992void *JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
993  if (BufferBegin)
994    return JITCodeEmitter::allocateSpace(Size, Alignment);
995
996  // create a new memory block if there is no active one.
997  // care must be taken so that BufferBegin is invalidated when a
998  // block is trimmed
999  BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment);
1000  BufferEnd = BufferBegin+Size;
1001  return CurBufferPtr;
1002}
1003
1004void *JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1005  // Delegate this call through the memory manager.
1006  return MemMgr->allocateGlobal(Size, Alignment);
1007}
1008
1009void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
1010  if (TheJIT->getJITInfo().hasCustomConstantPool())
1011    return;
1012
1013  const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
1014  if (Constants.empty()) return;
1015
1016  unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getDataLayout());
1017  unsigned Align = MCP->getConstantPoolAlignment();
1018  ConstantPoolBase = allocateSpace(Size, Align);
1019  ConstantPool = MCP;
1020
1021  if (ConstantPoolBase == 0) return;  // Buffer overflow.
1022
1023  DEBUG(dbgs() << "JIT: Emitted constant pool at [" << ConstantPoolBase
1024               << "] (size: " << Size << ", alignment: " << Align << ")\n");
1025
1026  // Initialize the memory for all of the constant pool entries.
1027  unsigned Offset = 0;
1028  for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1029    MachineConstantPoolEntry CPE = Constants[i];
1030    unsigned AlignMask = CPE.getAlignment() - 1;
1031    Offset = (Offset + AlignMask) & ~AlignMask;
1032
1033    uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset;
1034    ConstPoolAddresses.push_back(CAddr);
1035    if (CPE.isMachineConstantPoolEntry()) {
1036      // FIXME: add support to lower machine constant pool values into bytes!
1037      report_fatal_error("Initialize memory with machine specific constant pool"
1038                        "entry has not been implemented!");
1039    }
1040    TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
1041    DEBUG(dbgs() << "JIT:   CP" << i << " at [0x";
1042          dbgs().write_hex(CAddr) << "]\n");
1043
1044    Type *Ty = CPE.Val.ConstVal->getType();
1045    Offset += TheJIT->getDataLayout()->getTypeAllocSize(Ty);
1046  }
1047}
1048
1049void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
1050  if (TheJIT->getJITInfo().hasCustomJumpTables())
1051    return;
1052  if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline)
1053    return;
1054
1055  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1056  if (JT.empty()) return;
1057
1058  unsigned NumEntries = 0;
1059  for (unsigned i = 0, e = JT.size(); i != e; ++i)
1060    NumEntries += JT[i].MBBs.size();
1061
1062  unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getDataLayout());
1063
1064  // Just allocate space for all the jump tables now.  We will fix up the actual
1065  // MBB entries in the tables after we emit the code for each block, since then
1066  // we will know the final locations of the MBBs in memory.
1067  JumpTable = MJTI;
1068  JumpTableBase = allocateSpace(NumEntries * EntrySize,
1069                             MJTI->getEntryAlignment(*TheJIT->getDataLayout()));
1070}
1071
/// emitJumpTableInfo - Fill in the jump-table memory reserved by
/// initJumpTableInfo, now that the final address of every emitted
/// MachineBasicBlock is known.
void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
  if (TheJIT->getJITInfo().hasCustomJumpTables())
    return;

  // JumpTableBase may be 0 if allocation overflowed in initJumpTableInfo.
  const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
  if (JT.empty() || JumpTableBase == 0) return;


  switch (MJTI->getEntryKind()) {
  case MachineJumpTableInfo::EK_Inline:
    return;
  case MachineJumpTableInfo::EK_BlockAddress: {
    // EK_BlockAddress - Each entry is a plain address of block, e.g.:
    //     .word LBB123
    assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == sizeof(void*) &&
           "Cross JIT'ing?");

    // For each jump table, map each target in the jump table to the address of
    // an emitted MachineBasicBlock.
    intptr_t *SlotPtr = (intptr_t*)JumpTableBase;

    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
      // Store the address of the basic block for this jump table slot in the
      // memory we allocated for the jump table in 'initJumpTableInfo'
      for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
        *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
    }
    break;
  }

  case MachineJumpTableInfo::EK_Custom32:
  case MachineJumpTableInfo::EK_GPRel32BlockAddress:
  case MachineJumpTableInfo::EK_LabelDifference32: {
    // 32-bit entries hold an offset relative to the start of the table.
    assert(MJTI->getEntrySize(*TheJIT->getDataLayout()) == 4&&"Cross JIT'ing?");
    // For each jump table, place the offset from the beginning of the table
    // to the target address.
    int *SlotPtr = (int*)JumpTableBase;

    for (unsigned i = 0, e = JT.size(); i != e; ++i) {
      const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
      // Store the offset of the basic block for this jump table slot in the
      // memory we allocated for the jump table in 'initJumpTableInfo'
      uintptr_t Base = (uintptr_t)SlotPtr;
      for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
        uintptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]);
        /// FIXME: Use EntryKind instead of magic "getPICJumpTableEntry" hook.
        *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
      }
    }
    break;
  }
  case MachineJumpTableInfo::EK_GPRel64BlockAddress:
    llvm_unreachable(
           "JT Info emission not implemented for GPRel64BlockAddress yet.");
  }
}
1129
1130void JITEmitter::startGVStub(const GlobalValue* GV,
1131                             unsigned StubSize, unsigned Alignment) {
1132  SavedBufferBegin = BufferBegin;
1133  SavedBufferEnd = BufferEnd;
1134  SavedCurBufferPtr = CurBufferPtr;
1135
1136  BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment);
1137  BufferEnd = BufferBegin+StubSize+1;
1138}
1139
1140void JITEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1141  SavedBufferBegin = BufferBegin;
1142  SavedBufferEnd = BufferEnd;
1143  SavedCurBufferPtr = CurBufferPtr;
1144
1145  BufferBegin = CurBufferPtr = (uint8_t *)Buffer;
1146  BufferEnd = BufferBegin+StubSize+1;
1147}
1148
1149void JITEmitter::finishGVStub() {
1150  assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1151  NumBytes += getCurrentPCOffset();
1152  BufferBegin = SavedBufferBegin;
1153  BufferEnd = SavedBufferEnd;
1154  CurBufferPtr = SavedCurBufferPtr;
1155}
1156
1157void *JITEmitter::allocIndirectGV(const GlobalValue *GV,
1158                                  const uint8_t *Buffer, size_t Size,
1159                                  unsigned Alignment) {
1160  uint8_t *IndGV = MemMgr->allocateStub(GV, Size, Alignment);
1161  memcpy(IndGV, Buffer, Size);
1162  return IndGV;
1163}
1164
1165// getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
1166// in the constant pool that was last emitted with the 'emitConstantPool'
1167// method.
1168//
1169uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
1170  assert(ConstantNum < ConstantPool->getConstants().size() &&
1171         "Invalid ConstantPoolIndex!");
1172  return ConstPoolAddresses[ConstantNum];
1173}
1174
1175// getJumpTableEntryAddress - Return the address of the JumpTable with index
1176// 'Index' in the jumpp table that was last initialized with 'initJumpTableInfo'
1177//
1178uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
1179  const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
1180  assert(Index < JT.size() && "Invalid jump table index!");
1181
1182  unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getDataLayout());
1183
1184  unsigned Offset = 0;
1185  for (unsigned i = 0; i < Index; ++i)
1186    Offset += JT[i].MBBs.size();
1187
1188   Offset *= EntrySize;
1189
1190  return (uintptr_t)((char *)JumpTableBase + Offset);
1191}
1192
1193void JITEmitter::EmittedFunctionConfig::onDelete(
1194  JITEmitter *Emitter, const Function *F) {
1195  Emitter->deallocateMemForFunction(F);
1196}
1197void JITEmitter::EmittedFunctionConfig::onRAUW(
1198  JITEmitter *, const Function*, const Function*) {
1199  llvm_unreachable("The JIT doesn't know how to handle a"
1200                   " RAUW on a value it has emitted.");
1201}
1202
1203
1204//===----------------------------------------------------------------------===//
1205//  Public interface to this file
1206//===----------------------------------------------------------------------===//
1207
1208JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM,
1209                                   TargetMachine &tm) {
1210  return new JITEmitter(jit, JMM, tm);
1211}
1212
1213// getPointerToFunctionOrStub - If the specified function has been
1214// code-gen'd, return a pointer to the function.  If not, compile it, or use
1215// a stub to implement lazy compilation if available.
1216//
1217void *JIT::getPointerToFunctionOrStub(Function *F) {
1218  // If we have already code generated the function, just return the address.
1219  if (void *Addr = getPointerToGlobalIfAvailable(F))
1220    return Addr;
1221
1222  // Get a stub if the target supports it.
1223  JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
1224  return JE->getJITResolver().getLazyFunctionStub(F);
1225}
1226
/// updateFunctionStub - Rewrite F's existing lazy-compilation stub in place
/// so that it targets F's newly compiled code.
void JIT::updateFunctionStub(Function *F) {
  // Get the empty stub we generated earlier.
  JITEmitter *JE = static_cast<JITEmitter*>(getCodeEmitter());
  void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
  void *Addr = getPointerToGlobalIfAvailable(F);
  assert(Addr != Stub && "Function must have non-stub address to be updated.");

  // Tell the target jit info to rewrite the stub at the specified address,
  // rather than creating a new one.  The start/emit/finish sequence redirects
  // emission into the stub's memory and then restores the previous buffer.
  TargetJITInfo::StubLayout layout = getJITInfo().getStubLayout();
  JE->startGVStub(Stub, layout.Size);
  getJITInfo().emitFunctionStub(F, Addr, *getCodeEmitter());
  JE->finishGVStub();
}
1241
1242/// freeMachineCodeForFunction - release machine code memory for given Function.
1243///
1244void JIT::freeMachineCodeForFunction(Function *F) {
1245  // Delete translation for this from the ExecutionEngine, so it will get
1246  // retranslated next time it is used.
1247  updateGlobalMapping(F, 0);
1248
1249  // Free the actual memory for the function body and related stuff.
1250  static_cast<JITEmitter*>(JCE)->deallocateMemForFunction(F);
1251}
1252