1 //===-- JITEmitter.cpp - Write machine code to executable memory ----------===//
2 //
3 //                     The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 //
10 // This file defines a MachineCodeEmitter object that is used by the JIT to
11 // write machine code to memory and remember where relocatable values are.
12 //
13 //===----------------------------------------------------------------------===//
14 
15 #define DEBUG_TYPE "jit"
16 #include "JIT.h"
17 #include "JITDebugRegisterer.h"
18 #include "JITDwarfEmitter.h"
19 #include "llvm/ADT/OwningPtr.h"
20 #include "llvm/Constants.h"
21 #include "llvm/Module.h"
22 #include "llvm/DerivedTypes.h"
23 #include "llvm/Analysis/DebugInfo.h"
24 #include "llvm/CodeGen/JITCodeEmitter.h"
25 #include "llvm/CodeGen/MachineFunction.h"
26 #include "llvm/CodeGen/MachineCodeInfo.h"
27 #include "llvm/CodeGen/MachineConstantPool.h"
28 #include "llvm/CodeGen/MachineJumpTableInfo.h"
29 #include "llvm/CodeGen/MachineModuleInfo.h"
30 #include "llvm/CodeGen/MachineRelocation.h"
31 #include "llvm/ExecutionEngine/GenericValue.h"
32 #include "llvm/ExecutionEngine/JITEventListener.h"
33 #include "llvm/ExecutionEngine/JITMemoryManager.h"
34 #include "llvm/Target/TargetData.h"
35 #include "llvm/Target/TargetInstrInfo.h"
36 #include "llvm/Target/TargetJITInfo.h"
37 #include "llvm/Target/TargetMachine.h"
38 #include "llvm/Target/TargetOptions.h"
39 #include "llvm/Support/Debug.h"
40 #include "llvm/Support/ErrorHandling.h"
41 #include "llvm/Support/ManagedStatic.h"
42 #include "llvm/Support/MutexGuard.h"
43 #include "llvm/Support/ValueHandle.h"
44 #include "llvm/Support/raw_ostream.h"
45 #include "llvm/Support/Disassembler.h"
46 #include "llvm/Support/Memory.h"
47 #include "llvm/ADT/DenseMap.h"
48 #include "llvm/ADT/SmallPtrSet.h"
49 #include "llvm/ADT/SmallVector.h"
50 #include "llvm/ADT/Statistic.h"
51 #include "llvm/ADT/ValueMap.h"
52 #include <algorithm>
53 #ifndef NDEBUG
54 #include <iomanip>
55 #endif
56 using namespace llvm;
57 
58 STATISTIC(NumBytes, "Number of bytes of machine code compiled");
59 STATISTIC(NumRelos, "Number of relocations applied");
60 STATISTIC(NumRetries, "Number of retries with more memory");
61 
62 
63 // A declaration may stop being a declaration once it's fully read from bitcode.
64 // This function returns true if F is fully read and is still a declaration.
65 static bool isNonGhostDeclaration(const Function *F) {
66   return F->isDeclaration() && !F->isMaterializable();
67 }
68 
69 //===----------------------------------------------------------------------===//
70 // JIT lazy compilation code.
71 //
72 namespace {
73   class JITEmitter;
74   class JITResolverState;
75 
76   template<typename ValueTy>
77   struct NoRAUWValueMapConfig : public ValueMapConfig<ValueTy> {
78     typedef JITResolverState *ExtraData;
79     static void onRAUW(JITResolverState *, Value *Old, Value *New) {
80       assert(false && "The JIT doesn't know how to handle a"
81              " RAUW on a value it has emitted.");
82     }
83   };
84 
85   struct CallSiteValueMapConfig : public NoRAUWValueMapConfig<Function*> {
86     typedef JITResolverState *ExtraData;
87     static void onDelete(JITResolverState *JRS, Function *F);
88   };
89 
90   class JITResolverState {
91   public:
92     typedef ValueMap<Function*, void*, NoRAUWValueMapConfig<Function*> >
93       FunctionToLazyStubMapTy;
94     typedef std::map<void*, AssertingVH<Function> > CallSiteToFunctionMapTy;
95     typedef ValueMap<Function *, SmallPtrSet<void*, 1>,
96                      CallSiteValueMapConfig> FunctionToCallSitesMapTy;
97     typedef std::map<AssertingVH<GlobalValue>, void*> GlobalToIndirectSymMapTy;
98   private:
99     /// FunctionToLazyStubMap - Keep track of the lazy stub created for a
100     /// particular function so that we can reuse them if necessary.
101     FunctionToLazyStubMapTy FunctionToLazyStubMap;
102 
103     /// CallSiteToFunctionMap - Keep track of the function that each lazy call
104     /// site corresponds to, and vice versa.
105     CallSiteToFunctionMapTy CallSiteToFunctionMap;
106     FunctionToCallSitesMapTy FunctionToCallSitesMap;
107 
108     /// GlobalToIndirectSymMap - Keep track of the indirect symbol created for a
109     /// particular GlobalVariable so that we can reuse them if necessary.
110     GlobalToIndirectSymMapTy GlobalToIndirectSymMap;
111 
112     /// Instance of the JIT this ResolverState serves.
113     JIT *TheJIT;
114 
115   public:
116     JITResolverState(JIT *jit) : FunctionToLazyStubMap(this),
117                                  FunctionToCallSitesMap(this),
118                                  TheJIT(jit) {}
119 
120     FunctionToLazyStubMapTy& getFunctionToLazyStubMap(
121       const MutexGuard& locked) {
122       assert(locked.holds(TheJIT->lock));
123       return FunctionToLazyStubMap;
124     }
125 
126     GlobalToIndirectSymMapTy& getGlobalToIndirectSymMap(const MutexGuard& lck) {
127       assert(lck.holds(TheJIT->lock));
128       return GlobalToIndirectSymMap;
129     }
130 
131     std::pair<void *, Function *> LookupFunctionFromCallSite(
132         const MutexGuard &locked, void *CallSite) const {
133       assert(locked.holds(TheJIT->lock));
134 
135       // The address given to us for the stub may not be exactly right, it
136       // might be a little bit after the stub.  As such, use upper_bound to
137       // find it.
138       CallSiteToFunctionMapTy::const_iterator I =
139         CallSiteToFunctionMap.upper_bound(CallSite);
140       assert(I != CallSiteToFunctionMap.begin() &&
141              "This is not a known call site!");
142       --I;
143       return *I;
144     }
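    // Worked example (hypothetical addresses): if CallSiteToFunctionMap holds
    // stubs at 0x1000 and 0x2000 and the resolver is entered with a return
    // address of 0x1005, upper_bound(0x1005) points at the 0x2000 entry;
    // stepping back one entry recovers the 0x1000 stub that was actually
    // called.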
145 
146     void AddCallSite(const MutexGuard &locked, void *CallSite, Function *F) {
147       assert(locked.holds(TheJIT->lock));
148 
149       bool Inserted = CallSiteToFunctionMap.insert(
150           std::make_pair(CallSite, F)).second;
151       (void)Inserted;
152       assert(Inserted && "Pair was already in CallSiteToFunctionMap");
153       FunctionToCallSitesMap[F].insert(CallSite);
154     }
155 
156     void EraseAllCallSitesForPrelocked(Function *F);
157 
158     // Erases _all_ call sites regardless of their function.  This is used to
159     // unregister the stub addresses from the StubToResolverMap in
160     // ~JITResolver().
161     void EraseAllCallSitesPrelocked();
162   };
163 
164   /// JITResolver - Keep track of, and resolve, call sites for functions that
165   /// have not yet been compiled.
166   class JITResolver {
167     typedef JITResolverState::FunctionToLazyStubMapTy FunctionToLazyStubMapTy;
168     typedef JITResolverState::CallSiteToFunctionMapTy CallSiteToFunctionMapTy;
169     typedef JITResolverState::GlobalToIndirectSymMapTy GlobalToIndirectSymMapTy;
170 
171     /// LazyResolverFn - The target lazy resolver function that we actually
172     /// rewrite instructions to use.
173     TargetJITInfo::LazyResolverFn LazyResolverFn;
174 
175     JITResolverState state;
176 
177     /// ExternalFnToStubMap - This is the equivalent of FunctionToLazyStubMap
178     /// for external functions.  TODO: Of course, external functions don't need
179     /// a lazy stub.  It's actually here to make it more likely that far calls
180     /// succeed, but no single stub can guarantee that.  I'll remove this in a
181     /// subsequent checkin when I actually fix far calls.
182     std::map<void*, void*> ExternalFnToStubMap;
183 
184     /// revGOTMap - map addresses to indexes in the GOT
185     std::map<void*, unsigned> revGOTMap;
186     unsigned nextGOTIndex;
187 
188     JITEmitter &JE;
189 
190     /// Instance of JIT corresponding to this Resolver.
191     JIT *TheJIT;
192 
193   public:
194     explicit JITResolver(JIT &jit, JITEmitter &je)
195       : state(&jit), nextGOTIndex(0), JE(je), TheJIT(&jit) {
196       LazyResolverFn = jit.getJITInfo().getLazyResolverFunction(JITCompilerFn);
197     }
198 
199     ~JITResolver();
200 
201     /// getLazyFunctionStubIfAvailable - This returns a pointer to a function's
202     /// lazy-compilation stub if it has already been created.
203     void *getLazyFunctionStubIfAvailable(Function *F);
204 
205     /// getLazyFunctionStub - This returns a pointer to a function's
206     /// lazy-compilation stub, creating one on demand as needed.
207     void *getLazyFunctionStub(Function *F);
208 
209     /// getExternalFunctionStub - Return a stub for the function at the
210     /// specified address, created lazily on demand.
211     void *getExternalFunctionStub(void *FnAddr);
212 
213     /// getGlobalValueIndirectSym - Return an indirect symbol containing the
214     /// specified GV address.
215     void *getGlobalValueIndirectSym(GlobalValue *V, void *GVAddress);
216 
217     /// getGOTIndexForAddr - Return a new or existing index in the GOT for
218     /// an address.  This function only manages slots, it does not manage the
219     /// contents of the slots or the memory associated with the GOT.
220     unsigned getGOTIndexForAddr(void *addr);
221 
222     /// JITCompilerFn - This function is called to resolve a stub to a compiled
223     /// address.  If the LLVM Function corresponding to the stub has not yet
224     /// been compiled, this function compiles it first.
225     static void *JITCompilerFn(void *Stub);
226   };
227 
228   class StubToResolverMapTy {
229     /// Map a stub address to a specific instance of a JITResolver so that
230     /// lazily-compiled functions can find the right resolver to use.
231     ///
232     /// Guarded by Lock.
233     std::map<void*, JITResolver*> Map;
234 
235     /// Guards Map from concurrent accesses.
236     mutable sys::Mutex Lock;
237 
238   public:
239     /// Registers a Stub to be resolved by Resolver.
240     void RegisterStubResolver(void *Stub, JITResolver *Resolver) {
241       MutexGuard guard(Lock);
242       Map.insert(std::make_pair(Stub, Resolver));
243     }
244     /// Unregisters the Stub when it's invalidated.
245     void UnregisterStubResolver(void *Stub) {
246       MutexGuard guard(Lock);
247       Map.erase(Stub);
248     }
249     /// Returns the JITResolver instance that owns the Stub.
250     JITResolver *getResolverFromStub(void *Stub) const {
251       MutexGuard guard(Lock);
252       // The address given to us for the stub may not be exactly right, it might
253       // be a little bit after the stub.  As such, use upper_bound to find it.
254       // This is the same trick as in LookupFunctionFromCallSite from
255       // JITResolverState.
256       std::map<void*, JITResolver*>::const_iterator I = Map.upper_bound(Stub);
257       assert(I != Map.begin() && "This is not a known stub!");
258       --I;
259       return I->second;
260     }
261     /// True if any stubs refer to the given resolver. Only used in an assert().
262     /// O(N)
263     bool ResolverHasStubs(JITResolver* Resolver) const {
264       MutexGuard guard(Lock);
265       for (std::map<void*, JITResolver*>::const_iterator I = Map.begin(),
266              E = Map.end(); I != E; ++I) {
267         if (I->second == Resolver)
268           return true;
269       }
270       return false;
271     }
272   };
273   /// This needs to be static so that a lazy call stub can access it with no
274   /// context except the address of the stub.
275   ManagedStatic<StubToResolverMapTy> StubToResolverMap;
276 
277   /// JITEmitter - The JIT implementation of the MachineCodeEmitter, which is
278   /// used to output functions to memory for execution.
279   class JITEmitter : public JITCodeEmitter {
280     JITMemoryManager *MemMgr;
281 
282     // When outputting a function stub in the context of some other function, we
283     // save BufferBegin/BufferEnd/CurBufferPtr here.
284     uint8_t *SavedBufferBegin, *SavedBufferEnd, *SavedCurBufferPtr;
285 
286     // When reattempting to JIT a function after running out of space, we store
287     // the estimated size of the function we're trying to JIT here, so we can
288     // ask the memory manager for at least this much space.  When we
289     // successfully emit the function, we reset this back to zero.
290     uintptr_t SizeEstimate;
291 
292     /// Relocations - These are the relocations that the function needs, as
293     /// emitted.
294     std::vector<MachineRelocation> Relocations;
295 
296     /// MBBLocations - This vector is a mapping from MBB ID's to their address.
297     /// It is filled in by the StartMachineBasicBlock callback and queried by
298     /// the getMachineBasicBlockAddress callback.
299     std::vector<uintptr_t> MBBLocations;
300 
301     /// ConstantPool - The constant pool for the current function.
302     ///
303     MachineConstantPool *ConstantPool;
304 
305     /// ConstantPoolBase - A pointer to the first entry in the constant pool.
306     ///
307     void *ConstantPoolBase;
308 
309     /// ConstPoolAddresses - Addresses of individual constant pool entries.
310     ///
311     SmallVector<uintptr_t, 8> ConstPoolAddresses;
312 
313     /// JumpTable - The jump tables for the current function.
314     ///
315     MachineJumpTableInfo *JumpTable;
316 
317     /// JumpTableBase - A pointer to the first entry in the jump table.
318     ///
319     void *JumpTableBase;
320 
321     /// Resolver - This contains info about the currently resolved functions.
322     JITResolver Resolver;
323 
324     /// DE - The dwarf emitter for the jit.
325     OwningPtr<JITDwarfEmitter> DE;
326 
327     /// DR - The debug registerer for the jit.
328     OwningPtr<JITDebugRegisterer> DR;
329 
330     /// LabelLocations - This vector is a mapping from Label ID's to their
331     /// address.
332     DenseMap<MCSymbol*, uintptr_t> LabelLocations;
333 
334     /// MMI - Machine module info for exception informations
335     MachineModuleInfo* MMI;
336 
337     // CurFn - The llvm function being emitted.  Only valid during
338     // finishFunction().
339     const Function *CurFn;
340 
341     /// Information about emitted code, which is passed to the
342     /// JITEventListeners.  This is reset in startFunction and used in
343     /// finishFunction.
344     JITEvent_EmittedFunctionDetails EmissionDetails;
345 
346     struct EmittedCode {
347       void *FunctionBody;  // Beginning of the function's allocation.
348       void *Code;  // The address the function's code actually starts at.
349       void *ExceptionTable;
350       EmittedCode() : FunctionBody(0), Code(0), ExceptionTable(0) {}
351     };
352     struct EmittedFunctionConfig : public ValueMapConfig<const Function*> {
353       typedef JITEmitter *ExtraData;
354       static void onDelete(JITEmitter *, const Function*);
355       static void onRAUW(JITEmitter *, const Function*, const Function*);
356     };
357     ValueMap<const Function *, EmittedCode,
358              EmittedFunctionConfig> EmittedFunctions;
359 
360     DebugLoc PrevDL;
361 
362     /// Instance of the JIT
363     JIT *TheJIT;
364 
365   public:
366     JITEmitter(JIT &jit, JITMemoryManager *JMM, TargetMachine &TM)
367       : SizeEstimate(0), Resolver(jit, *this), MMI(0), CurFn(0),
368         EmittedFunctions(this), TheJIT(&jit) {
369       MemMgr = JMM ? JMM : JITMemoryManager::CreateDefaultMemManager();
370       if (jit.getJITInfo().needsGOT()) {
371         MemMgr->AllocateGOT();
372         DEBUG(dbgs() << "JIT is managing a GOT\n");
373       }
374 
375       if (JITExceptionHandling || JITEmitDebugInfo) {
376         DE.reset(new JITDwarfEmitter(jit));
377       }
378       if (JITEmitDebugInfo) {
379         DR.reset(new JITDebugRegisterer(TM));
380       }
381     }
382     ~JITEmitter() {
383       delete MemMgr;
384     }
385 
386     /// classof - Methods for support type inquiry through isa, cast, and
387     /// dyn_cast:
388     ///
389     static inline bool classof(const MachineCodeEmitter*) { return true; }
390 
391     JITResolver &getJITResolver() { return Resolver; }
392 
393     virtual void startFunction(MachineFunction &F);
394     virtual bool finishFunction(MachineFunction &F);
395 
396     void emitConstantPool(MachineConstantPool *MCP);
397     void initJumpTableInfo(MachineJumpTableInfo *MJTI);
398     void emitJumpTableInfo(MachineJumpTableInfo *MJTI);
399 
400     void startGVStub(const GlobalValue* GV,
401                      unsigned StubSize, unsigned Alignment = 1);
402     void startGVStub(void *Buffer, unsigned StubSize);
403     void finishGVStub();
404     virtual void *allocIndirectGV(const GlobalValue *GV,
405                                   const uint8_t *Buffer, size_t Size,
406                                   unsigned Alignment);
407 
408     /// allocateSpace - Reserves space in the current block if any, or
409     /// allocate a new one of the given size.
410     virtual void *allocateSpace(uintptr_t Size, unsigned Alignment);
411 
412     /// allocateGlobal - Allocate memory for a global.  Unlike allocateSpace,
413     /// this method does not allocate memory in the current output buffer,
414     /// because a global may live longer than the current function.
415     virtual void *allocateGlobal(uintptr_t Size, unsigned Alignment);
416 
417     virtual void addRelocation(const MachineRelocation &MR) {
418       Relocations.push_back(MR);
419     }
420 
421     virtual void StartMachineBasicBlock(MachineBasicBlock *MBB) {
422       if (MBBLocations.size() <= (unsigned)MBB->getNumber())
423         MBBLocations.resize((MBB->getNumber()+1)*2);
424       MBBLocations[MBB->getNumber()] = getCurrentPCValue();
425       if (MBB->hasAddressTaken())
426         TheJIT->addPointerToBasicBlock(MBB->getBasicBlock(),
427                                        (void*)getCurrentPCValue());
428       DEBUG(dbgs() << "JIT: Emitting BB" << MBB->getNumber() << " at ["
429                    << (void*) getCurrentPCValue() << "]\n");
430     }
431 
432     virtual uintptr_t getConstantPoolEntryAddress(unsigned Entry) const;
433     virtual uintptr_t getJumpTableEntryAddress(unsigned Entry) const;
434 
435     virtual uintptr_t getMachineBasicBlockAddress(MachineBasicBlock *MBB) const{
436       assert(MBBLocations.size() > (unsigned)MBB->getNumber() &&
437              MBBLocations[MBB->getNumber()] && "MBB not emitted!");
438       return MBBLocations[MBB->getNumber()];
439     }
440 
441     /// retryWithMoreMemory - Log a retry and deallocate all memory for the
442     /// given function.  Increase the minimum allocation size so that we get
443     /// more memory next time.
444     void retryWithMoreMemory(MachineFunction &F);
445 
446     /// deallocateMemForFunction - Deallocate all memory for the specified
447     /// function body.
448     void deallocateMemForFunction(const Function *F);
449 
450     virtual void processDebugLoc(DebugLoc DL, bool BeforePrintingInsn);
451 
452     virtual void emitLabel(MCSymbol *Label) {
453       LabelLocations[Label] = getCurrentPCValue();
454     }
455 
456     virtual DenseMap<MCSymbol*, uintptr_t> *getLabelLocations() {
457       return &LabelLocations;
458     }
459 
460     virtual uintptr_t getLabelAddress(MCSymbol *Label) const {
461       assert(LabelLocations.count(Label) && "Label not emitted!");
462       return LabelLocations.find(Label)->second;
463     }
464 
465     virtual void setModuleInfo(MachineModuleInfo* Info) {
466       MMI = Info;
467       if (DE.get()) DE->setModuleInfo(Info);
468     }
469 
470   private:
471     void *getPointerToGlobal(GlobalValue *GV, void *Reference,
472                              bool MayNeedFarStub);
473     void *getPointerToGVIndirectSym(GlobalValue *V, void *Reference);
474   };
475 }
476 
477 void CallSiteValueMapConfig::onDelete(JITResolverState *JRS, Function *F) {
478   JRS->EraseAllCallSitesForPrelocked(F);
479 }
480 
481 void JITResolverState::EraseAllCallSitesForPrelocked(Function *F) {
482   FunctionToCallSitesMapTy::iterator F2C = FunctionToCallSitesMap.find(F);
483   if (F2C == FunctionToCallSitesMap.end())
484     return;
485   StubToResolverMapTy &S2RMap = *StubToResolverMap;
486   for (SmallPtrSet<void*, 1>::const_iterator I = F2C->second.begin(),
487          E = F2C->second.end(); I != E; ++I) {
488     S2RMap.UnregisterStubResolver(*I);
489     bool Erased = CallSiteToFunctionMap.erase(*I);
490     (void)Erased;
491     assert(Erased && "Missing call site->function mapping");
492   }
493   FunctionToCallSitesMap.erase(F2C);
494 }
495 
496 void JITResolverState::EraseAllCallSitesPrelocked() {
497   StubToResolverMapTy &S2RMap = *StubToResolverMap;
498   for (CallSiteToFunctionMapTy::const_iterator
499          I = CallSiteToFunctionMap.begin(),
500          E = CallSiteToFunctionMap.end(); I != E; ++I) {
501     S2RMap.UnregisterStubResolver(I->first);
502   }
503   CallSiteToFunctionMap.clear();
504   FunctionToCallSitesMap.clear();
505 }
506 
507 JITResolver::~JITResolver() {
508   // No need to lock because we're in the destructor, and state isn't shared.
509   state.EraseAllCallSitesPrelocked();
510   assert(!StubToResolverMap->ResolverHasStubs(this) &&
511          "Resolver destroyed with stubs still alive.");
512 }
513 
514 /// getLazyFunctionStubIfAvailable - This returns a pointer to a function stub
515 /// if it has already been created.
516 void *JITResolver::getLazyFunctionStubIfAvailable(Function *F) {
517   MutexGuard locked(TheJIT->lock);
518 
519   // If we already have a stub for this function, recycle it.
520   return state.getFunctionToLazyStubMap(locked).lookup(F);
521 }
522 
523 /// getLazyFunctionStub - This returns a pointer to a function stub, creating
524 /// one on demand as needed.
525 void *JITResolver::getLazyFunctionStub(Function *F) {
526   MutexGuard locked(TheJIT->lock);
527 
528   // If we already have a lazy stub for this function, recycle it.
529   void *&Stub = state.getFunctionToLazyStubMap(locked)[F];
530   if (Stub) return Stub;
531 
532   // Call the lazy resolver function if we are JIT'ing lazily.  Otherwise we
533   // must resolve the symbol now.
534   void *Actual = TheJIT->isCompilingLazily()
535     ? (void *)(intptr_t)LazyResolverFn : (void *)0;
536 
537   // If this is an external declaration, attempt to resolve the address now
538   // to place in the stub.
539   if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage()) {
540     Actual = TheJIT->getPointerToFunction(F);
541 
542     // If we resolved the symbol to a null address (eg. a weak external)
543     // don't emit a stub. Return a null pointer to the application.
544     if (!Actual) return 0;
545   }
546 
547   TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
548   JE.startGVStub(F, SL.Size, SL.Alignment);
549   // Codegen a new stub, calling the lazy resolver or the actual address of the
550   // external function, if it was resolved.
551   Stub = TheJIT->getJITInfo().emitFunctionStub(F, Actual, JE);
552   JE.finishGVStub();
553 
554   if (Actual != (void*)(intptr_t)LazyResolverFn) {
555     // If we are getting the stub for an external function, we really want the
556     // address of the stub in the GlobalAddressMap for the JIT, not the address
557     // of the external function.
558     TheJIT->updateGlobalMapping(F, Stub);
559   }
560 
561   DEBUG(dbgs() << "JIT: Lazy stub emitted at [" << Stub << "] for function '"
562         << F->getName() << "'\n");
563 
564   if (TheJIT->isCompilingLazily()) {
565     // Register this JITResolver as the one corresponding to this call site so
566     // JITCompilerFn will be able to find it.
567     StubToResolverMap->RegisterStubResolver(Stub, this);
568 
569     // Finally, keep track of the stub-to-Function mapping so that the
570     // JITCompilerFn knows which function to compile!
571     state.AddCallSite(locked, Stub, F);
572   } else if (!Actual) {
573     // If we are JIT'ing non-lazily but need to call a function that does not
574     // exist yet, add it to the JIT's work list so that we can fill in the
575     // stub address later.
576     assert(!isNonGhostDeclaration(F) && !F->hasAvailableExternallyLinkage() &&
577            "'Actual' should have been set above.");
578     TheJIT->addPendingFunction(F);
579   }
580 
581   return Stub;
582 }
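// To summarize the cases above: for a lazily-compiled definition, the stub is
// wired to LazyResolverFn and registered in StubToResolverMap; for an external
// declaration, the stub targets the already-resolved address (or null is
// returned if, e.g., a weak external resolves to null); and in non-lazy mode an
// unresolved callee is queued via addPendingFunction so its stub can be patched
// later.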
583 
584 /// getGlobalValueIndirectSym - Return a lazy pointer containing the specified
585 /// GV address.
586 void *JITResolver::getGlobalValueIndirectSym(GlobalValue *GV, void *GVAddress) {
587   MutexGuard locked(TheJIT->lock);
588 
589   // If we already have a stub for this global variable, recycle it.
590   void *&IndirectSym = state.getGlobalToIndirectSymMap(locked)[GV];
591   if (IndirectSym) return IndirectSym;
592 
593   // Otherwise, codegen a new indirect symbol.
594   IndirectSym = TheJIT->getJITInfo().emitGlobalValueIndirectSym(GV, GVAddress,
595                                                                 JE);
596 
597   DEBUG(dbgs() << "JIT: Indirect symbol emitted at [" << IndirectSym
598         << "] for GV '" << GV->getName() << "'\n");
599 
600   return IndirectSym;
601 }
602 
603 /// getExternalFunctionStub - Return a stub for the function at the
604 /// specified address, created lazily on demand.
605 void *JITResolver::getExternalFunctionStub(void *FnAddr) {
606   // If we already have a stub for this function, recycle it.
607   void *&Stub = ExternalFnToStubMap[FnAddr];
608   if (Stub) return Stub;
609 
610   TargetJITInfo::StubLayout SL = TheJIT->getJITInfo().getStubLayout();
611   JE.startGVStub(0, SL.Size, SL.Alignment);
612   Stub = TheJIT->getJITInfo().emitFunctionStub(0, FnAddr, JE);
613   JE.finishGVStub();
614 
615   DEBUG(dbgs() << "JIT: Stub emitted at [" << Stub
616                << "] for external function at '" << FnAddr << "'\n");
617   return Stub;
618 }
619 
620 unsigned JITResolver::getGOTIndexForAddr(void* addr) {
621   unsigned idx = revGOTMap[addr];
622   if (!idx) {
623     idx = ++nextGOTIndex;
624     revGOTMap[addr] = idx;
625     DEBUG(dbgs() << "JIT: Adding GOT entry " << idx << " for addr ["
626                  << addr << "]\n");
627   }
628   return idx;
629 }
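// Note that GOT index 0 is effectively reserved: a missing revGOTMap entry
// default-constructs to 0, which is why nextGOTIndex is pre-incremented above
// and the first real slot handed out is 1.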
630 
631 /// JITCompilerFn - This function is called when a lazy compilation stub has
632 /// been entered.  It looks up which function this stub corresponds to, compiles
633 /// it if necessary, then returns the resultant function pointer.
634 void *JITResolver::JITCompilerFn(void *Stub) {
635   JITResolver *JR = StubToResolverMap->getResolverFromStub(Stub);
636   assert(JR && "Unable to find the corresponding JITResolver to the call site");
637 
638   Function* F = 0;
639   void* ActualPtr = 0;
640 
641   {
642     // Only lock for getting the Function. The call getPointerToFunction made
643     // in this function might trigger function materializing, which requires
644     // JIT lock to be unlocked.
645     MutexGuard locked(JR->TheJIT->lock);
646 
647     // The address given to us for the stub may not be exactly right, it might
648     // be a little bit after the stub.  As such, use upper_bound to find it.
649     std::pair<void*, Function*> I =
650       JR->state.LookupFunctionFromCallSite(locked, Stub);
651     F = I.second;
652     ActualPtr = I.first;
653   }
654 
655   // If we have already code generated the function, just return the address.
656   void *Result = JR->TheJIT->getPointerToGlobalIfAvailable(F);
657 
658   if (!Result) {
659     // Otherwise we don't have it, do lazy compilation now.
660 
661     // If lazy compilation is disabled, emit a useful error message and abort.
662     if (!JR->TheJIT->isCompilingLazily()) {
663       report_fatal_error("LLVM JIT requested to do lazy compilation of"
664                          " function '"
665                         + F->getName() + "' when lazy compiles are disabled!");
666     }
667 
668     DEBUG(dbgs() << "JIT: Lazily resolving function '" << F->getName()
669           << "' In stub ptr = " << Stub << " actual ptr = "
670           << ActualPtr << "\n");
671     (void)ActualPtr;
672 
673     Result = JR->TheJIT->getPointerToFunction(F);
674   }
675 
676   // Reacquire the lock to update the GOT map.
677   MutexGuard locked(JR->TheJIT->lock);
678 
679   // We might like to remove the call site from the CallSiteToFunction map, but
680   // we can't do that! Multiple threads could be stuck, waiting to acquire the
681   // lock above. As soon as the 1st function finishes compiling the function,
682   // the next one will be released, and needs to be able to find the function it
683   // needs to call.
684 
685   // FIXME: We could rewrite all references to this stub if we knew them.
686 
687   // What we will do is set the compiled function address to map to the
688   // same GOT entry as the stub so that later clients may update the GOT
689   // if they see it still using the stub address.
690   // Note: this is done so the Resolver doesn't have to manage GOT memory
691   // Do this without allocating map space if the target isn't using a GOT
692   if(JR->revGOTMap.find(Stub) != JR->revGOTMap.end())
693     JR->revGOTMap[Result] = JR->revGOTMap[Stub];
694 
695   return Result;
696 }
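// In short, the lazy-compilation path is: an emitted call lands in the stub,
// the stub enters the target's lazy resolver, which calls JITCompilerFn(Stub);
// the stub address is mapped back to its Function via
// LookupFunctionFromCallSite, the body is compiled through getPointerToFunction
// if it has not been already, and the resulting address is returned so the
// caller can be redirected to the real code.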
697 
698 //===----------------------------------------------------------------------===//
699 // JITEmitter code.
700 //
701 void *JITEmitter::getPointerToGlobal(GlobalValue *V, void *Reference,
702                                      bool MayNeedFarStub) {
703   if (GlobalVariable *GV = dyn_cast<GlobalVariable>(V))
704     return TheJIT->getOrEmitGlobalVariable(GV);
705 
706   if (GlobalAlias *GA = dyn_cast<GlobalAlias>(V))
707     return TheJIT->getPointerToGlobal(GA->resolveAliasedGlobal(false));
708 
709   // If we have already compiled the function, return a pointer to its body.
710   Function *F = cast<Function>(V);
711 
712   void *FnStub = Resolver.getLazyFunctionStubIfAvailable(F);
713   if (FnStub) {
714     // Return the function stub if it's already created.  We do this first so
715     // that we're returning the same address for the function as any previous
716     // call.  TODO: Yes, this is wrong. The lazy stub isn't guaranteed to be
717     // close enough to call.
718     return FnStub;
719   }
720 
721   // If we know the target can handle arbitrary-distance calls, try to
722   // return a direct pointer.
723   if (!MayNeedFarStub) {
724     // If we have code, go ahead and return that.
725     void *ResultPtr = TheJIT->getPointerToGlobalIfAvailable(F);
726     if (ResultPtr) return ResultPtr;
727 
728     // If this is an external function pointer, we can force the JIT to
729     // 'compile' it, which really just adds it to the map.
730     if (isNonGhostDeclaration(F) || F->hasAvailableExternallyLinkage())
731       return TheJIT->getPointerToFunction(F);
732   }
733 
734   // Otherwise, we may need to emit a stub, and, conservatively, we always do
735   // so.  Note that it's possible to return null from getLazyFunctionStub in the
736   // case of a weak extern that fails to resolve.
737   return Resolver.getLazyFunctionStub(F);
738 }
739 
740 void *JITEmitter::getPointerToGVIndirectSym(GlobalValue *V, void *Reference) {
741   // Make sure GV is emitted first, and create a stub containing the fully
742   // resolved address.
743   void *GVAddress = getPointerToGlobal(V, Reference, false);
744   void *StubAddr = Resolver.getGlobalValueIndirectSym(V, GVAddress);
745   return StubAddr;
746 }
747 
748 void JITEmitter::processDebugLoc(DebugLoc DL, bool BeforePrintingInsn) {
749   if (DL.isUnknown()) return;
750   if (!BeforePrintingInsn) return;
751 
752   const LLVMContext &Context = EmissionDetails.MF->getFunction()->getContext();
753 
754   if (DL.getScope(Context) != 0 && PrevDL != DL) {
755     JITEvent_EmittedFunctionDetails::LineStart NextLine;
756     NextLine.Address = getCurrentPCValue();
757     NextLine.Loc = DL;
758     EmissionDetails.LineStarts.push_back(NextLine);
759   }
760 
761   PrevDL = DL;
762 }
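// The LineStart records collected here are delivered to JITEventListeners via
// EmissionDetails in finishFunction; a record is only appended when the
// location has a known scope and differs from the previously seen DebugLoc.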
763 
764 static unsigned GetConstantPoolSizeInBytes(MachineConstantPool *MCP,
765                                            const TargetData *TD) {
766   const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
767   if (Constants.empty()) return 0;
768 
769   unsigned Size = 0;
770   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
771     MachineConstantPoolEntry CPE = Constants[i];
772     unsigned AlignMask = CPE.getAlignment() - 1;
773     Size = (Size + AlignMask) & ~AlignMask;
774     Type *Ty = CPE.getType();
775     Size += TD->getTypeAllocSize(Ty);
776   }
777   return Size;
778 }
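// Worked example of the rounding above (hypothetical values): for Size == 10
// and an 8-byte aligned entry, AlignMask == 7 and (10 + 7) & ~7 == 16, so the
// entry starts at offset 16 and its own allocation size is then added on top.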
779 
780 void JITEmitter::startFunction(MachineFunction &F) {
781   DEBUG(dbgs() << "JIT: Starting CodeGen of Function "
782         << F.getFunction()->getName() << "\n");
783 
784   uintptr_t ActualSize = 0;
785   // Set the memory writable, if it's not already
786   MemMgr->setMemoryWritable();
787 
788   if (SizeEstimate > 0) {
789     // SizeEstimate will be non-zero on reallocation attempts.
790     ActualSize = SizeEstimate;
791   }
792 
793   BufferBegin = CurBufferPtr = MemMgr->startFunctionBody(F.getFunction(),
794                                                          ActualSize);
795   BufferEnd = BufferBegin+ActualSize;
796   EmittedFunctions[F.getFunction()].FunctionBody = BufferBegin;
797 
798   // Ensure the constant pool/jump table info is at least 4-byte aligned.
799   emitAlignment(16);
800 
801   emitConstantPool(F.getConstantPool());
802   if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
803     initJumpTableInfo(MJTI);
804 
805   // About to start emitting the machine code for the function.
806   emitAlignment(std::max(F.getFunction()->getAlignment(), 8U));
807   TheJIT->updateGlobalMapping(F.getFunction(), CurBufferPtr);
808   EmittedFunctions[F.getFunction()].Code = CurBufferPtr;
809 
810   MBBLocations.clear();
811 
812   EmissionDetails.MF = &F;
813   EmissionDetails.LineStarts.clear();
814 }
815 
816 bool JITEmitter::finishFunction(MachineFunction &F) {
817   if (CurBufferPtr == BufferEnd) {
818     // We must call endFunctionBody before retrying, because
819     // deallocateMemForFunction requires it.
820     MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
821     retryWithMoreMemory(F);
822     return true;
823   }
824 
825   if (MachineJumpTableInfo *MJTI = F.getJumpTableInfo())
826     emitJumpTableInfo(MJTI);
827 
828   // FnStart is the start of the text, not the start of the constant pool and
829   // other per-function data.
830   uint8_t *FnStart =
831     (uint8_t *)TheJIT->getPointerToGlobalIfAvailable(F.getFunction());
832 
833   // FnEnd is the end of the function's machine code.
834   uint8_t *FnEnd = CurBufferPtr;
835 
836   if (!Relocations.empty()) {
837     CurFn = F.getFunction();
838     NumRelos += Relocations.size();
839 
840     // Resolve the relocations to concrete pointers.
841     for (unsigned i = 0, e = Relocations.size(); i != e; ++i) {
842       MachineRelocation &MR = Relocations[i];
843       void *ResultPtr = 0;
844       if (!MR.letTargetResolve()) {
845         if (MR.isExternalSymbol()) {
846           ResultPtr = TheJIT->getPointerToNamedFunction(MR.getExternalSymbol(),
847                                                         false);
848           DEBUG(dbgs() << "JIT: Map \'" << MR.getExternalSymbol() << "\' to ["
849                        << ResultPtr << "]\n");
850 
851           // If the target REALLY wants a stub for this function, emit it now.
852           if (MR.mayNeedFarStub()) {
853             ResultPtr = Resolver.getExternalFunctionStub(ResultPtr);
854           }
855         } else if (MR.isGlobalValue()) {
856           ResultPtr = getPointerToGlobal(MR.getGlobalValue(),
857                                          BufferBegin+MR.getMachineCodeOffset(),
858                                          MR.mayNeedFarStub());
859         } else if (MR.isIndirectSymbol()) {
860           ResultPtr = getPointerToGVIndirectSym(
861               MR.getGlobalValue(), BufferBegin+MR.getMachineCodeOffset());
862         } else if (MR.isBasicBlock()) {
863           ResultPtr = (void*)getMachineBasicBlockAddress(MR.getBasicBlock());
864         } else if (MR.isConstantPoolIndex()) {
865           ResultPtr =
866             (void*)getConstantPoolEntryAddress(MR.getConstantPoolIndex());
867         } else {
868           assert(MR.isJumpTableIndex());
869           ResultPtr=(void*)getJumpTableEntryAddress(MR.getJumpTableIndex());
870         }
871 
872         MR.setResultPointer(ResultPtr);
873       }
874 
875       // if we are managing the GOT and the relocation wants an index,
876       // give it one
877       if (MR.isGOTRelative() && MemMgr->isManagingGOT()) {
878         unsigned idx = Resolver.getGOTIndexForAddr(ResultPtr);
879         MR.setGOTIndex(idx);
880         if (((void**)MemMgr->getGOTBase())[idx] != ResultPtr) {
881           DEBUG(dbgs() << "JIT: GOT was out of date for " << ResultPtr
882                        << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
883                        << "\n");
884           ((void**)MemMgr->getGOTBase())[idx] = ResultPtr;
885         }
886       }
887     }
888 
889     CurFn = 0;
890     TheJIT->getJITInfo().relocate(BufferBegin, &Relocations[0],
891                                   Relocations.size(), MemMgr->getGOTBase());
892   }
893 
894   // Update the GOT entry for F to point to the new code.
895   if (MemMgr->isManagingGOT()) {
896     unsigned idx = Resolver.getGOTIndexForAddr((void*)BufferBegin);
897     if (((void**)MemMgr->getGOTBase())[idx] != (void*)BufferBegin) {
898       DEBUG(dbgs() << "JIT: GOT was out of date for " << (void*)BufferBegin
899                    << " pointing at " << ((void**)MemMgr->getGOTBase())[idx]
900                    << "\n");
901       ((void**)MemMgr->getGOTBase())[idx] = (void*)BufferBegin;
902     }
903   }
904 
905   // CurBufferPtr may have moved beyond FnEnd, due to memory allocation for
906   // global variables that were referenced in the relocations.
907   MemMgr->endFunctionBody(F.getFunction(), BufferBegin, CurBufferPtr);
908 
909   if (CurBufferPtr == BufferEnd) {
910     retryWithMoreMemory(F);
911     return true;
912   } else {
913     // Now that we've succeeded in emitting the function, reset the
914     // SizeEstimate back down to zero.
915     SizeEstimate = 0;
916   }
917 
918   BufferBegin = CurBufferPtr = 0;
919   NumBytes += FnEnd-FnStart;
920 
921   // Invalidate the icache if necessary.
922   sys::Memory::InvalidateInstructionCache(FnStart, FnEnd-FnStart);
923 
924   TheJIT->NotifyFunctionEmitted(*F.getFunction(), FnStart, FnEnd-FnStart,
925                                 EmissionDetails);
926 
927   // Reset the previous debug location.
928   PrevDL = DebugLoc();
929 
930   DEBUG(dbgs() << "JIT: Finished CodeGen of [" << (void*)FnStart
931         << "] Function: " << F.getFunction()->getName()
932         << ": " << (FnEnd-FnStart) << " bytes of text, "
933         << Relocations.size() << " relocations\n");
934 
935   Relocations.clear();
936   ConstPoolAddresses.clear();
937 
938   // Mark code region readable and executable if it's not so already.
939   MemMgr->setMemoryExecutable();
940 
941   DEBUG({
942       if (sys::hasDisassembler()) {
943         dbgs() << "JIT: Disassembled code:\n";
944         dbgs() << sys::disassembleBuffer(FnStart, FnEnd-FnStart,
945                                          (uintptr_t)FnStart);
946       } else {
947         dbgs() << "JIT: Binary code:\n";
948         uint8_t* q = FnStart;
949         for (int i = 0; q < FnEnd; q += 4, ++i) {
950           if (i == 4)
951             i = 0;
952           if (i == 0)
953             dbgs() << "JIT: " << (long)(q - FnStart) << ": ";
954           bool Done = false;
955           for (int j = 3; j >= 0; --j) {
956             if (q + j >= FnEnd)
957               Done = true;
958             else
959               dbgs() << (unsigned short)q[j];
960           }
961           if (Done)
962             break;
963           dbgs() << ' ';
964           if (i == 3)
965             dbgs() << '\n';
966         }
967         dbgs()<< '\n';
968       }
969     });
970 
971   if (JITExceptionHandling || JITEmitDebugInfo) {
972     uintptr_t ActualSize = 0;
973     SavedBufferBegin = BufferBegin;
974     SavedBufferEnd = BufferEnd;
975     SavedCurBufferPtr = CurBufferPtr;
976 
977     BufferBegin = CurBufferPtr = MemMgr->startExceptionTable(F.getFunction(),
978                                                              ActualSize);
979     BufferEnd = BufferBegin+ActualSize;
980     EmittedFunctions[F.getFunction()].ExceptionTable = BufferBegin;
981     uint8_t *EhStart;
982     uint8_t *FrameRegister = DE->EmitDwarfTable(F, *this, FnStart, FnEnd,
983                                                 EhStart);
984     MemMgr->endExceptionTable(F.getFunction(), BufferBegin, CurBufferPtr,
985                               FrameRegister);
986     uint8_t *EhEnd = CurBufferPtr;
987     BufferBegin = SavedBufferBegin;
988     BufferEnd = SavedBufferEnd;
989     CurBufferPtr = SavedCurBufferPtr;
990 
991     if (JITExceptionHandling) {
992       TheJIT->RegisterTable(F.getFunction(), FrameRegister);
993     }
994 
995     if (JITEmitDebugInfo) {
996       DebugInfo I;
997       I.FnStart = FnStart;
998       I.FnEnd = FnEnd;
999       I.EhStart = EhStart;
1000       I.EhEnd = EhEnd;
1001       DR->RegisterFunction(F.getFunction(), I);
1002     }
1003   }
1004 
1005   if (MMI)
1006     MMI->EndFunction();
1007 
1008   return false;
1009 }
1010 
1011 void JITEmitter::retryWithMoreMemory(MachineFunction &F) {
1012   DEBUG(dbgs() << "JIT: Ran out of space for native code.  Reattempting.\n");
1013   Relocations.clear();  // Clear the old relocations or we'll reapply them.
1014   ConstPoolAddresses.clear();
1015   ++NumRetries;
1016   deallocateMemForFunction(F.getFunction());
1017   // Try again with at least twice as much free space.
1018   SizeEstimate = (uintptr_t)(2 * (BufferEnd - BufferBegin));
1019 
1020   for (MachineFunction::iterator MBB = F.begin(), E = F.end(); MBB != E; ++MBB){
1021     if (MBB->hasAddressTaken())
1022       TheJIT->clearPointerToBasicBlock(MBB->getBasicBlock());
1023   }
1024 }
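// The doubled SizeEstimate is consumed on the next attempt: startFunction sees
// a non-zero SizeEstimate and requests at least that much space from the memory
// manager, and finishFunction resets it to zero once emission succeeds.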
1025 
1026 /// deallocateMemForFunction - Deallocate all memory for the specified
1027 /// function body.  Also drop any references the function has to stubs.
1028 /// May be called while the Function is being destroyed inside ~Value().
1029 void JITEmitter::deallocateMemForFunction(const Function *F) {
1030   ValueMap<const Function *, EmittedCode, EmittedFunctionConfig>::iterator
1031     Emitted = EmittedFunctions.find(F);
1032   if (Emitted != EmittedFunctions.end()) {
1033     MemMgr->deallocateFunctionBody(Emitted->second.FunctionBody);
1034     MemMgr->deallocateExceptionTable(Emitted->second.ExceptionTable);
1035     TheJIT->NotifyFreeingMachineCode(Emitted->second.Code);
1036 
1037     EmittedFunctions.erase(Emitted);
1038   }
1039 
1040   if(JITExceptionHandling) {
1041     TheJIT->DeregisterTable(F);
1042   }
1043 
1044   if (JITEmitDebugInfo) {
1045     DR->UnregisterFunction(F);
1046   }
1047 }
1048 
1049 
1050 void* JITEmitter::allocateSpace(uintptr_t Size, unsigned Alignment) {
1051   if (BufferBegin)
1052     return JITCodeEmitter::allocateSpace(Size, Alignment);
1053 
1054   // create a new memory block if there is no active one.
1055   // care must be taken so that BufferBegin is invalidated when a
1056   // block is trimmed
1057   BufferBegin = CurBufferPtr = MemMgr->allocateSpace(Size, Alignment);
1058   BufferEnd = BufferBegin+Size;
1059   return CurBufferPtr;
1060 }
1061 
1062 void* JITEmitter::allocateGlobal(uintptr_t Size, unsigned Alignment) {
1063   // Delegate this call through the memory manager.
1064   return MemMgr->allocateGlobal(Size, Alignment);
1065 }
1066 
1067 void JITEmitter::emitConstantPool(MachineConstantPool *MCP) {
1068   if (TheJIT->getJITInfo().hasCustomConstantPool())
1069     return;
1070 
1071   const std::vector<MachineConstantPoolEntry> &Constants = MCP->getConstants();
1072   if (Constants.empty()) return;
1073 
1074   unsigned Size = GetConstantPoolSizeInBytes(MCP, TheJIT->getTargetData());
1075   unsigned Align = MCP->getConstantPoolAlignment();
1076   ConstantPoolBase = allocateSpace(Size, Align);
1077   ConstantPool = MCP;
1078 
1079   if (ConstantPoolBase == 0) return;  // Buffer overflow.
1080 
1081   DEBUG(dbgs() << "JIT: Emitted constant pool at [" << ConstantPoolBase
1082                << "] (size: " << Size << ", alignment: " << Align << ")\n");
1083 
1084   // Initialize the memory for all of the constant pool entries.
1085   unsigned Offset = 0;
1086   for (unsigned i = 0, e = Constants.size(); i != e; ++i) {
1087     MachineConstantPoolEntry CPE = Constants[i];
1088     unsigned AlignMask = CPE.getAlignment() - 1;
1089     Offset = (Offset + AlignMask) & ~AlignMask;
1090 
1091     uintptr_t CAddr = (uintptr_t)ConstantPoolBase + Offset;
1092     ConstPoolAddresses.push_back(CAddr);
1093     if (CPE.isMachineConstantPoolEntry()) {
1094       // FIXME: add support to lower machine constant pool values into bytes!
1095       report_fatal_error("Initialize memory with machine specific constant pool "
1096                         "entry has not been implemented!");
1097     }
1098     TheJIT->InitializeMemory(CPE.Val.ConstVal, (void*)CAddr);
1099     DEBUG(dbgs() << "JIT:   CP" << i << " at [0x";
1100           dbgs().write_hex(CAddr) << "]\n");
1101 
1102     Type *Ty = CPE.Val.ConstVal->getType();
1103     Offset += TheJIT->getTargetData()->getTypeAllocSize(Ty);
1104   }
1105 }
1106 
1107 void JITEmitter::initJumpTableInfo(MachineJumpTableInfo *MJTI) {
1108   if (TheJIT->getJITInfo().hasCustomJumpTables())
1109     return;
1110   if (MJTI->getEntryKind() == MachineJumpTableInfo::EK_Inline)
1111     return;
1112 
1113   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1114   if (JT.empty()) return;
1115 
1116   unsigned NumEntries = 0;
1117   for (unsigned i = 0, e = JT.size(); i != e; ++i)
1118     NumEntries += JT[i].MBBs.size();
1119 
1120   unsigned EntrySize = MJTI->getEntrySize(*TheJIT->getTargetData());
1121 
1122   // Just allocate space for all the jump tables now.  We will fix up the actual
1123   // MBB entries in the tables after we emit the code for each block, since then
1124   // we will know the final locations of the MBBs in memory.
1125   JumpTable = MJTI;
1126   JumpTableBase = allocateSpace(NumEntries * EntrySize,
1127                              MJTI->getEntryAlignment(*TheJIT->getTargetData()));
1128 }
1129 
1130 void JITEmitter::emitJumpTableInfo(MachineJumpTableInfo *MJTI) {
1131   if (TheJIT->getJITInfo().hasCustomJumpTables())
1132     return;
1133 
1134   const std::vector<MachineJumpTableEntry> &JT = MJTI->getJumpTables();
1135   if (JT.empty() || JumpTableBase == 0) return;
1136 
1137 
1138   switch (MJTI->getEntryKind()) {
1139   case MachineJumpTableInfo::EK_Inline:
1140     return;
1141   case MachineJumpTableInfo::EK_BlockAddress: {
1142     // EK_BlockAddress - Each entry is a plain address of block, e.g.:
1143     //     .word LBB123
1144     assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == sizeof(void*) &&
1145            "Cross JIT'ing?");
1146 
1147     // For each jump table, map each target in the jump table to the address of
1148     // an emitted MachineBasicBlock.
1149     intptr_t *SlotPtr = (intptr_t*)JumpTableBase;
1150 
1151     for (unsigned i = 0, e = JT.size(); i != e; ++i) {
1152       const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
1153       // Store the address of the basic block for this jump table slot in the
1154       // memory we allocated for the jump table in 'initJumpTableInfo'
1155       for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi)
1156         *SlotPtr++ = getMachineBasicBlockAddress(MBBs[mi]);
1157     }
1158     break;
1159   }
1160 
1161   case MachineJumpTableInfo::EK_Custom32:
1162   case MachineJumpTableInfo::EK_GPRel32BlockAddress:
1163   case MachineJumpTableInfo::EK_LabelDifference32: {
1164     assert(MJTI->getEntrySize(*TheJIT->getTargetData()) == 4&&"Cross JIT'ing?");
1165     // For each jump table, place the offset from the beginning of the table
1166     // to the target address.
1167     int *SlotPtr = (int*)JumpTableBase;
1168 
1169     for (unsigned i = 0, e = JT.size(); i != e; ++i) {
1170       const std::vector<MachineBasicBlock*> &MBBs = JT[i].MBBs;
1171       // Store the offset of the basic block for this jump table slot in the
1172       // memory we allocated for the jump table in 'initJumpTableInfo'
1173       uintptr_t Base = (uintptr_t)SlotPtr;
1174       for (unsigned mi = 0, me = MBBs.size(); mi != me; ++mi) {
1175         uintptr_t MBBAddr = getMachineBasicBlockAddress(MBBs[mi]);
1176         /// FIXME: Use EntryKind instead of magic "getPICJumpTableEntry" hook.
1177         *SlotPtr++ = TheJIT->getJITInfo().getPICJumpTableEntry(MBBAddr, Base);
1178       }
1179     }
1180     break;
1181   }
1182   }
1183 }
1184 
1185 void JITEmitter::startGVStub(const GlobalValue* GV,
1186                              unsigned StubSize, unsigned Alignment) {
1187   SavedBufferBegin = BufferBegin;
1188   SavedBufferEnd = BufferEnd;
1189   SavedCurBufferPtr = CurBufferPtr;
1190 
1191   BufferBegin = CurBufferPtr = MemMgr->allocateStub(GV, StubSize, Alignment);
1192   BufferEnd = BufferBegin+StubSize+1;
1193 }
1194 
1195 void JITEmitter::startGVStub(void *Buffer, unsigned StubSize) {
1196   SavedBufferBegin = BufferBegin;
1197   SavedBufferEnd = BufferEnd;
1198   SavedCurBufferPtr = CurBufferPtr;
1199 
1200   BufferBegin = CurBufferPtr = (uint8_t *)Buffer;
1201   BufferEnd = BufferBegin+StubSize+1;
1202 }
1203 
1204 void JITEmitter::finishGVStub() {
1205   assert(CurBufferPtr != BufferEnd && "Stub overflowed allocated space.");
1206   NumBytes += getCurrentPCOffset();
1207   BufferBegin = SavedBufferBegin;
1208   BufferEnd = SavedBufferEnd;
1209   CurBufferPtr = SavedCurBufferPtr;
1210 }
1211 
1212 void *JITEmitter::allocIndirectGV(const GlobalValue *GV,
1213                                   const uint8_t *Buffer, size_t Size,
1214                                   unsigned Alignment) {
1215   uint8_t *IndGV = MemMgr->allocateStub(GV, Size, Alignment);
1216   memcpy(IndGV, Buffer, Size);
1217   return IndGV;
1218 }
1219 
1220 // getConstantPoolEntryAddress - Return the address of the 'ConstantNum' entry
1221 // in the constant pool that was last emitted with the 'emitConstantPool'
1222 // method.
1223 //
1224 uintptr_t JITEmitter::getConstantPoolEntryAddress(unsigned ConstantNum) const {
1225   assert(ConstantNum < ConstantPool->getConstants().size() &&
1226          "Invalid ConstantPoolIndex!");
1227   return ConstPoolAddresses[ConstantNum];
1228 }
1229 
1230 // getJumpTableEntryAddress - Return the address of the JumpTable with index
1231 // 'Index' in the jump table that was last initialized with 'initJumpTableInfo'
1232 //
1233 uintptr_t JITEmitter::getJumpTableEntryAddress(unsigned Index) const {
1234   const std::vector<MachineJumpTableEntry> &JT = JumpTable->getJumpTables();
1235   assert(Index < JT.size() && "Invalid jump table index!");
1236 
1237   unsigned EntrySize = JumpTable->getEntrySize(*TheJIT->getTargetData());
1238 
1239   unsigned Offset = 0;
1240   for (unsigned i = 0; i < Index; ++i)
1241     Offset += JT[i].MBBs.size();
1242 
1243   Offset *= EntrySize;
1244 
1245   return (uintptr_t)((char *)JumpTableBase + Offset);
1246 }
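// Worked example (hypothetical layout): with two jump tables of 3 and 2 entries
// and EntrySize == 4, Index 1 starts after the 3 entries of table 0, i.e. at
// JumpTableBase + 3 * 4 == JumpTableBase + 12.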
1247 
1248 void JITEmitter::EmittedFunctionConfig::onDelete(
1249   JITEmitter *Emitter, const Function *F) {
1250   Emitter->deallocateMemForFunction(F);
1251 }
1252 void JITEmitter::EmittedFunctionConfig::onRAUW(
1253   JITEmitter *, const Function*, const Function*) {
1254   llvm_unreachable("The JIT doesn't know how to handle a"
1255                    " RAUW on a value it has emitted.");
1256 }
1257 
1258 
1259 //===----------------------------------------------------------------------===//
1260 //  Public interface to this file
1261 //===----------------------------------------------------------------------===//
1262 
1263 JITCodeEmitter *JIT::createEmitter(JIT &jit, JITMemoryManager *JMM,
1264                                    TargetMachine &tm) {
1265   return new JITEmitter(jit, JMM, tm);
1266 }
1267 
1268 // getPointerToFunctionOrStub - If the specified function has been
1269 // code-gen'd, return a pointer to the function.  If not, compile it, or use
1270 // a stub to implement lazy compilation if available.
1271 //
1272 void *JIT::getPointerToFunctionOrStub(Function *F) {
1273   // If we have already code generated the function, just return the address.
1274   if (void *Addr = getPointerToGlobalIfAvailable(F))
1275     return Addr;
1276 
1277   // Get a stub if the target supports it.
1278   assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
1279   JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
1280   return JE->getJITResolver().getLazyFunctionStub(F);
1281 }
1282 
1283 void JIT::updateFunctionStub(Function *F) {
1284   // Get the empty stub we generated earlier.
1285   assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
1286   JITEmitter *JE = cast<JITEmitter>(getCodeEmitter());
1287   void *Stub = JE->getJITResolver().getLazyFunctionStub(F);
1288   void *Addr = getPointerToGlobalIfAvailable(F);
1289   assert(Addr != Stub && "Function must have non-stub address to be updated.");
1290 
1291   // Tell the target jit info to rewrite the stub at the specified address,
1292   // rather than creating a new one.
1293   TargetJITInfo::StubLayout layout = getJITInfo().getStubLayout();
1294   JE->startGVStub(Stub, layout.Size);
1295   getJITInfo().emitFunctionStub(F, Addr, *getCodeEmitter());
1296   JE->finishGVStub();
1297 }
1298 
1299 /// freeMachineCodeForFunction - release machine code memory for given Function.
1300 ///
1301 void JIT::freeMachineCodeForFunction(Function *F) {
1302   // Delete translation for this from the ExecutionEngine, so it will get
1303   // retranslated next time it is used.
1304   updateGlobalMapping(F, 0);
1305 
1306   // Free the actual memory for the function body and related stuff.
1307   assert(isa<JITEmitter>(JCE) && "Unexpected MCE?");
1308   cast<JITEmitter>(JCE)->deallocateMemForFunction(F);
1309 }
1310