//===- GlobalMerge.cpp - Internal globals merging -------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
// This pass merges globals with internal linkage into one. This way all the
// globals that were merged can be addressed using offsets from the same base
// pointer (there is no need for a separate base pointer for each global).
// Such a transformation can significantly reduce the register pressure when
// many globals are involved.
//
// For example, consider the code which touches several global variables at
// once:
//
// static int foo[N], bar[N], baz[N];
//
// for (i = 0; i < N; ++i) {
//    foo[i] = bar[i] * baz[i];
// }
//
//  On ARM the addresses of all 3 arrays must be kept in registers, so this
//  code has quite high register pressure (loop body):
//
//  ldr     r1, [r5], #4
//  ldr     r2, [r6], #4
//  mul     r1, r2, r1
//  str     r1, [r0], #4
//
//  The pass converts the code to something like:
//
//  static struct {
//    int foo[N];
//    int bar[N];
//    int baz[N];
//  } merged;
//
//  for (i = 0; i < N; ++i) {
//    merged.foo[i] = merged.bar[i] * merged.baz[i];
//  }
//
//  and in ARM code this becomes:
//
//  ldr     r0, [r5, #40]
//  ldr     r1, [r5, #80]
//  mul     r0, r1, r0
//  str     r0, [r5], #4
//
//  Note that we saved 2 registers here almost "for free".
//
// However, merging globals can have tradeoffs:
// - it confuses debuggers, tools, and users
// - it makes linker optimizations less useful (order files, LOHs, ...)
// - it forces usage of indexed addressing (which isn't necessarily "free")
// - it can increase register pressure when the uses are disparate enough.
//
// We use heuristics to discover the best global grouping we can (cf cl::opts).
//
//===----------------------------------------------------------------------===//

#include "llvm/ADT/BitVector.h"
#include "llvm/ADT/DenseMap.h"
#include "llvm/ADT/SmallPtrSet.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/Statistic.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/ADT/Twine.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalAlias.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Use.h"
#include "llvm/IR/User.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Target/TargetLoweringObjectFile.h"
#include "llvm/Target/TargetMachine.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <string>
#include <vector>

using namespace llvm;

#define DEBUG_TYPE "global-merge"

// FIXME: This is only useful as a last-resort way to disable the pass.
static cl::opt<bool>
EnableGlobalMerge("enable-global-merge", cl::Hidden,
                  cl::desc("Enable the global merge pass"),
                  cl::init(true));

static cl::opt<unsigned>
GlobalMergeMaxOffset("global-merge-max-offset", cl::Hidden,
                     cl::desc("Set maximum offset for global merge pass"),
                     cl::init(0));

static cl::opt<bool> GlobalMergeGroupByUse(
    "global-merge-group-by-use", cl::Hidden,
    cl::desc("Improve global merge pass to look at uses"), cl::init(true));

static cl::opt<bool> GlobalMergeIgnoreSingleUse(
    "global-merge-ignore-single-use", cl::Hidden,
    cl::desc("Improve global merge pass to ignore globals only used alone"),
    cl::init(true));

static cl::opt<bool>
EnableGlobalMergeOnConst("global-merge-on-const", cl::Hidden,
                         cl::desc("Enable global merge pass on constants"),
                         cl::init(false));

// FIXME: This could be a transitional option; we should remove it once we are
// sure this optimization always benefits all targets.
static cl::opt<cl::boolOrDefault>
EnableGlobalMergeOnExternal("global-merge-on-external", cl::Hidden,
     cl::desc("Enable global merge pass on external linkage"));

STATISTIC(NumMerged, "Number of globals merged");

namespace {

  class GlobalMerge : public FunctionPass {
    const TargetMachine *TM = nullptr;

    // FIXME: Infer the maximum possible offset depending on the actual users
    // (these max offsets are different for the users inside Thumb or ARM
    // functions), see the code that passes in the offset in the ARM backend
    // for more information.
    unsigned MaxOffset;

    /// Whether we should try to optimize for size only.
    /// Currently, this applies a dead simple heuristic: only consider globals
    /// used in minsize functions for merging.
    /// FIXME: This could learn about optsize, and be used in the cost model.
    bool OnlyOptimizeForSize = false;

    /// Whether we should merge global variables that have external linkage.
    bool MergeExternalGlobals = false;

    bool IsMachO;

    bool doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                 Module &M, bool isConst, unsigned AddrSpace) const;

    /// Merge everything in \p Globals for which the corresponding bit
    /// in \p GlobalSet is set.
    bool doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                 const BitVector &GlobalSet, Module &M, bool isConst,
                 unsigned AddrSpace) const;

    /// Check whether the given variable has been identified as must-keep.
    /// \pre setMustKeepGlobalVariables must have been called on the Module
    ///      that contains GV.
    bool isMustKeepGlobalVariable(const GlobalVariable *GV) const {
      return MustKeepGlobalVariables.count(GV);
    }

    /// Collect every variable marked as "used" or used in a landing pad
    /// instruction for this Module.
    void setMustKeepGlobalVariables(Module &M);

    /// Collect every variable marked as "used".
    void collectUsedGlobalVariables(Module &M, StringRef Name);

    /// Keep track of the GlobalVariables that must not be merged away.
    SmallPtrSet<const GlobalVariable *, 16> MustKeepGlobalVariables;

  public:
    static char ID;             // Pass identification, replacement for typeid.

    explicit GlobalMerge()
        : FunctionPass(ID), MaxOffset(GlobalMergeMaxOffset) {
      initializeGlobalMergePass(*PassRegistry::getPassRegistry());
    }

    explicit GlobalMerge(const TargetMachine *TM, unsigned MaximalOffset,
                         bool OnlyOptimizeForSize, bool MergeExternalGlobals)
        : FunctionPass(ID), TM(TM), MaxOffset(MaximalOffset),
          OnlyOptimizeForSize(OnlyOptimizeForSize),
          MergeExternalGlobals(MergeExternalGlobals) {
      initializeGlobalMergePass(*PassRegistry::getPassRegistry());
    }

    bool doInitialization(Module &M) override;
    bool runOnFunction(Function &F) override;
    bool doFinalization(Module &M) override;

    StringRef getPassName() const override { return "Merge internal globals"; }

    void getAnalysisUsage(AnalysisUsage &AU) const override {
      AU.setPreservesCFG();
      FunctionPass::getAnalysisUsage(AU);
    }
  };

} // end anonymous namespace

char GlobalMerge::ID = 0;

INITIALIZE_PASS(GlobalMerge, DEBUG_TYPE, "Merge global variables", false, false)

bool GlobalMerge::doMerge(SmallVectorImpl<GlobalVariable*> &Globals,
                          Module &M, bool isConst, unsigned AddrSpace) const {
  auto &DL = M.getDataLayout();
  // FIXME: Find better heuristics
  std::stable_sort(Globals.begin(), Globals.end(),
                   [&DL](const GlobalVariable *GV1, const GlobalVariable *GV2) {
                     return DL.getTypeAllocSize(GV1->getValueType()) <
                            DL.getTypeAllocSize(GV2->getValueType());
                   });

  // If we want to just blindly group all globals together, do so.
  if (!GlobalMergeGroupByUse) {
    BitVector AllGlobals(Globals.size());
    AllGlobals.set();
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // If we want to be smarter, look at all uses of each global, to try to
  // discover all sets of globals used together, and how many times each of
  // these sets occurred.
  //
  // Keep this reasonably efficient, by having an append-only list of all sets
  // discovered so far (UsedGlobalSet), and mapping each "together-ness" unit of
  // code (currently, a Function) to the set of globals seen so far that are
  // used together in that unit (GlobalUsesByFunction).
  //
  // When we look at the Nth global, we know that any new set is either:
  // - the singleton set {N}, containing this global only, or
  // - the union of {N} and a previously-discovered set, containing some
  //   combination of the previous N-1 globals.
  // Using that knowledge, when looking at the Nth global, we can keep:
  // - a reference to the singleton set {N} (CurGVOnlySetIdx)
  // - a list mapping each previous set to its union with {N} (EncounteredUGS),
  //   if it actually occurs.
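  //
  // For illustration, suppose there are three globals A, B, C, where f() uses
  // A and B, and g() uses A, B, and C.  After processing A, both f and g map
  // to {A}; processing B replaces that with {A,B} for both; processing C
  // expands only g's set to {A,B,C}, leaving f mapped to {A,B} with its own
  // usage count.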

  // We keep track of the sets of globals used together "close enough".
  struct UsedGlobalSet {
    BitVector Globals;
    unsigned UsageCount = 1;

    UsedGlobalSet(size_t Size) : Globals(Size) {}
  };

  // Each set is unique in UsedGlobalSets.
  std::vector<UsedGlobalSet> UsedGlobalSets;

  // Avoid repeating the create-global-set pattern.
  auto CreateGlobalSet = [&]() -> UsedGlobalSet & {
    UsedGlobalSets.emplace_back(Globals.size());
    return UsedGlobalSets.back();
  };

  // The first set is the empty set.
  CreateGlobalSet().UsageCount = 0;

  // We define "close enough" to be "in the same function".
  // FIXME: Grouping uses by function is way too aggressive, so we should have
  // a better metric for distance between uses.
  // The obvious alternative would be to group by BasicBlock, but that's in
  // turn too conservative.
  // Anything in between wouldn't be trivial to compute, so just stick with
  // per-function grouping.

  // The value type is an index into UsedGlobalSets.
  // The default (0) conveniently points to the empty set.
  DenseMap<Function *, size_t /*UsedGlobalSetIdx*/> GlobalUsesByFunction;

  // Now, look at each merge-eligible global in turn.

  // Keep track of the sets we already encountered to which we added the
  // current global.
  // Each element matches the same-index element in UsedGlobalSets.
  // This lets us efficiently tell whether a set has already been expanded to
  // include the current global.
  std::vector<size_t> EncounteredUGS;

  for (size_t GI = 0, GE = Globals.size(); GI != GE; ++GI) {
    GlobalVariable *GV = Globals[GI];

    // Reset the encountered sets for this global...
    std::fill(EncounteredUGS.begin(), EncounteredUGS.end(), 0);
    // ...and grow it in case we created new sets for the previous global.
    EncounteredUGS.resize(UsedGlobalSets.size());

    // We might need to create a set that only consists of the current global.
    // Keep track of its index into UsedGlobalSets.
    size_t CurGVOnlySetIdx = 0;

    // For each global, look at all its Uses.
    for (auto &U : GV->uses()) {
      // This Use might be a ConstantExpr.  We're interested in Instruction
      // users, so look through ConstantExpr...
      Use *UI, *UE;
      if (ConstantExpr *CE = dyn_cast<ConstantExpr>(U.getUser())) {
        if (CE->use_empty())
          continue;
        UI = &*CE->use_begin();
        UE = nullptr;
      } else if (isa<Instruction>(U.getUser())) {
        UI = &U;
        UE = UI->getNext();
      } else {
        continue;
      }

      // ...to iterate on all the instruction users of the global.
      // Note that we iterate on Uses and not on Users to be able to getNext().
      for (; UI != UE; UI = UI->getNext()) {
        Instruction *I = dyn_cast<Instruction>(UI->getUser());
        if (!I)
          continue;

        Function *ParentFn = I->getParent()->getParent();

        // If we're only optimizing for size, ignore non-minsize functions.
        if (OnlyOptimizeForSize && !ParentFn->optForMinSize())
          continue;

        size_t UGSIdx = GlobalUsesByFunction[ParentFn];

        // If this is the first global the function uses, map it to the set
        // consisting of this global only.
        if (!UGSIdx) {
          // If that set doesn't exist yet, create it.
          if (!CurGVOnlySetIdx) {
            CurGVOnlySetIdx = UsedGlobalSets.size();
            CreateGlobalSet().Globals.set(GI);
          } else {
            ++UsedGlobalSets[CurGVOnlySetIdx].UsageCount;
          }

          GlobalUsesByFunction[ParentFn] = CurGVOnlySetIdx;
          continue;
        }

        // If the function's current set already contains this global, just
        // increment the counter.
        if (UsedGlobalSets[UGSIdx].Globals.test(GI)) {
          ++UsedGlobalSets[UGSIdx].UsageCount;
          continue;
        }

        // If not, the previous set wasn't actually used in this function.
        --UsedGlobalSets[UGSIdx].UsageCount;

        // If we already expanded the previous set to include this global, just
        // reuse that expanded set.
        if (size_t ExpandedIdx = EncounteredUGS[UGSIdx]) {
          ++UsedGlobalSets[ExpandedIdx].UsageCount;
          GlobalUsesByFunction[ParentFn] = ExpandedIdx;
          continue;
        }

        // If not, create a new set consisting of the union of the previous set
        // and this global.  Mark it as encountered, so we can reuse it later.
        GlobalUsesByFunction[ParentFn] = EncounteredUGS[UGSIdx] =
            UsedGlobalSets.size();

        UsedGlobalSet &NewUGS = CreateGlobalSet();
        NewUGS.Globals.set(GI);
        NewUGS.Globals |= UsedGlobalSets[UGSIdx].Globals;
      }
    }
  }

  // We have now found a bunch of sets of globals used together.  We
  // accumulated the number of times we encountered each set (i.e., roughly
  // how often that exact set of globals is used together within a function).
  //
  // Multiply that by the size of the set to give us a crude profitability
  // metric.
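  // For example, a set of 3 globals used together 4 times scores 12 and is
  // preferred over a set of 2 globals used together 5 times (score 10).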
  std::stable_sort(UsedGlobalSets.begin(), UsedGlobalSets.end(),
            [](const UsedGlobalSet &UGS1, const UsedGlobalSet &UGS2) {
              return UGS1.Globals.count() * UGS1.UsageCount <
                     UGS2.Globals.count() * UGS2.UsageCount;
            });

  // We can choose to merge all globals together, but ignore globals never used
  // with another global.  This catches the obviously non-profitable cases of
  // having a single global, but is aggressive enough for any other case.
  if (GlobalMergeIgnoreSingleUse) {
    BitVector AllGlobals(Globals.size());
    for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
      const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
      if (UGS.UsageCount == 0)
        continue;
      if (UGS.Globals.count() > 1)
        AllGlobals |= UGS.Globals;
    }
    return doMerge(Globals, AllGlobals, M, isConst, AddrSpace);
  }

  // Starting from the sets with the best (=biggest) profitability, find a
  // good combination.
  // The ideal (and expensive) solution can only be found by trying all
  // combinations, looking for the one with the best profitability.
  // Don't be smart about it, and just pick the first compatible combination,
  // starting with the sets with the best profitability.
  BitVector PickedGlobals(Globals.size());
  bool Changed = false;

  for (size_t i = 0, e = UsedGlobalSets.size(); i != e; ++i) {
    const UsedGlobalSet &UGS = UsedGlobalSets[e - i - 1];
    if (UGS.UsageCount == 0)
      continue;
    if (PickedGlobals.anyCommon(UGS.Globals))
      continue;
    PickedGlobals |= UGS.Globals;
    // If the set only contains one global, there's no point in merging.
    // Ignore the global for inclusion in other sets though, so keep it in
    // PickedGlobals.
    if (UGS.Globals.count() < 2)
      continue;
    Changed |= doMerge(Globals, UGS.Globals, M, isConst, AddrSpace);
  }

  return Changed;
}

bool GlobalMerge::doMerge(const SmallVectorImpl<GlobalVariable *> &Globals,
                          const BitVector &GlobalSet, Module &M, bool isConst,
                          unsigned AddrSpace) const {
  assert(Globals.size() > 1);

  Type *Int32Ty = Type::getInt32Ty(M.getContext());
  Type *Int8Ty = Type::getInt8Ty(M.getContext());
  auto &DL = M.getDataLayout();

  LLVM_DEBUG(dbgs() << " Trying to merge set, starts with #"
                    << GlobalSet.find_first() << "\n");

  bool Changed = false;
  ssize_t i = GlobalSet.find_first();
  while (i != -1) {
    ssize_t j = 0;
    uint64_t MergedSize = 0;
    std::vector<Type*> Tys;
    std::vector<Constant*> Inits;
    std::vector<unsigned> StructIdxs;

    bool HasExternal = false;
    StringRef FirstExternalName;
    unsigned MaxAlign = 1;
    unsigned CurIdx = 0;
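    // Greedily pack globals from the set into one merged struct, inserting i8
    // array padding to honor each global's preferred alignment, and stop once
    // adding the next global would exceed MaxOffset (the remaining globals are
    // handled by the next iteration of the outer loop).  For example,
    // appending a 4-byte-aligned global when MergedSize is 6 first adds 2
    // bytes of padding.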
    for (j = i; j != -1; j = GlobalSet.find_next(j)) {
      Type *Ty = Globals[j]->getValueType();
      unsigned Align = DL.getPreferredAlignment(Globals[j]);
      unsigned Padding = alignTo(MergedSize, Align) - MergedSize;
      MergedSize += Padding;
      MergedSize += DL.getTypeAllocSize(Ty);
      if (MergedSize > MaxOffset) {
        break;
      }
      if (Padding) {
        Tys.push_back(ArrayType::get(Int8Ty, Padding));
        Inits.push_back(ConstantAggregateZero::get(Tys.back()));
        ++CurIdx;
      }
      Tys.push_back(Ty);
      Inits.push_back(Globals[j]->getInitializer());
      StructIdxs.push_back(CurIdx++);

      MaxAlign = std::max(MaxAlign, Align);

      if (Globals[j]->hasExternalLinkage() && !HasExternal) {
        HasExternal = true;
        FirstExternalName = Globals[j]->getName();
      }
    }

    // Skip this chunk if it would contain fewer than two globals to merge.
    if (Tys.size() < 2) {
      i = j;
      continue;
    }

    // If none of the merged variables have external linkage, we don't need to
    // expose the symbol after merging.
    GlobalValue::LinkageTypes Linkage = HasExternal
                                            ? GlobalValue::ExternalLinkage
                                            : GlobalValue::InternalLinkage;
    // Use a packed struct so we can control alignment.
    StructType *MergedTy = StructType::get(M.getContext(), Tys, true);
    Constant *MergedInit = ConstantStruct::get(MergedTy, Inits);

    // On Darwin external linkage needs to be preserved, otherwise
    // dsymutil cannot preserve the debug info for the merged
    // variables.  If they have external linkage, use the symbol name
    // of the first variable merged as the suffix of global symbol
    // name.  This avoids a link-time naming conflict for the
    // _MergedGlobals symbols.
    Twine MergedName =
        (IsMachO && HasExternal)
            ? "_MergedGlobals_" + FirstExternalName
            : "_MergedGlobals";
    auto MergedLinkage = IsMachO ? Linkage : GlobalValue::PrivateLinkage;
    auto *MergedGV = new GlobalVariable(
        M, MergedTy, isConst, MergedLinkage, MergedInit, MergedName, nullptr,
        GlobalVariable::NotThreadLocal, AddrSpace);

    MergedGV->setAlignment(MaxAlign);

    const StructLayout *MergedLayout = DL.getStructLayout(MergedTy);
    for (ssize_t k = i, idx = 0; k != j; k = GlobalSet.find_next(k), ++idx) {
      GlobalValue::LinkageTypes Linkage = Globals[k]->getLinkage();
      std::string Name = Globals[k]->getName();
      GlobalValue::DLLStorageClassTypes DLLStorage =
          Globals[k]->getDLLStorageClass();

      // Copy metadata while adjusting any debug info metadata by the original
      // global's offset within the merged global.
      MergedGV->copyMetadata(Globals[k],
                             MergedLayout->getElementOffset(StructIdxs[idx]));

      Constant *Idx[2] = {
          ConstantInt::get(Int32Ty, 0),
          ConstantInt::get(Int32Ty, StructIdxs[idx]),
      };
      Constant *GEP =
          ConstantExpr::getInBoundsGetElementPtr(MergedTy, MergedGV, Idx);
      Globals[k]->replaceAllUsesWith(GEP);
      Globals[k]->eraseFromParent();

      // When the linkage is not internal we must emit an alias for the original
      // variable name as it may be accessed from another object. On non-Mach-O
      // we can also emit an alias for internal linkage as it's safe to do so.
      // It's not safe on Mach-O as the alias (and thus the portion of the
      // MergedGlobals variable) may be dead stripped at link time.
      if (Linkage != GlobalValue::InternalLinkage || !IsMachO) {
        GlobalAlias *GA = GlobalAlias::create(Tys[StructIdxs[idx]], AddrSpace,
                                              Linkage, Name, GEP, &M);
        GA->setDLLStorageClass(DLLStorage);
      }

      NumMerged++;
    }
    Changed = true;
    i = j;
  }

  return Changed;
}

void GlobalMerge::collectUsedGlobalVariables(Module &M, StringRef Name) {
  // Extract global variables from llvm.used array
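  // This is typically an appending global in the "llvm.metadata" section, e.g.
  //   @llvm.used = appending global [1 x i8*]
  //                [i8* bitcast (i32* @x to i8*)], section "llvm.metadata"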
  const GlobalVariable *GV = M.getGlobalVariable(Name);
  if (!GV || !GV->hasInitializer()) return;

  // Should be an array of 'i8*'.
  const ConstantArray *InitList = cast<ConstantArray>(GV->getInitializer());

  for (unsigned i = 0, e = InitList->getNumOperands(); i != e; ++i)
    if (const GlobalVariable *G =
        dyn_cast<GlobalVariable>(InitList->getOperand(i)->stripPointerCasts()))
      MustKeepGlobalVariables.insert(G);
}

void GlobalMerge::setMustKeepGlobalVariables(Module &M) {
  collectUsedGlobalVariables(M, "llvm.used");
  collectUsedGlobalVariables(M, "llvm.compiler.used");

  for (Function &F : M) {
    for (BasicBlock &BB : F) {
      Instruction *Pad = BB.getFirstNonPHI();
      if (!Pad->isEHPad())
        continue;

      // Keep globals used by landingpads and catchpads.
      for (const Use &U : Pad->operands()) {
        if (const GlobalVariable *GV =
                dyn_cast<GlobalVariable>(U->stripPointerCasts()))
          MustKeepGlobalVariables.insert(GV);
      }
    }
  }
}

bool GlobalMerge::doInitialization(Module &M) {
  if (!EnableGlobalMerge)
    return false;

  IsMachO = Triple(M.getTargetTriple()).isOSBinFormatMachO();

  auto &DL = M.getDataLayout();
  DenseMap<unsigned, SmallVector<GlobalVariable *, 16>> Globals, ConstGlobals,
                                                        BSSGlobals;
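  // Candidates are bucketed by address space (and split into non-const, BSS,
  // and const kinds below), since each merged struct must live in a single
  // address space; each bucket is merged independently.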
  bool Changed = false;
  setMustKeepGlobalVariables(M);

  // Collect all candidate globals.
  for (auto &GV : M.globals()) {
    // Merge is safe for "normal" internal or external globals only
    if (GV.isDeclaration() || GV.isThreadLocal() ||
        GV.hasSection() || GV.hasImplicitSection())
      continue;

    // It's not safe to merge globals that may be preempted
    if (TM && !TM->shouldAssumeDSOLocal(M, &GV))
      continue;

    if (!(MergeExternalGlobals && GV.hasExternalLinkage()) &&
        !GV.hasInternalLinkage())
      continue;

    PointerType *PT = dyn_cast<PointerType>(GV.getType());
    assert(PT && "Global variable is not a pointer!");

    unsigned AddressSpace = PT->getAddressSpace();

    // Ignore all 'special' globals.
    if (GV.getName().startswith("llvm.") ||
        GV.getName().startswith(".llvm."))
      continue;

    // Ignore all "required" globals.
    if (isMustKeepGlobalVariable(&GV))
      continue;

    Type *Ty = GV.getValueType();
    if (DL.getTypeAllocSize(Ty) < MaxOffset) {
      if (TM &&
          TargetLoweringObjectFile::getKindForGlobal(&GV, *TM).isBSSLocal())
        BSSGlobals[AddressSpace].push_back(&GV);
      else if (GV.isConstant())
        ConstGlobals[AddressSpace].push_back(&GV);
      else
        Globals[AddressSpace].push_back(&GV);
    }
  }

  for (auto &P : Globals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first);

  for (auto &P : BSSGlobals)
    if (P.second.size() > 1)
      Changed |= doMerge(P.second, M, false, P.first);

  if (EnableGlobalMergeOnConst)
    for (auto &P : ConstGlobals)
      if (P.second.size() > 1)
        Changed |= doMerge(P.second, M, true, P.first);

  return Changed;
}

bool GlobalMerge::runOnFunction(Function &F) {
  return false;
}

bool GlobalMerge::doFinalization(Module &M) {
  MustKeepGlobalVariables.clear();
  return false;
}

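// Factory for targets to create the pass with a target-chosen maximum offset
// (cf. the MaxOffset FIXME above, which mentions the ARM backend passing in
// its own offset limits).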
Pass *llvm::createGlobalMergePass(const TargetMachine *TM, unsigned Offset,
                                  bool OnlyOptimizeForSize,
                                  bool MergeExternalByDefault) {
  bool MergeExternal = (EnableGlobalMergeOnExternal == cl::BOU_UNSET) ?
    MergeExternalByDefault : (EnableGlobalMergeOnExternal == cl::BOU_TRUE);
  return new GlobalMerge(TM, Offset, OnlyOptimizeForSize, MergeExternal);
}