//===- HWAddressSanitizer.cpp - detector of addressability bugs ----------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of HWAddressSanitizer, an address sanity checker
/// based on tagged addressing.
//===----------------------------------------------------------------------===//

#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/Pass.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include "llvm/Transforms/Utils/PromoteMemToReg.h"
#include <limits>

using namespace llvm;

#define DEBUG_TYPE "hwasan"

static const char *const kHwasanModuleCtorName = "hwasan.module_ctor";
static const char *const kHwasanInitName = "__hwasan_init";

static const char *const kHwasanShadowMemoryDynamicAddress =
    "__hwasan_shadow_memory_dynamic_address";

// Access sizes are powers of two: 1, 2, 4, 8, 16.
static const size_t kNumberOfAccessSizes = 5;

static const size_t kDefaultShadowScale = 4;
static const uint64_t kDynamicShadowSentinel =
    std::numeric_limits<uint64_t>::max();
static const unsigned kPointerTagShift = 56;

static cl::opt<std::string> ClMemoryAccessCallbackPrefix(
    "hwasan-memory-access-callback-prefix",
    cl::desc("Prefix for memory access callbacks"), cl::Hidden,
    cl::init("__hwasan_"));

static cl::opt<bool>
    ClInstrumentWithCalls("hwasan-instrument-with-calls",
                cl::desc("instrument reads and writes with callbacks"),
                cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentReads("hwasan-instrument-reads",
                                       cl::desc("instrument read instructions"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentWrites(
    "hwasan-instrument-writes", cl::desc("instrument write instructions"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClInstrumentAtomics(
    "hwasan-instrument-atomics",
    cl::desc("instrument atomic instructions (rmw, cmpxchg)"), cl::Hidden,
    cl::init(true));

static cl::opt<bool> ClRecover(
    "hwasan-recover",
    cl::desc("Enable recovery mode (continue-after-error)."),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClInstrumentStack("hwasan-instrument-stack",
                                       cl::desc("instrument stack (allocas)"),
                                       cl::Hidden, cl::init(true));

static cl::opt<bool> ClUARRetagToZero(
    "hwasan-uar-retag-to-zero",
    cl::desc("Clear alloca tags before returning from the function to allow "
             "mixing instrumented and non-instrumented function calls. When "
             "set to false, allocas are retagged before returning from the "
             "function to detect use after return."),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClGenerateTagsWithCalls(
    "hwasan-generate-tags-with-calls",
    cl::desc("generate new tags with runtime library calls"), cl::Hidden,
    cl::init(false));

static cl::opt<int> ClMatchAllTag(
    "hwasan-match-all-tag",
    cl::desc("don't report bad accesses via pointers with this tag"),
    cl::Hidden, cl::init(-1));

static cl::opt<bool> ClEnableKhwasan(
    "hwasan-kernel",
    cl::desc("Enable KernelHWAddressSanitizer instrumentation"),
    cl::Hidden, cl::init(false));

// These flags allow changing the shadow mapping and control how shadow memory
// is accessed. The shadow mapping looks like:
//    Shadow = (Mem >> scale) + offset
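//
// For example, with the default scale of 4 each shadow byte covers a 16-byte
// granule, so the tag for the granule at address 0x4000 lives at shadow
// address (0x4000 >> 4) + offset = 0x400 + offset.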

static cl::opt<unsigned long long> ClMappingOffset(
    "hwasan-mapping-offset",
    cl::desc("HWASan shadow mapping offset [EXPERIMENTAL]"), cl::Hidden,
    cl::init(0));

namespace {

/// An instrumentation pass implementing detection of addressability bugs
/// using tagged pointers.
class HWAddressSanitizer : public FunctionPass {
public:
  // Pass identification, replacement for typeid.
  static char ID;

  explicit HWAddressSanitizer(bool CompileKernel = false, bool Recover = false)
      : FunctionPass(ID) {
    this->Recover = ClRecover.getNumOccurrences() > 0 ? ClRecover : Recover;
    this->CompileKernel = ClEnableKhwasan.getNumOccurrences() > 0 ?
        ClEnableKhwasan : CompileKernel;
  }

  StringRef getPassName() const override { return "HWAddressSanitizer"; }

  bool runOnFunction(Function &F) override;
  bool doInitialization(Module &M) override;

  void initializeCallbacks(Module &M);

  void maybeInsertDynamicShadowAtFunctionEntry(Function &F);

  void untagPointerOperand(Instruction *I, Value *Addr);
  Value *memToShadow(Value *Shadow, Type *Ty, IRBuilder<> &IRB);
  void instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                                 unsigned AccessSizeIndex,
                                 Instruction *InsertBefore);
  bool instrumentMemAccess(Instruction *I);
  Value *isInterestingMemoryAccess(Instruction *I, bool *IsWrite,
                                   uint64_t *TypeSize, unsigned *Alignment,
                                   Value **MaybeMask);

  bool isInterestingAlloca(const AllocaInst &AI);
  bool tagAlloca(IRBuilder<> &IRB, AllocaInst *AI, Value *Tag);
  Value *tagPointer(IRBuilder<> &IRB, Type *Ty, Value *PtrLong, Value *Tag);
  Value *untagPointer(IRBuilder<> &IRB, Value *PtrLong);
  bool instrumentStack(SmallVectorImpl<AllocaInst *> &Allocas,
                       SmallVectorImpl<Instruction *> &RetVec);
  Value *getNextTagWithCall(IRBuilder<> &IRB);
  Value *getStackBaseTag(IRBuilder<> &IRB);
  Value *getAllocaTag(IRBuilder<> &IRB, Value *StackTag, AllocaInst *AI,
                      unsigned AllocaNo);
  Value *getUARTag(IRBuilder<> &IRB, Value *StackTag);

private:
  LLVMContext *C;
  Triple TargetTriple;

  /// This struct defines the shadow mapping using the rule:
  ///   shadow = (mem >> Scale) + Offset.
  /// If InGlobal is true, then
  ///   extern char __hwasan_shadow[];
  ///   shadow = (mem >> Scale) + &__hwasan_shadow
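  ///
  /// With the default Scale of 4, one shadow byte describes a 16-byte
  /// granule; the same granule size is used as the alignment enforced for
  /// instrumented allocas (see getAllocaAlignment).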
  struct ShadowMapping {
    int Scale;
    uint64_t Offset;
    bool InGlobal;

    void init(Triple &TargetTriple);
    unsigned getAllocaAlignment() const { return 1U << Scale; }
  };
  ShadowMapping Mapping;

  Type *IntptrTy;
  Type *Int8Ty;

  bool CompileKernel;
  bool Recover;

  Function *HwasanCtorFunction;

  Function *HwasanMemoryAccessCallback[2][kNumberOfAccessSizes];
  Function *HwasanMemoryAccessCallbackSized[2];

  Function *HwasanTagMemoryFunc;
  Function *HwasanGenerateTagFunc;

  Constant *ShadowGlobal;

  Value *LocalDynamicShadow = nullptr;
};

} // end anonymous namespace

char HWAddressSanitizer::ID = 0;

INITIALIZE_PASS_BEGIN(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)
INITIALIZE_PASS_END(
    HWAddressSanitizer, "hwasan",
    "HWAddressSanitizer: detect memory bugs using tagged addressing.", false,
    false)

FunctionPass *llvm::createHWAddressSanitizerPass(bool CompileKernel,
                                                 bool Recover) {
  assert(!CompileKernel || Recover);
  return new HWAddressSanitizer(CompileKernel, Recover);
}

/// Module-level initialization.
///
/// Inserts a call to __hwasan_init into the module's constructor list.
bool HWAddressSanitizer::doInitialization(Module &M) {
  LLVM_DEBUG(dbgs() << "Init " << M.getName() << "\n");
  auto &DL = M.getDataLayout();

  TargetTriple = Triple(M.getTargetTriple());

  Mapping.init(TargetTriple);

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  Int8Ty = IRB.getInt8Ty();

  HwasanCtorFunction = nullptr;
  if (!CompileKernel) {
    std::tie(HwasanCtorFunction, std::ignore) =
        createSanitizerCtorAndInitFunctions(M, kHwasanModuleCtorName,
                                            kHwasanInitName,
                                            /*InitArgTypes=*/{},
                                            /*InitArgs=*/{});
    appendToGlobalCtors(M, HwasanCtorFunction, 0);
  }
  return true;
}

void HWAddressSanitizer::initializeCallbacks(Module &M) {
  IRBuilder<> IRB(*C);
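  // The runtime provides one callback per (access kind, size) pair, e.g.
  // __hwasan_load4 and __hwasan_store8, plus sized fallbacks __hwasan_loadN /
  // __hwasan_storeN that take an explicit byte count; in recovery mode each
  // name gets a _noabort suffix. The loops below declare all of them.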
  for (size_t AccessIsWrite = 0; AccessIsWrite <= 1; AccessIsWrite++) {
    const std::string TypeStr = AccessIsWrite ? "store" : "load";
    const std::string EndingStr = Recover ? "_noabort" : "";

    HwasanMemoryAccessCallbackSized[AccessIsWrite] =
        checkSanitizerInterfaceFunction(M.getOrInsertFunction(
            ClMemoryAccessCallbackPrefix + TypeStr + "N" + EndingStr,
            FunctionType::get(IRB.getVoidTy(), {IntptrTy, IntptrTy}, false)));

    for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
         AccessSizeIndex++) {
      HwasanMemoryAccessCallback[AccessIsWrite][AccessSizeIndex] =
          checkSanitizerInterfaceFunction(M.getOrInsertFunction(
              ClMemoryAccessCallbackPrefix + TypeStr +
                  itostr(1ULL << AccessSizeIndex) + EndingStr,
              FunctionType::get(IRB.getVoidTy(), {IntptrTy}, false)));
    }
  }

  HwasanTagMemoryFunc = checkSanitizerInterfaceFunction(M.getOrInsertFunction(
      "__hwasan_tag_memory", IRB.getVoidTy(), IntptrTy, Int8Ty, IntptrTy));
  HwasanGenerateTagFunc = checkSanitizerInterfaceFunction(
      M.getOrInsertFunction("__hwasan_generate_tag", Int8Ty));

  if (Mapping.InGlobal)
    ShadowGlobal = M.getOrInsertGlobal("__hwasan_shadow",
                                       ArrayType::get(IRB.getInt8Ty(), 0));
}

void HWAddressSanitizer::maybeInsertDynamicShadowAtFunctionEntry(Function &F) {
  // Generate code only when dynamic addressing is needed.
  if (Mapping.Offset != kDynamicShadowSentinel)
    return;

  IRBuilder<> IRB(&F.front().front());
  if (Mapping.InGlobal) {
    // An empty inline asm with input reg == output reg.
    // An opaque pointer-to-int cast, basically.
    InlineAsm *Asm = InlineAsm::get(
        FunctionType::get(IntptrTy, {ShadowGlobal->getType()}, false),
        StringRef(""), StringRef("=r,0"),
        /*hasSideEffects=*/false);
    LocalDynamicShadow = IRB.CreateCall(Asm, {ShadowGlobal}, ".hwasan.shadow");
  } else {
    Value *GlobalDynamicAddress = F.getParent()->getOrInsertGlobal(
        kHwasanShadowMemoryDynamicAddress, IntptrTy);
    LocalDynamicShadow = IRB.CreateLoad(GlobalDynamicAddress);
  }
}

Value *HWAddressSanitizer::isInterestingMemoryAccess(Instruction *I,
                                                     bool *IsWrite,
                                                     uint64_t *TypeSize,
                                                     unsigned *Alignment,
                                                     Value **MaybeMask) {
  // Skip memory accesses inserted by another instrumentation.
  if (I->getMetadata("nosanitize")) return nullptr;

  // Do not instrument the load fetching the dynamic shadow address.
  if (LocalDynamicShadow == I)
    return nullptr;

  Value *PtrOperand = nullptr;
  const DataLayout &DL = I->getModule()->getDataLayout();
  if (LoadInst *LI = dyn_cast<LoadInst>(I)) {
    if (!ClInstrumentReads) return nullptr;
    *IsWrite = false;
    *TypeSize = DL.getTypeStoreSizeInBits(LI->getType());
    *Alignment = LI->getAlignment();
    PtrOperand = LI->getPointerOperand();
  } else if (StoreInst *SI = dyn_cast<StoreInst>(I)) {
    if (!ClInstrumentWrites) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(SI->getValueOperand()->getType());
    *Alignment = SI->getAlignment();
    PtrOperand = SI->getPointerOperand();
  } else if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(RMW->getValOperand()->getType());
    *Alignment = 0;
    PtrOperand = RMW->getPointerOperand();
  } else if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I)) {
    if (!ClInstrumentAtomics) return nullptr;
    *IsWrite = true;
    *TypeSize = DL.getTypeStoreSizeInBits(XCHG->getCompareOperand()->getType());
    *Alignment = 0;
    PtrOperand = XCHG->getPointerOperand();
  }

  if (PtrOperand) {
    // Do not instrument accesses from different address spaces; we cannot deal
    // with them.
    Type *PtrTy = cast<PointerType>(PtrOperand->getType()->getScalarType());
    if (PtrTy->getPointerAddressSpace() != 0)
      return nullptr;

    // Ignore swifterror addresses.
    // swifterror memory addresses are mem2reg promoted by instruction
    // selection. As such they cannot have regular uses like an instrumentation
    // function and it makes no sense to track them as memory.
    if (PtrOperand->isSwiftError())
      return nullptr;
  }

  return PtrOperand;
}

static unsigned getPointerOperandIndex(Instruction *I) {
  if (LoadInst *LI = dyn_cast<LoadInst>(I))
    return LI->getPointerOperandIndex();
  if (StoreInst *SI = dyn_cast<StoreInst>(I))
    return SI->getPointerOperandIndex();
  if (AtomicRMWInst *RMW = dyn_cast<AtomicRMWInst>(I))
    return RMW->getPointerOperandIndex();
  if (AtomicCmpXchgInst *XCHG = dyn_cast<AtomicCmpXchgInst>(I))
    return XCHG->getPointerOperandIndex();
  report_fatal_error("Unexpected instruction");
  return -1;
}

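// Map an access size in bits to an index into the per-size callback tables:
// 8 -> 0, 16 -> 1, 32 -> 2, 64 -> 3, 128 -> 4.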
static size_t TypeSizeToSizeIndex(uint32_t TypeSize) {
  size_t Res = countTrailingZeros(TypeSize / 8);
  assert(Res < kNumberOfAccessSizes);
  return Res;
}

void HWAddressSanitizer::untagPointerOperand(Instruction *I, Value *Addr) {
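  // AArch64 hardware ignores the top byte of a pointer (TBI), so tagged
  // pointers can be dereferenced as-is; other targets must strip the tag
  // before the access.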
  if (TargetTriple.isAArch64())
    return;

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
  Value *UntaggedPtr =
      IRB.CreateIntToPtr(untagPointer(IRB, AddrLong), Addr->getType());
  I->setOperand(getPointerOperandIndex(I), UntaggedPtr);
}

Value *HWAddressSanitizer::memToShadow(Value *Mem, Type *Ty, IRBuilder<> &IRB) {
  // Mem >> Scale
  Value *Shadow = IRB.CreateLShr(Mem, Mapping.Scale);
  if (Mapping.Offset == 0)
    return Shadow;
  // (Mem >> Scale) + Offset
  Value *ShadowBase;
  if (LocalDynamicShadow)
    ShadowBase = LocalDynamicShadow;
  else
    ShadowBase = ConstantInt::get(Ty, Mapping.Offset);
  return IRB.CreateAdd(Shadow, ShadowBase);
}

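// Emits an inline tag check for one memory access. Roughly, as a sketch
// (value names are illustrative only, not what the builder produces):
//   %ptrtag = trunc (lshr i64 %ptr, 56) to i8     ; tag from the pointer
//   %addr   = <%ptr with the tag byte cleared>    ; untagPointer
//   %memtag = load i8 from the shadow of %addr    ; memToShadow
//   %bad    = icmp ne i8 %ptrtag, %memtag
// followed by a cold branch to a trapping inline-asm block that encodes the
// access kind and size for the runtime's signal handler.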
void HWAddressSanitizer::instrumentMemAccessInline(Value *PtrLong, bool IsWrite,
                                                   unsigned AccessSizeIndex,
                                                   Instruction *InsertBefore) {
  IRBuilder<> IRB(InsertBefore);
  Value *PtrTag = IRB.CreateTrunc(IRB.CreateLShr(PtrLong, kPointerTagShift),
                                  IRB.getInt8Ty());
  Value *AddrLong = untagPointer(IRB, PtrLong);
  Value *ShadowLong = memToShadow(AddrLong, PtrLong->getType(), IRB);
  Value *MemTag =
      IRB.CreateLoad(IRB.CreateIntToPtr(ShadowLong, IRB.getInt8PtrTy()));
  Value *TagMismatch = IRB.CreateICmpNE(PtrTag, MemTag);

  int matchAllTag = ClMatchAllTag.getNumOccurrences() > 0 ?
      ClMatchAllTag : (CompileKernel ? 0xFF : -1);
  if (matchAllTag != -1) {
    Value *TagNotIgnored = IRB.CreateICmpNE(PtrTag,
        ConstantInt::get(PtrTag->getType(), matchAllTag));
    TagMismatch = IRB.CreateAnd(TagMismatch, TagNotIgnored);
  }

  TerminatorInst *CheckTerm =
      SplitBlockAndInsertIfThen(TagMismatch, InsertBefore, !Recover,
                                MDBuilder(*C).createBranchWeights(1, 100000));

  IRB.SetInsertPoint(CheckTerm);
  const int64_t AccessInfo = Recover * 0x20 + IsWrite * 0x10 + AccessSizeIndex;
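  // For example, a recoverable (Recover == 1) 4-byte read (IsWrite == 0,
  // AccessSizeIndex == 2) encodes as 0x22; the x86_64 asm below then embeds
  // 0x40 + 0x22 = 0x62 as the nopl displacement, and AArch64 embeds
  // 0x900 + 0x22 = 0x922 in the brk immediate.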
  InlineAsm *Asm;
  switch (TargetTriple.getArch()) {
    case Triple::x86_64:
      // The signal handler will find the data address in rdi.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "int3\nnopl " + itostr(0x40 + AccessInfo) + "(%rax)",
          "{rdi}",
          /*hasSideEffects=*/true);
      break;
    case Triple::aarch64:
    case Triple::aarch64_be:
      // The signal handler will find the data address in x0.
      Asm = InlineAsm::get(
          FunctionType::get(IRB.getVoidTy(), {PtrLong->getType()}, false),
          "brk #" + itostr(0x900 + AccessInfo),
          "{x0}",
          /*hasSideEffects=*/true);
      break;
    default:
      report_fatal_error("unsupported architecture");
  }
  IRB.CreateCall(Asm, PtrLong);
}

bool HWAddressSanitizer::instrumentMemAccess(Instruction *I) {
  LLVM_DEBUG(dbgs() << "Instrumenting: " << *I << "\n");
  bool IsWrite = false;
  unsigned Alignment = 0;
  uint64_t TypeSize = 0;
  Value *MaybeMask = nullptr;
  Value *Addr =
      isInterestingMemoryAccess(I, &IsWrite, &TypeSize, &Alignment, &MaybeMask);

  if (!Addr)
    return false;

  if (MaybeMask)
    return false; // FIXME

  IRBuilder<> IRB(I);
  Value *AddrLong = IRB.CreatePointerCast(Addr, IntptrTy);
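  // Fast path: power-of-two sized accesses of at most 16 bytes that are
  // naturally aligned (or at least granule-aligned) cannot straddle a shadow
  // granule, so a single tag check (inline or via the per-size callback)
  // suffices; everything else falls back to the sized callback with an
  // explicit byte count.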
  if (isPowerOf2_64(TypeSize) &&
      (TypeSize / 8 <= (1UL << (kNumberOfAccessSizes - 1))) &&
      (Alignment >= (1UL << Mapping.Scale) || Alignment == 0 ||
       Alignment >= TypeSize / 8)) {
    size_t AccessSizeIndex = TypeSizeToSizeIndex(TypeSize);
    if (ClInstrumentWithCalls) {
      IRB.CreateCall(HwasanMemoryAccessCallback[IsWrite][AccessSizeIndex],
                     AddrLong);
    } else {
      instrumentMemAccessInline(AddrLong, IsWrite, AccessSizeIndex, I);
    }
  } else {
    IRB.CreateCall(HwasanMemoryAccessCallbackSized[IsWrite],
                   {AddrLong, ConstantInt::get(IntptrTy, TypeSize / 8)});
  }
  untagPointerOperand(I, Addr);

  return true;
}

static uint64_t getAllocaSizeInBytes(const AllocaInst &AI) {
  uint64_t ArraySize = 1;
  if (AI.isArrayAllocation()) {
    const ConstantInt *CI = dyn_cast<ConstantInt>(AI.getArraySize());
    assert(CI && "non-constant array size");
    ArraySize = CI->getZExtValue();
  }
  Type *Ty = AI.getAllocatedType();
  uint64_t SizeInBytes = AI.getModule()->getDataLayout().getTypeAllocSize(Ty);
  return SizeInBytes * ArraySize;
}

bool HWAddressSanitizer::tagAlloca(IRBuilder<> &IRB, AllocaInst *AI,
                                   Value *Tag) {
  size_t Size = (getAllocaSizeInBytes(*AI) + Mapping.getAllocaAlignment() - 1) &
                ~(Mapping.getAllocaAlignment() - 1);
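  // Round the size up to a multiple of the shadow granule so every shadow
  // byte covering the alloca is retagged: e.g. a 20-byte alloca with 16-byte
  // granules rounds up to 32 bytes, i.e. 2 shadow bytes.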

  Value *JustTag = IRB.CreateTrunc(Tag, IRB.getInt8Ty());
  if (ClInstrumentWithCalls) {
    IRB.CreateCall(HwasanTagMemoryFunc,
                   {IRB.CreatePointerCast(AI, IntptrTy), JustTag,
                    ConstantInt::get(IntptrTy, Size)});
  } else {
    size_t ShadowSize = Size >> Mapping.Scale;
    Value *ShadowPtr = IRB.CreateIntToPtr(
        memToShadow(IRB.CreatePointerCast(AI, IntptrTy), AI->getType(), IRB),
        IRB.getInt8PtrTy());
    // If this memset is not inlined, it will be intercepted in the hwasan
    // runtime library. That's OK, because the interceptor skips the checks if
    // the address is in the shadow region.
    // FIXME: the interceptor is not as fast as real memset. Consider lowering
    // llvm.memset right here into either a sequence of stores, or a call to
    // hwasan_tag_memory.
    IRB.CreateMemSet(ShadowPtr, JustTag, ShadowSize, /*Align=*/1);
  }
  return true;
}

static unsigned RetagMask(unsigned AllocaNo) {
  // A list of 8-bit numbers that have at most one run of non-zero bits.
  // x = x ^ (mask << 56) can be encoded as a single ARMv8 instruction for
  // these masks.
  // The list does not include the value 255, which is reserved for UAR.
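  // For example, 30 = 0b00011110 has a single run of four set bits, so
  // (30 << 56) is a valid ARMv8 logical immediate for a single EOR; a value
  // like 0b01010101 has multiple runs and would not be encodable that way.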
  static unsigned FastMasks[] = {
      0,   1,   2,   3,   4,   6,   7,   8,   12,  14,  15, 16,  24,
      28,  30,  31,  32,  48,  56,  60,  62,  63,  64,  96, 112, 120,
      124, 126, 127, 128, 192, 224, 240, 248, 252, 254};
  return FastMasks[AllocaNo % (sizeof(FastMasks) / sizeof(FastMasks[0]))];
}

Value *HWAddressSanitizer::getNextTagWithCall(IRBuilder<> &IRB) {
  return IRB.CreateZExt(IRB.CreateCall(HwasanGenerateTagFunc), IntptrTy);
}

Value *HWAddressSanitizer::getStackBaseTag(IRBuilder<> &IRB) {
  if (ClGenerateTagsWithCalls)
    return nullptr;
  // FIXME: use addressofreturnaddress (but implement it in the aarch64 backend
  // first).
  Module *M = IRB.GetInsertBlock()->getParent()->getParent();
  auto GetStackPointerFn =
      Intrinsic::getDeclaration(M, Intrinsic::frameaddress);
  Value *StackPointer = IRB.CreateCall(
      GetStackPointerFn, {Constant::getNullValue(IRB.getInt32Ty())});

  // Extract some entropy from the stack pointer for the tags.
  // Take bits 20..27 (ASLR entropy) and xor them with bits 0..7 (these differ
  // between functions).
  Value *StackPointerLong = IRB.CreatePointerCast(StackPointer, IntptrTy);
  Value *StackTag =
      IRB.CreateXor(StackPointerLong, IRB.CreateLShr(StackPointerLong, 20),
                    "hwasan.stack.base.tag");
  return StackTag;
}

Value *HWAddressSanitizer::getAllocaTag(IRBuilder<> &IRB, Value *StackTag,
                                        AllocaInst *AI, unsigned AllocaNo) {
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag,
                       ConstantInt::get(IntptrTy, RetagMask(AllocaNo)));
}

Value *HWAddressSanitizer::getUARTag(IRBuilder<> &IRB, Value *StackTag) {
  if (ClUARRetagToZero)
    return ConstantInt::get(IntptrTy, 0);
  if (ClGenerateTagsWithCalls)
    return getNextTagWithCall(IRB);
  return IRB.CreateXor(StackTag, ConstantInt::get(IntptrTy, 0xFFU));
}

// Add a tag to an address.
Value *HWAddressSanitizer::tagPointer(IRBuilder<> &IRB, Type *Ty,
                                      Value *PtrLong, Value *Tag) {
  Value *TaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    Value *ShiftedTag = IRB.CreateOr(
        IRB.CreateShl(Tag, kPointerTagShift),
        ConstantInt::get(IntptrTy, (1ULL << kPointerTagShift) - 1));
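    // ANDing with (Tag << 56) | 0x00FF..FF replaces the pointer's all-ones
    // top byte with Tag while leaving the low 56 address bits intact.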
    TaggedPtrLong = IRB.CreateAnd(PtrLong, ShiftedTag);
  } else {
    // Userspace pointers have 0x00 in the top byte, so simply OR in
    // (tag << 56).
    Value *ShiftedTag = IRB.CreateShl(Tag, kPointerTagShift);
    TaggedPtrLong = IRB.CreateOr(PtrLong, ShiftedTag);
  }
  return IRB.CreateIntToPtr(TaggedPtrLong, Ty);
}

// Remove tag from an address.
Value *HWAddressSanitizer::untagPointer(IRBuilder<> &IRB, Value *PtrLong) {
  Value *UntaggedPtrLong;
  if (CompileKernel) {
    // Kernel addresses have 0xFF in the most significant byte.
    UntaggedPtrLong = IRB.CreateOr(PtrLong,
        ConstantInt::get(PtrLong->getType(), 0xFFULL << kPointerTagShift));
  } else {
    // Userspace addresses have 0x00.
    UntaggedPtrLong = IRB.CreateAnd(PtrLong,
        ConstantInt::get(PtrLong->getType(), ~(0xFFULL << kPointerTagShift)));
  }
  return UntaggedPtrLong;
}

bool HWAddressSanitizer::instrumentStack(
    SmallVectorImpl<AllocaInst *> &Allocas,
    SmallVectorImpl<Instruction *> &RetVec) {
  Function *F = Allocas[0]->getParent()->getParent();
  Instruction *InsertPt = &*F->getEntryBlock().begin();
  IRBuilder<> IRB(InsertPt);

  Value *StackTag = getStackBaseTag(IRB);

  // Ideally, we would compute a tagged stack base pointer and rewrite all
  // alloca addresses relative to it. Unfortunately, the offsets are not known
  // yet (unless we use an ASan-style mega-alloca). Instead we keep the base
  // tag in a temporary, XOR it with the per-alloca retag mask, and shift-OR
  // the result into each alloca address. This costs one extra instruction per
  // alloca use.
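  // Per alloca the rewrite looks roughly like this in the userspace case
  // (a sketch; names are illustrative):
  //   %x = alloca i32
  //   %x.long = ptrtoint i32* %x to i64
  //   %x.hwasan = inttoptr (or %x.long, ((StackTag ^ RetagMask(N)) << 56))
  // after which all uses of %x other than %x.long itself are redirected to
  // %x.hwasan, and the alloca's shadow is set to the same tag.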
  for (unsigned N = 0; N < Allocas.size(); ++N) {
    auto *AI = Allocas[N];
    IRB.SetInsertPoint(AI->getNextNode());

    // Replace uses of the alloca with tagged address.
    Value *Tag = getAllocaTag(IRB, StackTag, AI, N);
    Value *AILong = IRB.CreatePointerCast(AI, IntptrTy);
    Value *Replacement = tagPointer(IRB, AI->getType(), AILong, Tag);
    std::string Name =
        AI->hasName() ? AI->getName().str() : "alloca." + itostr(N);
    Replacement->setName(Name + ".hwasan");

    for (auto UI = AI->use_begin(), UE = AI->use_end(); UI != UE;) {
      Use &U = *UI++;
      if (U.getUser() != AILong)
        U.set(Replacement);
    }

    tagAlloca(IRB, AI, Tag);

    for (auto RI : RetVec) {
      IRB.SetInsertPoint(RI);

      // Re-tag alloca memory with the special UAR tag.
      Value *Tag = getUARTag(IRB, StackTag);
      tagAlloca(IRB, AI, Tag);
    }
  }

  return true;
}

bool HWAddressSanitizer::isInterestingAlloca(const AllocaInst &AI) {
  return (AI.getAllocatedType()->isSized() &&
          // FIXME: instrument dynamic allocas, too
          AI.isStaticAlloca() &&
          // alloca() may be called with 0 size, ignore it.
          getAllocaSizeInBytes(AI) > 0 &&
          // We are only interested in allocas not promotable to registers.
          // Promotable allocas are common under -O0.
          !isAllocaPromotable(&AI) &&
          // inalloca allocas are not treated as static, and we don't want
          // dynamic alloca instrumentation for them as well.
          !AI.isUsedWithInAlloca() &&
          // swifterror allocas are register promoted by ISel
          !AI.isSwiftError());
}

bool HWAddressSanitizer::runOnFunction(Function &F) {
  if (&F == HwasanCtorFunction)
    return false;

  if (!F.hasFnAttribute(Attribute::SanitizeHWAddress))
    return false;

  LLVM_DEBUG(dbgs() << "Function: " << F.getName() << "\n");

  initializeCallbacks(*F.getParent());

  assert(!LocalDynamicShadow);
  maybeInsertDynamicShadowAtFunctionEntry(F);

  bool Changed = false;
  SmallVector<Instruction*, 16> ToInstrument;
  SmallVector<AllocaInst*, 8> AllocasToInstrument;
  SmallVector<Instruction*, 8> RetVec;
  for (auto &BB : F) {
    for (auto &Inst : BB) {
      if (ClInstrumentStack)
        if (AllocaInst *AI = dyn_cast<AllocaInst>(&Inst)) {
          // Realign all allocas. We don't want small uninteresting allocas to
          // hide in an instrumented alloca's padding.
          if (AI->getAlignment() < Mapping.getAllocaAlignment())
            AI->setAlignment(Mapping.getAllocaAlignment());
          // Instrument some of them.
          if (isInterestingAlloca(*AI))
            AllocasToInstrument.push_back(AI);
          continue;
        }

      if (isa<ReturnInst>(Inst) || isa<ResumeInst>(Inst) ||
          isa<CleanupReturnInst>(Inst))
        RetVec.push_back(&Inst);

      Value *MaybeMask = nullptr;
      bool IsWrite;
      unsigned Alignment;
      uint64_t TypeSize;
      Value *Addr = isInterestingMemoryAccess(&Inst, &IsWrite, &TypeSize,
                                              &Alignment, &MaybeMask);
      if (Addr || isa<MemIntrinsic>(Inst))
        ToInstrument.push_back(&Inst);
    }
  }

  if (!AllocasToInstrument.empty())
    Changed |= instrumentStack(AllocasToInstrument, RetVec);

  for (auto Inst : ToInstrument)
    Changed |= instrumentMemAccess(Inst);

  LocalDynamicShadow = nullptr;

  return Changed;
}

void HWAddressSanitizer::ShadowMapping::init(Triple &TargetTriple) {
  const bool IsAndroid = TargetTriple.isAndroid();
  const bool IsAndroidWithIfuncSupport =
      IsAndroid && !TargetTriple.isAndroidVersionLT(21);

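  // Only Android 21+ (which can resolve __hwasan_shadow through an ifunc)
  // gets a dynamic shadow base; kernel, callback-based, and all other
  // configurations use a fixed zero offset unless -hwasan-mapping-offset
  // overrides it.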
  Scale = kDefaultShadowScale;

  if (ClEnableKhwasan || ClInstrumentWithCalls || !IsAndroidWithIfuncSupport)
    Offset = 0;
  else
    Offset = kDynamicShadowSentinel;
  if (ClMappingOffset.getNumOccurrences() > 0)
    Offset = ClMappingOffset;

  InGlobal = IsAndroidWithIfuncSupport;
}
764