1 //===-- DataFlowSanitizer.cpp - dynamic data flow analysis ----------------===//
2 //
3 // The LLVM Compiler Infrastructure
4 //
5 // This file is distributed under the University of Illinois Open Source
6 // License. See LICENSE.TXT for details.
7 //
8 //===----------------------------------------------------------------------===//
9 /// \file
10 /// This file is a part of DataFlowSanitizer, a generalised dynamic data flow
11 /// analysis.
12 ///
13 /// Unlike other Sanitizer tools, this tool is not designed to detect a specific
14 /// class of bugs on its own. Instead, it provides a generic dynamic data flow
15 /// analysis framework to be used by clients to help detect application-specific
16 /// issues within their own code.
17 ///
18 /// The analysis is based on automatic propagation of data flow labels (also
19 /// known as taint labels) through a program as it performs computation. Each
20 /// byte of application memory is backed by two bytes of shadow memory which
21 /// hold the label. On Linux/x86_64, memory is laid out as follows:
22 ///
23 /// +--------------------+ 0x800000000000 (top of memory)
24 /// | application memory |
25 /// +--------------------+ 0x700000008000 (kAppAddr)
26 /// | |
27 /// | unused |
28 /// | |
29 /// +--------------------+ 0x200200000000 (kUnusedAddr)
30 /// | union table |
31 /// +--------------------+ 0x200000000000 (kUnionTableAddr)
32 /// | shadow memory |
33 /// +--------------------+ 0x000000010000 (kShadowAddr)
34 /// | reserved by kernel |
35 /// +--------------------+ 0x000000000000
36 ///
37 /// To derive a shadow memory address from an application memory address,
38 /// bits 44-46 are cleared to bring the address into the range
39 /// [0x000000008000,0x100000000000). Then the address is shifted left by 1 to
40 /// account for the double byte representation of shadow labels and move the
41 /// address into the shadow memory range. See the function
42 /// DataFlowSanitizer::getShadowAddress below.
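///
/// As a worked example (added for illustration; the values follow directly
/// from the constants above): for the application address 0x700000008000,
/// clearing bits 44-46 gives 0x000000008000, and multiplying by 2 (two
/// shadow bytes per application byte) yields 0x000000010000, i.e.
/// kShadowAddr.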
43 ///
44 /// For more information, please refer to the design document:
45 /// http://clang.llvm.org/docs/DataFlowSanitizerDesign.html
46
47 #include "llvm/Transforms/Instrumentation.h"
48 #include "llvm/ADT/DenseMap.h"
49 #include "llvm/ADT/DenseSet.h"
50 #include "llvm/ADT/DepthFirstIterator.h"
51 #include "llvm/ADT/StringExtras.h"
52 #include "llvm/ADT/Triple.h"
53 #include "llvm/Analysis/ValueTracking.h"
54 #include "llvm/IR/Dominators.h"
55 #include "llvm/IR/DebugInfo.h"
56 #include "llvm/IR/IRBuilder.h"
57 #include "llvm/IR/InlineAsm.h"
58 #include "llvm/IR/InstVisitor.h"
59 #include "llvm/IR/LLVMContext.h"
60 #include "llvm/IR/MDBuilder.h"
61 #include "llvm/IR/Type.h"
62 #include "llvm/IR/Value.h"
63 #include "llvm/Pass.h"
64 #include "llvm/Support/CommandLine.h"
65 #include "llvm/Support/SpecialCaseList.h"
66 #include "llvm/Transforms/Utils/BasicBlockUtils.h"
67 #include "llvm/Transforms/Utils/Local.h"
68 #include <algorithm>
69 #include <iterator>
70 #include <set>
71 #include <utility>
72
73 using namespace llvm;
74
75 // External symbol to be used when generating the shadow address for
76 // architectures with multiple VMAs. Instead of using a constant integer,
77 // the runtime will set the external mask based on the VMA range.
78 static const char *const kDFSanExternShadowPtrMask = "__dfsan_shadow_ptr_mask";
79
80 // The -dfsan-preserve-alignment flag controls whether this pass assumes that
81 // alignment requirements provided by the input IR are correct. For example,
82 // if the input IR contains a load with alignment 8, this flag will cause
83 // the shadow load to have alignment 16. This flag is disabled by default as
84 // we have unfortunately encountered too much code (including Clang itself;
85 // see PR14291) which performs misaligned access.
86 static cl::opt<bool> ClPreserveAlignment(
87 "dfsan-preserve-alignment",
88 cl::desc("respect alignment requirements provided by input IR"), cl::Hidden,
89 cl::init(false));
90
91 // The ABI list files control how shadow parameters are passed. The pass treats
92 // every function labelled "uninstrumented" in the ABI list file as conforming
93 // to the "native" (i.e. unsanitized) ABI. Unless the ABI list contains
94 // additional annotations for those functions, a call to one of those functions
95 // will produce a warning message, as the labelling behaviour of the function is
96 // unknown. The other supported annotations are "functional" and "discard",
97 // which are described below under DataFlowSanitizer::WrapperKind.
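//
// For illustration only (these entries are hypothetical, not shipped with the
// pass), an ABI list is a SpecialCaseList and might contain lines such as:
//
//   fun:main=uninstrumented
//   fun:main=discard
//   fun:memcpy=uninstrumented
//   fun:memcpy=custom
//   fun:strlen=uninstrumented
//   fun:strlen=functional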
98 static cl::list<std::string> ClABIListFiles(
99 "dfsan-abilist",
100 cl::desc("File listing native ABI functions and how the pass treats them"),
101 cl::Hidden);
102
103 // Controls whether the pass uses IA_Args or IA_TLS as the ABI for instrumented
104 // functions (see DataFlowSanitizer::InstrumentedABI below).
105 static cl::opt<bool> ClArgsABI(
106 "dfsan-args-abi",
107 cl::desc("Use the argument ABI rather than the TLS ABI"),
108 cl::Hidden);
109
110 // Controls whether the pass includes or ignores the labels of pointers in load
111 // instructions.
112 static cl::opt<bool> ClCombinePointerLabelsOnLoad(
113 "dfsan-combine-pointer-labels-on-load",
114 cl::desc("Combine the label of the pointer with the label of the data when "
115 "loading from memory."),
116 cl::Hidden, cl::init(true));
117
118 // Controls whether the pass includes or ignores the labels of pointers in
119 // store instructions.
120 static cl::opt<bool> ClCombinePointerLabelsOnStore(
121 "dfsan-combine-pointer-labels-on-store",
122 cl::desc("Combine the label of the pointer with the label of the data when "
123 "storing in memory."),
124 cl::Hidden, cl::init(false));
125
126 static cl::opt<bool> ClDebugNonzeroLabels(
127 "dfsan-debug-nonzero-labels",
128 cl::desc("Insert calls to __dfsan_nonzero_label on observing a parameter, "
129 "load or return with a nonzero label"),
130 cl::Hidden);
131
132
133 namespace {
134
135 StringRef GetGlobalTypeString(const GlobalValue &G) {
136 // Types of GlobalVariables are always pointer types.
137 Type *GType = G.getValueType();
138 // For now we support blacklisting struct types only.
139 if (StructType *SGType = dyn_cast<StructType>(GType)) {
140 if (!SGType->isLiteral())
141 return SGType->getName();
142 }
143 return "<unknown type>";
144 }
145
146 class DFSanABIList {
147 std::unique_ptr<SpecialCaseList> SCL;
148
149 public:
150 DFSanABIList() {}
151
152 void set(std::unique_ptr<SpecialCaseList> List) { SCL = std::move(List); }
153
154 /// Returns whether either this function or its source file is listed in the
155 /// given category.
156 bool isIn(const Function &F, StringRef Category) const {
157 return isIn(*F.getParent(), Category) ||
158 SCL->inSection("fun", F.getName(), Category);
159 }
160
161 /// Returns whether this global alias is listed in the given category.
162 ///
163 /// If GA aliases a function, the alias's name is matched as a function name
164 /// would be. Similarly, aliases of globals are matched like globals.
165 bool isIn(const GlobalAlias &GA, StringRef Category) const {
166 if (isIn(*GA.getParent(), Category))
167 return true;
168
169 if (isa<FunctionType>(GA.getValueType()))
170 return SCL->inSection("fun", GA.getName(), Category);
171
172 return SCL->inSection("global", GA.getName(), Category) ||
173 SCL->inSection("type", GetGlobalTypeString(GA), Category);
174 }
175
176 /// Returns whether this module is listed in the given category.
177 bool isIn(const Module &M, StringRef Category) const {
178 return SCL->inSection("src", M.getModuleIdentifier(), Category);
179 }
180 };
181
182 class DataFlowSanitizer : public ModulePass {
183 friend struct DFSanFunction;
184 friend class DFSanVisitor;
185
186 enum {
187 ShadowWidth = 16
188 };
189
190 /// Which ABI should be used for instrumented functions?
191 enum InstrumentedABI {
192 /// Argument and return value labels are passed through additional
193 /// arguments and by modifying the return type.
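///
/// For example (illustrative; see getArgsFunctionType below), a function of
/// type "i32 (i32, i32)" becomes "{ i32, i16 } (i32, i32, i16, i16)", where
/// i16 is the 16-bit shadow type.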
194 IA_Args,
195
196 /// Argument and return value labels are passed through TLS variables
197 /// __dfsan_arg_tls and __dfsan_retval_tls.
198 IA_TLS
199 };
200
201 /// How should calls to uninstrumented functions be handled?
202 enum WrapperKind {
203 /// This function is present in an uninstrumented form but we don't know
204 /// how it should be handled. Print a warning and call the function anyway.
205 /// Don't label the return value.
206 WK_Warning,
207
208 /// This function does not write to (user-accessible) memory, and its return
209 /// value is unlabelled.
210 WK_Discard,
211
212 /// This function does not write to (user-accessible) memory, and the label
213 /// of its return value is the union of the label of its arguments.
214 WK_Functional,
215
216 /// Instead of calling the function, a custom wrapper __dfsw_F is called,
217 /// where F is the name of the function. This function may wrap the
218 /// original function or provide its own implementation. This is similar to
219 /// the IA_Args ABI, except that IA_Args uses a struct return type to
220 /// pass the return value shadow in a register, while WK_Custom uses an
221 /// extra pointer argument to return the shadow. This allows the wrapped
222 /// form of the function type to be expressed in C.
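///
/// For example (illustrative, following the design document rather than
/// anything defined in this file), a custom function "int f(int x)" is
/// wrapped by a function with the C signature
/// "int __dfsw_f(int x, dfsan_label x_label, dfsan_label *ret_label)".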
223 WK_Custom
224 };
225
226 Module *Mod;
227 LLVMContext *Ctx;
228 IntegerType *ShadowTy;
229 PointerType *ShadowPtrTy;
230 IntegerType *IntptrTy;
231 ConstantInt *ZeroShadow;
232 ConstantInt *ShadowPtrMask;
233 ConstantInt *ShadowPtrMul;
234 Constant *ArgTLS;
235 Constant *RetvalTLS;
236 void *(*GetArgTLSPtr)();
237 void *(*GetRetvalTLSPtr)();
238 Constant *GetArgTLS;
239 Constant *GetRetvalTLS;
240 Constant *ExternalShadowMask;
241 FunctionType *DFSanUnionFnTy;
242 FunctionType *DFSanUnionLoadFnTy;
243 FunctionType *DFSanUnimplementedFnTy;
244 FunctionType *DFSanSetLabelFnTy;
245 FunctionType *DFSanNonzeroLabelFnTy;
246 FunctionType *DFSanVarargWrapperFnTy;
247 Constant *DFSanUnionFn;
248 Constant *DFSanCheckedUnionFn;
249 Constant *DFSanUnionLoadFn;
250 Constant *DFSanUnimplementedFn;
251 Constant *DFSanSetLabelFn;
252 Constant *DFSanNonzeroLabelFn;
253 Constant *DFSanVarargWrapperFn;
254 MDNode *ColdCallWeights;
255 DFSanABIList ABIList;
256 DenseMap<Value *, Function *> UnwrappedFnMap;
257 AttributeSet ReadOnlyNoneAttrs;
258 bool DFSanRuntimeShadowMask;
259
260 Value *getShadowAddress(Value *Addr, Instruction *Pos);
261 bool isInstrumented(const Function *F);
262 bool isInstrumented(const GlobalAlias *GA);
263 FunctionType *getArgsFunctionType(FunctionType *T);
264 FunctionType *getTrampolineFunctionType(FunctionType *T);
265 FunctionType *getCustomFunctionType(FunctionType *T);
266 InstrumentedABI getInstrumentedABI();
267 WrapperKind getWrapperKind(Function *F);
268 void addGlobalNamePrefix(GlobalValue *GV);
269 Function *buildWrapperFunction(Function *F, StringRef NewFName,
270 GlobalValue::LinkageTypes NewFLink,
271 FunctionType *NewFT);
272 Constant *getOrBuildTrampolineFunction(FunctionType *FT, StringRef FName);
273
274 public:
275 DataFlowSanitizer(
276 const std::vector<std::string> &ABIListFiles = std::vector<std::string>(),
277 void *(*getArgTLS)() = nullptr, void *(*getRetValTLS)() = nullptr);
278 static char ID;
279 bool doInitialization(Module &M) override;
280 bool runOnModule(Module &M) override;
281 };
282
283 struct DFSanFunction {
284 DataFlowSanitizer &DFS;
285 Function *F;
286 DominatorTree DT;
287 DataFlowSanitizer::InstrumentedABI IA;
288 bool IsNativeABI;
289 Value *ArgTLSPtr;
290 Value *RetvalTLSPtr;
291 AllocaInst *LabelReturnAlloca;
292 DenseMap<Value *, Value *> ValShadowMap;
293 DenseMap<AllocaInst *, AllocaInst *> AllocaShadowMap;
294 std::vector<std::pair<PHINode *, PHINode *> > PHIFixups;
295 DenseSet<Instruction *> SkipInsts;
296 std::vector<Value *> NonZeroChecks;
297 bool AvoidNewBlocks;
298
299 struct CachedCombinedShadow {
300 BasicBlock *Block;
301 Value *Shadow;
302 };
303 DenseMap<std::pair<Value *, Value *>, CachedCombinedShadow>
304 CachedCombinedShadows;
305 DenseMap<Value *, std::set<Value *>> ShadowElements;
306
307 DFSanFunction(DataFlowSanitizer &DFS, Function *F, bool IsNativeABI)
308 : DFS(DFS), F(F), IA(DFS.getInstrumentedABI()),
309 IsNativeABI(IsNativeABI), ArgTLSPtr(nullptr), RetvalTLSPtr(nullptr),
310 LabelReturnAlloca(nullptr) {
311 DT.recalculate(*F);
312 // FIXME: Need to track down the register allocator issue which causes poor
313 // performance in pathological cases with large numbers of basic blocks.
314 AvoidNewBlocks = F->size() > 1000;
315 }
316 Value *getArgTLSPtr();
317 Value *getArgTLS(unsigned Index, Instruction *Pos);
318 Value *getRetvalTLS();
319 Value *getShadow(Value *V);
320 void setShadow(Instruction *I, Value *Shadow);
321 Value *combineShadows(Value *V1, Value *V2, Instruction *Pos);
322 Value *combineOperandShadows(Instruction *Inst);
323 Value *loadShadow(Value *ShadowAddr, uint64_t Size, uint64_t Align,
324 Instruction *Pos);
325 void storeShadow(Value *Addr, uint64_t Size, uint64_t Align, Value *Shadow,
326 Instruction *Pos);
327 };
328
329 class DFSanVisitor : public InstVisitor<DFSanVisitor> {
330 public:
331 DFSanFunction &DFSF;
332 DFSanVisitor(DFSanFunction &DFSF) : DFSF(DFSF) {}
333
334 void visitOperandShadowInst(Instruction &I);
335
336 void visitBinaryOperator(BinaryOperator &BO);
337 void visitCastInst(CastInst &CI);
338 void visitCmpInst(CmpInst &CI);
339 void visitGetElementPtrInst(GetElementPtrInst &GEPI);
340 void visitLoadInst(LoadInst &LI);
341 void visitStoreInst(StoreInst &SI);
342 void visitReturnInst(ReturnInst &RI);
343 void visitCallSite(CallSite CS);
344 void visitPHINode(PHINode &PN);
345 void visitExtractElementInst(ExtractElementInst &I);
346 void visitInsertElementInst(InsertElementInst &I);
347 void visitShuffleVectorInst(ShuffleVectorInst &I);
348 void visitExtractValueInst(ExtractValueInst &I);
349 void visitInsertValueInst(InsertValueInst &I);
350 void visitAllocaInst(AllocaInst &I);
351 void visitSelectInst(SelectInst &I);
352 void visitMemSetInst(MemSetInst &I);
353 void visitMemTransferInst(MemTransferInst &I);
354 };
355
356 }
357
358 char DataFlowSanitizer::ID;
359 INITIALIZE_PASS(DataFlowSanitizer, "dfsan",
360 "DataFlowSanitizer: dynamic data flow analysis.", false, false)
361
362 ModulePass *
363 llvm::createDataFlowSanitizerPass(const std::vector<std::string> &ABIListFiles,
364 void *(*getArgTLS)(),
365 void *(*getRetValTLS)()) {
366 return new DataFlowSanitizer(ABIListFiles, getArgTLS, getRetValTLS);
367 }
368
369 DataFlowSanitizer::DataFlowSanitizer(
370 const std::vector<std::string> &ABIListFiles, void *(*getArgTLS)(),
371 void *(*getRetValTLS)())
372 : ModulePass(ID), GetArgTLSPtr(getArgTLS), GetRetvalTLSPtr(getRetValTLS),
373 DFSanRuntimeShadowMask(false) {
374 std::vector<std::string> AllABIListFiles(std::move(ABIListFiles));
375 AllABIListFiles.insert(AllABIListFiles.end(), ClABIListFiles.begin(),
376 ClABIListFiles.end());
377 ABIList.set(SpecialCaseList::createOrDie(AllABIListFiles));
378 }
379
380 FunctionType *DataFlowSanitizer::getArgsFunctionType(FunctionType *T) {
381 llvm::SmallVector<Type *, 4> ArgTypes(T->param_begin(), T->param_end());
382 ArgTypes.append(T->getNumParams(), ShadowTy);
383 if (T->isVarArg())
384 ArgTypes.push_back(ShadowPtrTy);
385 Type *RetType = T->getReturnType();
386 if (!RetType->isVoidTy())
387 RetType = StructType::get(RetType, ShadowTy, (Type *)nullptr);
388 return FunctionType::get(RetType, ArgTypes, T->isVarArg());
389 }
390
391 FunctionType *DataFlowSanitizer::getTrampolineFunctionType(FunctionType *T) {
392 assert(!T->isVarArg());
393 llvm::SmallVector<Type *, 4> ArgTypes;
394 ArgTypes.push_back(T->getPointerTo());
395 ArgTypes.append(T->param_begin(), T->param_end());
396 ArgTypes.append(T->getNumParams(), ShadowTy);
397 Type *RetType = T->getReturnType();
398 if (!RetType->isVoidTy())
399 ArgTypes.push_back(ShadowPtrTy);
400 return FunctionType::get(T->getReturnType(), ArgTypes, false);
401 }
402
403 FunctionType *DataFlowSanitizer::getCustomFunctionType(FunctionType *T) {
404 llvm::SmallVector<Type *, 4> ArgTypes;
405 for (FunctionType::param_iterator i = T->param_begin(), e = T->param_end();
406 i != e; ++i) {
407 FunctionType *FT;
408 if (isa<PointerType>(*i) && (FT = dyn_cast<FunctionType>(cast<PointerType>(
409 *i)->getElementType()))) {
410 ArgTypes.push_back(getTrampolineFunctionType(FT)->getPointerTo());
411 ArgTypes.push_back(Type::getInt8PtrTy(*Ctx));
412 } else {
413 ArgTypes.push_back(*i);
414 }
415 }
416 for (unsigned i = 0, e = T->getNumParams(); i != e; ++i)
417 ArgTypes.push_back(ShadowTy);
418 if (T->isVarArg())
419 ArgTypes.push_back(ShadowPtrTy);
420 Type *RetType = T->getReturnType();
421 if (!RetType->isVoidTy())
422 ArgTypes.push_back(ShadowPtrTy);
423 return FunctionType::get(T->getReturnType(), ArgTypes, T->isVarArg());
424 }
425
426 bool DataFlowSanitizer::doInitialization(Module &M) {
427 llvm::Triple TargetTriple(M.getTargetTriple());
428 bool IsX86_64 = TargetTriple.getArch() == llvm::Triple::x86_64;
429 bool IsMIPS64 = TargetTriple.getArch() == llvm::Triple::mips64 ||
430 TargetTriple.getArch() == llvm::Triple::mips64el;
431 bool IsAArch64 = TargetTriple.getArch() == llvm::Triple::aarch64 ||
432 TargetTriple.getArch() == llvm::Triple::aarch64_be;
433
434 const DataLayout &DL = M.getDataLayout();
435
436 Mod = &M;
437 Ctx = &M.getContext();
438 ShadowTy = IntegerType::get(*Ctx, ShadowWidth);
439 ShadowPtrTy = PointerType::getUnqual(ShadowTy);
440 IntptrTy = DL.getIntPtrType(*Ctx);
441 ZeroShadow = ConstantInt::getSigned(ShadowTy, 0);
442 ShadowPtrMul = ConstantInt::getSigned(IntptrTy, ShadowWidth / 8);
443 if (IsX86_64)
444 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0x700000000000LL);
445 else if (IsMIPS64)
446 ShadowPtrMask = ConstantInt::getSigned(IntptrTy, ~0xF000000000LL);
447 // AArch64 supports multiple VMAs and the shadow mask is set at runtime.
448 else if (IsAArch64)
449 DFSanRuntimeShadowMask = true;
450 else
451 report_fatal_error("unsupported triple");
452
453 Type *DFSanUnionArgs[2] = { ShadowTy, ShadowTy };
454 DFSanUnionFnTy =
455 FunctionType::get(ShadowTy, DFSanUnionArgs, /*isVarArg=*/ false);
456 Type *DFSanUnionLoadArgs[2] = { ShadowPtrTy, IntptrTy };
457 DFSanUnionLoadFnTy =
458 FunctionType::get(ShadowTy, DFSanUnionLoadArgs, /*isVarArg=*/ false);
459 DFSanUnimplementedFnTy = FunctionType::get(
460 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
461 Type *DFSanSetLabelArgs[3] = { ShadowTy, Type::getInt8PtrTy(*Ctx), IntptrTy };
462 DFSanSetLabelFnTy = FunctionType::get(Type::getVoidTy(*Ctx),
463 DFSanSetLabelArgs, /*isVarArg=*/false);
464 DFSanNonzeroLabelFnTy = FunctionType::get(
465 Type::getVoidTy(*Ctx), None, /*isVarArg=*/false);
466 DFSanVarargWrapperFnTy = FunctionType::get(
467 Type::getVoidTy(*Ctx), Type::getInt8PtrTy(*Ctx), /*isVarArg=*/false);
468
469 if (GetArgTLSPtr) {
470 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
471 ArgTLS = nullptr;
472 GetArgTLS = ConstantExpr::getIntToPtr(
473 ConstantInt::get(IntptrTy, uintptr_t(GetArgTLSPtr)),
474 PointerType::getUnqual(
475 FunctionType::get(PointerType::getUnqual(ArgTLSTy),
476 (Type *)nullptr)));
477 }
478 if (GetRetvalTLSPtr) {
479 RetvalTLS = nullptr;
480 GetRetvalTLS = ConstantExpr::getIntToPtr(
481 ConstantInt::get(IntptrTy, uintptr_t(GetRetvalTLSPtr)),
482 PointerType::getUnqual(
483 FunctionType::get(PointerType::getUnqual(ShadowTy),
484 (Type *)nullptr)));
485 }
486
487 ColdCallWeights = MDBuilder(*Ctx).createBranchWeights(1, 1000);
488 return true;
489 }
490
491 bool DataFlowSanitizer::isInstrumented(const Function *F) {
492 return !ABIList.isIn(*F, "uninstrumented");
493 }
494
495 bool DataFlowSanitizer::isInstrumented(const GlobalAlias *GA) {
496 return !ABIList.isIn(*GA, "uninstrumented");
497 }
498
499 DataFlowSanitizer::InstrumentedABI DataFlowSanitizer::getInstrumentedABI() {
500 return ClArgsABI ? IA_Args : IA_TLS;
501 }
502
503 DataFlowSanitizer::WrapperKind DataFlowSanitizer::getWrapperKind(Function *F) {
504 if (ABIList.isIn(*F, "functional"))
505 return WK_Functional;
506 if (ABIList.isIn(*F, "discard"))
507 return WK_Discard;
508 if (ABIList.isIn(*F, "custom"))
509 return WK_Custom;
510
511 return WK_Warning;
512 }
513
514 void DataFlowSanitizer::addGlobalNamePrefix(GlobalValue *GV) {
515 std::string GVName = GV->getName(), Prefix = "dfs$";
516 GV->setName(Prefix + GVName);
517
518 // Try to change the name of the function in module inline asm. We only do
519 // this for specific asm directives, currently only ".symver", to try to avoid
520 // corrupting asm which happens to contain the symbol name as a substring.
521 // Note that the substitution for .symver assumes that the versioned symbol
522 // also has an instrumented name.
523 std::string Asm = GV->getParent()->getModuleInlineAsm();
524 std::string SearchStr = ".symver " + GVName + ",";
525 size_t Pos = Asm.find(SearchStr);
526 if (Pos != std::string::npos) {
527 Asm.replace(Pos, SearchStr.size(),
528 ".symver " + Prefix + GVName + "," + Prefix);
529 GV->getParent()->setModuleInlineAsm(Asm);
530 }
531 }
532
533 Function *
534 DataFlowSanitizer::buildWrapperFunction(Function *F, StringRef NewFName,
535 GlobalValue::LinkageTypes NewFLink,
536 FunctionType *NewFT) {
537 FunctionType *FT = F->getFunctionType();
538 Function *NewF = Function::Create(NewFT, NewFLink, NewFName,
539 F->getParent());
540 NewF->copyAttributesFrom(F);
541 NewF->removeAttributes(
542 AttributeSet::ReturnIndex,
543 AttributeSet::get(F->getContext(), AttributeSet::ReturnIndex,
544 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
545
546 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", NewF);
547 if (F->isVarArg()) {
548 NewF->removeAttributes(
549 AttributeSet::FunctionIndex,
550 AttributeSet().addAttribute(*Ctx, AttributeSet::FunctionIndex,
551 "split-stack"));
552 CallInst::Create(DFSanVarargWrapperFn,
553 IRBuilder<>(BB).CreateGlobalStringPtr(F->getName()), "",
554 BB);
555 new UnreachableInst(*Ctx, BB);
556 } else {
557 std::vector<Value *> Args;
558 unsigned n = FT->getNumParams();
559 for (Function::arg_iterator ai = NewF->arg_begin(); n != 0; ++ai, --n)
560 Args.push_back(&*ai);
561 CallInst *CI = CallInst::Create(F, Args, "", BB);
562 if (FT->getReturnType()->isVoidTy())
563 ReturnInst::Create(*Ctx, BB);
564 else
565 ReturnInst::Create(*Ctx, CI, BB);
566 }
567
568 return NewF;
569 }
570
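// A trampoline takes a pointer to the original function, the original
// arguments, one shadow per argument and, for non-void functions, a pointer
// through which the return shadow is written. Custom wrappers receive a
// trampoline plus an i8* to the original callee in place of each
// function-pointer argument (see the WK_Custom handling in visitCallSite).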
571 Constant *DataFlowSanitizer::getOrBuildTrampolineFunction(FunctionType *FT,
572 StringRef FName) {
573 FunctionType *FTT = getTrampolineFunctionType(FT);
574 Constant *C = Mod->getOrInsertFunction(FName, FTT);
575 Function *F = dyn_cast<Function>(C);
576 if (F && F->isDeclaration()) {
577 F->setLinkage(GlobalValue::LinkOnceODRLinkage);
578 BasicBlock *BB = BasicBlock::Create(*Ctx, "entry", F);
579 std::vector<Value *> Args;
580 Function::arg_iterator AI = F->arg_begin(); ++AI;
581 for (unsigned N = FT->getNumParams(); N != 0; ++AI, --N)
582 Args.push_back(&*AI);
583 CallInst *CI =
584 CallInst::Create(&F->getArgumentList().front(), Args, "", BB);
585 ReturnInst *RI;
586 if (FT->getReturnType()->isVoidTy())
587 RI = ReturnInst::Create(*Ctx, BB);
588 else
589 RI = ReturnInst::Create(*Ctx, CI, BB);
590
591 DFSanFunction DFSF(*this, F, /*IsNativeABI=*/true);
592 Function::arg_iterator ValAI = F->arg_begin(), ShadowAI = AI; ++ValAI;
593 for (unsigned N = FT->getNumParams(); N != 0; ++ValAI, ++ShadowAI, --N)
594 DFSF.ValShadowMap[&*ValAI] = &*ShadowAI;
595 DFSanVisitor(DFSF).visitCallInst(*CI);
596 if (!FT->getReturnType()->isVoidTy())
597 new StoreInst(DFSF.getShadow(RI->getReturnValue()),
598 &F->getArgumentList().back(), RI);
599 }
600
601 return C;
602 }
603
604 bool DataFlowSanitizer::runOnModule(Module &M) {
605 if (ABIList.isIn(M, "skip"))
606 return false;
607
608 if (!GetArgTLSPtr) {
609 Type *ArgTLSTy = ArrayType::get(ShadowTy, 64);
610 ArgTLS = Mod->getOrInsertGlobal("__dfsan_arg_tls", ArgTLSTy);
611 if (GlobalVariable *G = dyn_cast<GlobalVariable>(ArgTLS))
612 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
613 }
614 if (!GetRetvalTLSPtr) {
615 RetvalTLS = Mod->getOrInsertGlobal("__dfsan_retval_tls", ShadowTy);
616 if (GlobalVariable *G = dyn_cast<GlobalVariable>(RetvalTLS))
617 G->setThreadLocalMode(GlobalVariable::InitialExecTLSModel);
618 }
619
620 ExternalShadowMask =
621 Mod->getOrInsertGlobal(kDFSanExternShadowPtrMask, IntptrTy);
622
623 DFSanUnionFn = Mod->getOrInsertFunction("__dfsan_union", DFSanUnionFnTy);
624 if (Function *F = dyn_cast<Function>(DFSanUnionFn)) {
625 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
626 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
627 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
628 F->addAttribute(1, Attribute::ZExt);
629 F->addAttribute(2, Attribute::ZExt);
630 }
631 DFSanCheckedUnionFn = Mod->getOrInsertFunction("dfsan_union", DFSanUnionFnTy);
632 if (Function *F = dyn_cast<Function>(DFSanCheckedUnionFn)) {
633 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
634 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadNone);
635 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
636 F->addAttribute(1, Attribute::ZExt);
637 F->addAttribute(2, Attribute::ZExt);
638 }
639 DFSanUnionLoadFn =
640 Mod->getOrInsertFunction("__dfsan_union_load", DFSanUnionLoadFnTy);
641 if (Function *F = dyn_cast<Function>(DFSanUnionLoadFn)) {
642 F->addAttribute(AttributeSet::FunctionIndex, Attribute::NoUnwind);
643 F->addAttribute(AttributeSet::FunctionIndex, Attribute::ReadOnly);
644 F->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
645 }
646 DFSanUnimplementedFn =
647 Mod->getOrInsertFunction("__dfsan_unimplemented", DFSanUnimplementedFnTy);
648 DFSanSetLabelFn =
649 Mod->getOrInsertFunction("__dfsan_set_label", DFSanSetLabelFnTy);
650 if (Function *F = dyn_cast<Function>(DFSanSetLabelFn)) {
651 F->addAttribute(1, Attribute::ZExt);
652 }
653 DFSanNonzeroLabelFn =
654 Mod->getOrInsertFunction("__dfsan_nonzero_label", DFSanNonzeroLabelFnTy);
655 DFSanVarargWrapperFn = Mod->getOrInsertFunction("__dfsan_vararg_wrapper",
656 DFSanVarargWrapperFnTy);
657
658 std::vector<Function *> FnsToInstrument;
659 llvm::SmallPtrSet<Function *, 2> FnsWithNativeABI;
660 for (Function &i : M) {
661 if (!i.isIntrinsic() &&
662 &i != DFSanUnionFn &&
663 &i != DFSanCheckedUnionFn &&
664 &i != DFSanUnionLoadFn &&
665 &i != DFSanUnimplementedFn &&
666 &i != DFSanSetLabelFn &&
667 &i != DFSanNonzeroLabelFn &&
668 &i != DFSanVarargWrapperFn)
669 FnsToInstrument.push_back(&i);
670 }
671
672 // Give function aliases prefixes when necessary, and build wrappers where the
673 // instrumentedness is inconsistent.
674 for (Module::alias_iterator i = M.alias_begin(), e = M.alias_end(); i != e;) {
675 GlobalAlias *GA = &*i;
676 ++i;
677 // Don't stop on weak. We assume people aren't playing games with the
678 // instrumentedness of overridden weak aliases.
679 if (auto F = dyn_cast<Function>(GA->getBaseObject())) {
680 bool GAInst = isInstrumented(GA), FInst = isInstrumented(F);
681 if (GAInst && FInst) {
682 addGlobalNamePrefix(GA);
683 } else if (GAInst != FInst) {
684 // Non-instrumented alias of an instrumented function, or vice versa.
685 // Replace the alias with a native-ABI wrapper of the aliasee. The pass
686 // below will take care of instrumenting it.
687 Function *NewF =
688 buildWrapperFunction(F, "", GA->getLinkage(), F->getFunctionType());
689 GA->replaceAllUsesWith(ConstantExpr::getBitCast(NewF, GA->getType()));
690 NewF->takeName(GA);
691 GA->eraseFromParent();
692 FnsToInstrument.push_back(NewF);
693 }
694 }
695 }
696
697 AttrBuilder B;
698 B.addAttribute(Attribute::ReadOnly).addAttribute(Attribute::ReadNone);
699 ReadOnlyNoneAttrs = AttributeSet::get(*Ctx, AttributeSet::FunctionIndex, B);
700
701 // First, change the ABI of every function in the module. ABI-listed
702 // functions keep their original ABI and get a wrapper function.
703 for (std::vector<Function *>::iterator i = FnsToInstrument.begin(),
704 e = FnsToInstrument.end();
705 i != e; ++i) {
706 Function &F = **i;
707 FunctionType *FT = F.getFunctionType();
708
709 bool IsZeroArgsVoidRet = (FT->getNumParams() == 0 && !FT->isVarArg() &&
710 FT->getReturnType()->isVoidTy());
711
712 if (isInstrumented(&F)) {
713 // Instrumented functions get a 'dfs$' prefix. This allows us to more
714 // easily identify cases of mismatching ABIs.
715 if (getInstrumentedABI() == IA_Args && !IsZeroArgsVoidRet) {
716 FunctionType *NewFT = getArgsFunctionType(FT);
717 Function *NewF = Function::Create(NewFT, F.getLinkage(), "", &M);
718 NewF->copyAttributesFrom(&F);
719 NewF->removeAttributes(
720 AttributeSet::ReturnIndex,
721 AttributeSet::get(NewF->getContext(), AttributeSet::ReturnIndex,
722 AttributeFuncs::typeIncompatible(NewFT->getReturnType())));
723 for (Function::arg_iterator FArg = F.arg_begin(),
724 NewFArg = NewF->arg_begin(),
725 FArgEnd = F.arg_end();
726 FArg != FArgEnd; ++FArg, ++NewFArg) {
727 FArg->replaceAllUsesWith(&*NewFArg);
728 }
729 NewF->getBasicBlockList().splice(NewF->begin(), F.getBasicBlockList());
730
731 for (Function::user_iterator UI = F.user_begin(), UE = F.user_end();
732 UI != UE;) {
733 BlockAddress *BA = dyn_cast<BlockAddress>(*UI);
734 ++UI;
735 if (BA) {
736 BA->replaceAllUsesWith(
737 BlockAddress::get(NewF, BA->getBasicBlock()));
738 delete BA;
739 }
740 }
741 F.replaceAllUsesWith(
742 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT)));
743 NewF->takeName(&F);
744 F.eraseFromParent();
745 *i = NewF;
746 addGlobalNamePrefix(NewF);
747 } else {
748 addGlobalNamePrefix(&F);
749 }
750 } else if (!IsZeroArgsVoidRet || getWrapperKind(&F) == WK_Custom) {
751 // Build a wrapper function for F. The wrapper simply calls F, and is
752 // added to FnsToInstrument so that any instrumentation according to its
753 // WrapperKind is done in the second pass below.
754 FunctionType *NewFT = getInstrumentedABI() == IA_Args
755 ? getArgsFunctionType(FT)
756 : FT;
757 Function *NewF = buildWrapperFunction(
758 &F, std::string("dfsw$") + std::string(F.getName()),
759 GlobalValue::LinkOnceODRLinkage, NewFT);
760 if (getInstrumentedABI() == IA_TLS)
761 NewF->removeAttributes(AttributeSet::FunctionIndex, ReadOnlyNoneAttrs);
762
763 Value *WrappedFnCst =
764 ConstantExpr::getBitCast(NewF, PointerType::getUnqual(FT));
765 F.replaceAllUsesWith(WrappedFnCst);
766
767 UnwrappedFnMap[WrappedFnCst] = &F;
768 *i = NewF;
769
770 if (!F.isDeclaration()) {
771 // This function is probably defining an interposition of an
772 // uninstrumented function and hence needs to keep the original ABI.
773 // But any functions it may call need to use the instrumented ABI, so
774 // we instrument it in a mode which preserves the original ABI.
775 FnsWithNativeABI.insert(&F);
776
777 // This code needs to rebuild the iterators, as they may be invalidated
778 // by the push_back, taking care that the new range does not include
779 // any functions added by this code.
780 size_t N = i - FnsToInstrument.begin(),
781 Count = e - FnsToInstrument.begin();
782 FnsToInstrument.push_back(&F);
783 i = FnsToInstrument.begin() + N;
784 e = FnsToInstrument.begin() + Count;
785 }
786 // Hopefully, nobody will try to indirectly call a vararg
787 // function... yet.
788 } else if (FT->isVarArg()) {
789 UnwrappedFnMap[&F] = &F;
790 *i = nullptr;
791 }
792 }
793
794 for (Function *i : FnsToInstrument) {
795 if (!i || i->isDeclaration())
796 continue;
797
798 removeUnreachableBlocks(*i);
799
800 DFSanFunction DFSF(*this, i, FnsWithNativeABI.count(i));
801
802 // DFSanVisitor may create new basic blocks, which confuses df_iterator.
803 // Build a copy of the list before iterating over it.
804 llvm::SmallVector<BasicBlock *, 4> BBList(depth_first(&i->getEntryBlock()));
805
806 for (BasicBlock *i : BBList) {
807 Instruction *Inst = &i->front();
808 while (1) {
809 // DFSanVisitor may split the current basic block, changing the current
810 // instruction's next pointer and moving the next instruction to the
811 // tail block from which we should continue.
812 Instruction *Next = Inst->getNextNode();
813 // DFSanVisitor may delete Inst, so keep track of whether it was a
814 // terminator.
815 bool IsTerminator = isa<TerminatorInst>(Inst);
816 if (!DFSF.SkipInsts.count(Inst))
817 DFSanVisitor(DFSF).visit(Inst);
818 if (IsTerminator)
819 break;
820 Inst = Next;
821 }
822 }
823
824 // We will not necessarily be able to compute the shadow for every phi node
825 // until we have visited every block. Therefore, the code that handles phi
826 // nodes adds them to the PHIFixups list so that they can be properly
827 // handled here.
828 for (std::vector<std::pair<PHINode *, PHINode *> >::iterator
829 i = DFSF.PHIFixups.begin(),
830 e = DFSF.PHIFixups.end();
831 i != e; ++i) {
832 for (unsigned val = 0, n = i->first->getNumIncomingValues(); val != n;
833 ++val) {
834 i->second->setIncomingValue(
835 val, DFSF.getShadow(i->first->getIncomingValue(val)));
836 }
837 }
838
839 // -dfsan-debug-nonzero-labels will split the CFG in all kinds of crazy
840 // places (i.e. instructions in basic blocks we haven't even begun visiting
841 // yet). To make our life easier, do this work in a pass after the main
842 // instrumentation.
843 if (ClDebugNonzeroLabels) {
844 for (Value *V : DFSF.NonZeroChecks) {
845 Instruction *Pos;
846 if (Instruction *I = dyn_cast<Instruction>(V))
847 Pos = I->getNextNode();
848 else
849 Pos = &DFSF.F->getEntryBlock().front();
850 while (isa<PHINode>(Pos) || isa<AllocaInst>(Pos))
851 Pos = Pos->getNextNode();
852 IRBuilder<> IRB(Pos);
853 Value *Ne = IRB.CreateICmpNE(V, DFSF.DFS.ZeroShadow);
854 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
855 Ne, Pos, /*Unreachable=*/false, ColdCallWeights));
856 IRBuilder<> ThenIRB(BI);
857 ThenIRB.CreateCall(DFSF.DFS.DFSanNonzeroLabelFn, {});
858 }
859 }
860 }
861
862 return false;
863 }
864
865 Value *DFSanFunction::getArgTLSPtr() {
866 if (ArgTLSPtr)
867 return ArgTLSPtr;
868 if (DFS.ArgTLS)
869 return ArgTLSPtr = DFS.ArgTLS;
870
871 IRBuilder<> IRB(&F->getEntryBlock().front());
872 return ArgTLSPtr = IRB.CreateCall(DFS.GetArgTLS, {});
873 }
874
875 Value *DFSanFunction::getRetvalTLS() {
876 if (RetvalTLSPtr)
877 return RetvalTLSPtr;
878 if (DFS.RetvalTLS)
879 return RetvalTLSPtr = DFS.RetvalTLS;
880
881 IRBuilder<> IRB(&F->getEntryBlock().front());
882 return RetvalTLSPtr = IRB.CreateCall(DFS.GetRetvalTLS, {});
883 }
884
885 Value *DFSanFunction::getArgTLS(unsigned Idx, Instruction *Pos) {
886 IRBuilder<> IRB(Pos);
887 return IRB.CreateConstGEP2_64(getArgTLSPtr(), 0, Idx);
888 }
889
890 Value *DFSanFunction::getShadow(Value *V) {
891 if (!isa<Argument>(V) && !isa<Instruction>(V))
892 return DFS.ZeroShadow;
893 Value *&Shadow = ValShadowMap[V];
894 if (!Shadow) {
895 if (Argument *A = dyn_cast<Argument>(V)) {
896 if (IsNativeABI)
897 return DFS.ZeroShadow;
898 switch (IA) {
899 case DataFlowSanitizer::IA_TLS: {
900 Value *ArgTLSPtr = getArgTLSPtr();
901 Instruction *ArgTLSPos =
902 DFS.ArgTLS ? &*F->getEntryBlock().begin()
903 : cast<Instruction>(ArgTLSPtr)->getNextNode();
904 IRBuilder<> IRB(ArgTLSPos);
905 Shadow = IRB.CreateLoad(getArgTLS(A->getArgNo(), ArgTLSPos));
906 break;
907 }
908 case DataFlowSanitizer::IA_Args: {
909 unsigned ArgIdx = A->getArgNo() + F->getArgumentList().size() / 2;
910 Function::arg_iterator i = F->arg_begin();
911 while (ArgIdx--)
912 ++i;
913 Shadow = &*i;
914 assert(Shadow->getType() == DFS.ShadowTy);
915 break;
916 }
917 }
918 NonZeroChecks.push_back(Shadow);
919 } else {
920 Shadow = DFS.ZeroShadow;
921 }
922 }
923 return Shadow;
924 }
925
926 void DFSanFunction::setShadow(Instruction *I, Value *Shadow) {
927 assert(!ValShadowMap.count(I));
928 assert(Shadow->getType() == DFS.ShadowTy);
929 ValShadowMap[I] = Shadow;
930 }
931
932 Value *DataFlowSanitizer::getShadowAddress(Value *Addr, Instruction *Pos) {
933 assert(Addr != RetvalTLS && "Reinstrumenting?");
934 IRBuilder<> IRB(Pos);
935 Value *ShadowPtrMaskValue;
936 if (DFSanRuntimeShadowMask)
937 ShadowPtrMaskValue = IRB.CreateLoad(IntptrTy, ExternalShadowMask);
938 else
939 ShadowPtrMaskValue = ShadowPtrMask;
940 return IRB.CreateIntToPtr(
941 IRB.CreateMul(
942 IRB.CreateAnd(IRB.CreatePtrToInt(Addr, IntptrTy),
943 IRB.CreatePtrToInt(ShadowPtrMaskValue, IntptrTy)),
944 ShadowPtrMul),
945 ShadowPtrTy);
946 }
947
948 // Generates IR to compute the union of the two given shadows, inserting it
949 // before Pos. Returns the computed union Value.
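// In outline: equal or zero shadows are returned directly, and shadows already
// known to subsume one another are reused. Otherwise the union is computed
// either by a call to the checked runtime union function (when new blocks are
// being avoided) or by a compare-and-branch around __dfsan_union, and the
// result is cached so that a union computed in a dominating block can be
// reused.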
950 Value *DFSanFunction::combineShadows(Value *V1, Value *V2, Instruction *Pos) {
951 if (V1 == DFS.ZeroShadow)
952 return V2;
953 if (V2 == DFS.ZeroShadow)
954 return V1;
955 if (V1 == V2)
956 return V1;
957
958 auto V1Elems = ShadowElements.find(V1);
959 auto V2Elems = ShadowElements.find(V2);
960 if (V1Elems != ShadowElements.end() && V2Elems != ShadowElements.end()) {
961 if (std::includes(V1Elems->second.begin(), V1Elems->second.end(),
962 V2Elems->second.begin(), V2Elems->second.end())) {
963 return V1;
964 } else if (std::includes(V2Elems->second.begin(), V2Elems->second.end(),
965 V1Elems->second.begin(), V1Elems->second.end())) {
966 return V2;
967 }
968 } else if (V1Elems != ShadowElements.end()) {
969 if (V1Elems->second.count(V2))
970 return V1;
971 } else if (V2Elems != ShadowElements.end()) {
972 if (V2Elems->second.count(V1))
973 return V2;
974 }
975
976 auto Key = std::make_pair(V1, V2);
977 if (V1 > V2)
978 std::swap(Key.first, Key.second);
979 CachedCombinedShadow &CCS = CachedCombinedShadows[Key];
980 if (CCS.Block && DT.dominates(CCS.Block, Pos->getParent()))
981 return CCS.Shadow;
982
983 IRBuilder<> IRB(Pos);
984 if (AvoidNewBlocks) {
985 CallInst *Call = IRB.CreateCall(DFS.DFSanCheckedUnionFn, {V1, V2});
986 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
987 Call->addAttribute(1, Attribute::ZExt);
988 Call->addAttribute(2, Attribute::ZExt);
989
990 CCS.Block = Pos->getParent();
991 CCS.Shadow = Call;
992 } else {
993 BasicBlock *Head = Pos->getParent();
994 Value *Ne = IRB.CreateICmpNE(V1, V2);
995 BranchInst *BI = cast<BranchInst>(SplitBlockAndInsertIfThen(
996 Ne, Pos, /*Unreachable=*/false, DFS.ColdCallWeights, &DT));
997 IRBuilder<> ThenIRB(BI);
998 CallInst *Call = ThenIRB.CreateCall(DFS.DFSanUnionFn, {V1, V2});
999 Call->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1000 Call->addAttribute(1, Attribute::ZExt);
1001 Call->addAttribute(2, Attribute::ZExt);
1002
1003 BasicBlock *Tail = BI->getSuccessor(0);
1004 PHINode *Phi = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1005 Phi->addIncoming(Call, Call->getParent());
1006 Phi->addIncoming(V1, Head);
1007
1008 CCS.Block = Tail;
1009 CCS.Shadow = Phi;
1010 }
1011
1012 std::set<Value *> UnionElems;
1013 if (V1Elems != ShadowElements.end()) {
1014 UnionElems = V1Elems->second;
1015 } else {
1016 UnionElems.insert(V1);
1017 }
1018 if (V2Elems != ShadowElements.end()) {
1019 UnionElems.insert(V2Elems->second.begin(), V2Elems->second.end());
1020 } else {
1021 UnionElems.insert(V2);
1022 }
1023 ShadowElements[CCS.Shadow] = std::move(UnionElems);
1024
1025 return CCS.Shadow;
1026 }
1027
1028 // A convenience function which folds the shadows of each of the operands
1029 // of the provided instruction Inst, inserting the IR before Inst. Returns
1030 // the computed union Value.
1031 Value *DFSanFunction::combineOperandShadows(Instruction *Inst) {
1032 if (Inst->getNumOperands() == 0)
1033 return DFS.ZeroShadow;
1034
1035 Value *Shadow = getShadow(Inst->getOperand(0));
1036 for (unsigned i = 1, n = Inst->getNumOperands(); i != n; ++i) {
1037 Shadow = combineShadows(Shadow, getShadow(Inst->getOperand(i)), Inst);
1038 }
1039 return Shadow;
1040 }
1041
1042 void DFSanVisitor::visitOperandShadowInst(Instruction &I) {
1043 Value *CombinedShadow = DFSF.combineOperandShadows(&I);
1044 DFSF.setShadow(&I, CombinedShadow);
1045 }
1046
1047 // Generates IR to load shadow corresponding to bytes [Addr, Addr+Size), where
1048 // Addr has alignment Align, and takes the union of each of those shadows.
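// In outline: loads from allocas with a known shadow alloca are handled
// directly, loads from provably constant memory yield the zero shadow, loads
// of one or two bytes are handled inline, suitably sized loads use a 64-bit
// wide fast path when block splitting is allowed, and everything else falls
// back to the __dfsan_union_load runtime function.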
1049 Value *DFSanFunction::loadShadow(Value *Addr, uint64_t Size, uint64_t Align,
1050 Instruction *Pos) {
1051 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1052 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1053 AllocaShadowMap.find(AI);
1054 if (i != AllocaShadowMap.end()) {
1055 IRBuilder<> IRB(Pos);
1056 return IRB.CreateLoad(i->second);
1057 }
1058 }
1059
1060 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1061 SmallVector<Value *, 2> Objs;
1062 GetUnderlyingObjects(Addr, Objs, Pos->getModule()->getDataLayout());
1063 bool AllConstants = true;
1064 for (Value *Obj : Objs) {
1065 if (isa<Function>(Obj) || isa<BlockAddress>(Obj))
1066 continue;
1067 if (isa<GlobalVariable>(Obj) && cast<GlobalVariable>(Obj)->isConstant())
1068 continue;
1069
1070 AllConstants = false;
1071 break;
1072 }
1073 if (AllConstants)
1074 return DFS.ZeroShadow;
1075
1076 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1077 switch (Size) {
1078 case 0:
1079 return DFS.ZeroShadow;
1080 case 1: {
1081 LoadInst *LI = new LoadInst(ShadowAddr, "", Pos);
1082 LI->setAlignment(ShadowAlign);
1083 return LI;
1084 }
1085 case 2: {
1086 IRBuilder<> IRB(Pos);
1087 Value *ShadowAddr1 = IRB.CreateGEP(DFS.ShadowTy, ShadowAddr,
1088 ConstantInt::get(DFS.IntptrTy, 1));
1089 return combineShadows(IRB.CreateAlignedLoad(ShadowAddr, ShadowAlign),
1090 IRB.CreateAlignedLoad(ShadowAddr1, ShadowAlign), Pos);
1091 }
1092 }
1093 if (!AvoidNewBlocks && Size % (64 / DFS.ShadowWidth) == 0) {
1094 // Fast path for the common case where each byte has identical shadow: load
1095 // shadow 64 bits at a time, fall out to a __dfsan_union_load call if any
1096 // shadow is non-equal.
1097 BasicBlock *FallbackBB = BasicBlock::Create(*DFS.Ctx, "", F);
1098 IRBuilder<> FallbackIRB(FallbackBB);
1099 CallInst *FallbackCall = FallbackIRB.CreateCall(
1100 DFS.DFSanUnionLoadFn,
1101 {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1102 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1103
1104 // Compare each of the shadows stored in the loaded 64 bits to each other,
1105 // by computing (WideShadow rotl ShadowWidth) == WideShadow.
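// For example, with ShadowWidth == 16 the wide load holds four labels:
// 0x0005000500050005 rotated left by 16 bits is unchanged, whereas any
// mismatched 16-bit lane (e.g. 0x0005000500050007) makes the comparison
// fail and sends control to the fallback block.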
1106 IRBuilder<> IRB(Pos);
1107 Value *WideAddr =
1108 IRB.CreateBitCast(ShadowAddr, Type::getInt64PtrTy(*DFS.Ctx));
1109 Value *WideShadow = IRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1110 Value *TruncShadow = IRB.CreateTrunc(WideShadow, DFS.ShadowTy);
1111 Value *ShlShadow = IRB.CreateShl(WideShadow, DFS.ShadowWidth);
1112 Value *ShrShadow = IRB.CreateLShr(WideShadow, 64 - DFS.ShadowWidth);
1113 Value *RotShadow = IRB.CreateOr(ShlShadow, ShrShadow);
1114 Value *ShadowsEq = IRB.CreateICmpEQ(WideShadow, RotShadow);
1115
1116 BasicBlock *Head = Pos->getParent();
1117 BasicBlock *Tail = Head->splitBasicBlock(Pos->getIterator());
1118
1119 if (DomTreeNode *OldNode = DT.getNode(Head)) {
1120 std::vector<DomTreeNode *> Children(OldNode->begin(), OldNode->end());
1121
1122 DomTreeNode *NewNode = DT.addNewBlock(Tail, Head);
1123 for (auto Child : Children)
1124 DT.changeImmediateDominator(Child, NewNode);
1125 }
1126
1127 // In the following code LastBr will refer to the previous basic block's
1128 // conditional branch instruction, whose true successor is fixed up to point
1129 // to the next block during the loop below or to the tail after the final
1130 // iteration.
1131 BranchInst *LastBr = BranchInst::Create(FallbackBB, FallbackBB, ShadowsEq);
1132 ReplaceInstWithInst(Head->getTerminator(), LastBr);
1133 DT.addNewBlock(FallbackBB, Head);
1134
1135 for (uint64_t Ofs = 64 / DFS.ShadowWidth; Ofs != Size;
1136 Ofs += 64 / DFS.ShadowWidth) {
1137 BasicBlock *NextBB = BasicBlock::Create(*DFS.Ctx, "", F);
1138 DT.addNewBlock(NextBB, LastBr->getParent());
1139 IRBuilder<> NextIRB(NextBB);
1140 WideAddr = NextIRB.CreateGEP(Type::getInt64Ty(*DFS.Ctx), WideAddr,
1141 ConstantInt::get(DFS.IntptrTy, 1));
1142 Value *NextWideShadow = NextIRB.CreateAlignedLoad(WideAddr, ShadowAlign);
1143 ShadowsEq = NextIRB.CreateICmpEQ(WideShadow, NextWideShadow);
1144 LastBr->setSuccessor(0, NextBB);
1145 LastBr = NextIRB.CreateCondBr(ShadowsEq, FallbackBB, FallbackBB);
1146 }
1147
1148 LastBr->setSuccessor(0, Tail);
1149 FallbackIRB.CreateBr(Tail);
1150 PHINode *Shadow = PHINode::Create(DFS.ShadowTy, 2, "", &Tail->front());
1151 Shadow->addIncoming(FallbackCall, FallbackBB);
1152 Shadow->addIncoming(TruncShadow, LastBr->getParent());
1153 return Shadow;
1154 }
1155
1156 IRBuilder<> IRB(Pos);
1157 CallInst *FallbackCall = IRB.CreateCall(
1158 DFS.DFSanUnionLoadFn, {ShadowAddr, ConstantInt::get(DFS.IntptrTy, Size)});
1159 FallbackCall->addAttribute(AttributeSet::ReturnIndex, Attribute::ZExt);
1160 return FallbackCall;
1161 }
1162
1163 void DFSanVisitor::visitLoadInst(LoadInst &LI) {
1164 auto &DL = LI.getModule()->getDataLayout();
1165 uint64_t Size = DL.getTypeStoreSize(LI.getType());
1166 if (Size == 0) {
1167 DFSF.setShadow(&LI, DFSF.DFS.ZeroShadow);
1168 return;
1169 }
1170
1171 uint64_t Align;
1172 if (ClPreserveAlignment) {
1173 Align = LI.getAlignment();
1174 if (Align == 0)
1175 Align = DL.getABITypeAlignment(LI.getType());
1176 } else {
1177 Align = 1;
1178 }
1179 IRBuilder<> IRB(&LI);
1180 Value *Shadow = DFSF.loadShadow(LI.getPointerOperand(), Size, Align, &LI);
1181 if (ClCombinePointerLabelsOnLoad) {
1182 Value *PtrShadow = DFSF.getShadow(LI.getPointerOperand());
1183 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &LI);
1184 }
1185 if (Shadow != DFSF.DFS.ZeroShadow)
1186 DFSF.NonZeroChecks.push_back(Shadow);
1187
1188 DFSF.setShadow(&LI, Shadow);
1189 }
1190
1191 void DFSanFunction::storeShadow(Value *Addr, uint64_t Size, uint64_t Align,
1192 Value *Shadow, Instruction *Pos) {
1193 if (AllocaInst *AI = dyn_cast<AllocaInst>(Addr)) {
1194 llvm::DenseMap<AllocaInst *, AllocaInst *>::iterator i =
1195 AllocaShadowMap.find(AI);
1196 if (i != AllocaShadowMap.end()) {
1197 IRBuilder<> IRB(Pos);
1198 IRB.CreateStore(Shadow, i->second);
1199 return;
1200 }
1201 }
1202
1203 uint64_t ShadowAlign = Align * DFS.ShadowWidth / 8;
1204 IRBuilder<> IRB(Pos);
1205 Value *ShadowAddr = DFS.getShadowAddress(Addr, Pos);
1206 if (Shadow == DFS.ZeroShadow) {
1207 IntegerType *ShadowTy = IntegerType::get(*DFS.Ctx, Size * DFS.ShadowWidth);
1208 Value *ExtZeroShadow = ConstantInt::get(ShadowTy, 0);
1209 Value *ExtShadowAddr =
1210 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowTy));
1211 IRB.CreateAlignedStore(ExtZeroShadow, ExtShadowAddr, ShadowAlign);
1212 return;
1213 }
1214
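// Otherwise, replicate the shadow into a 128-bit vector (eight 16-bit labels)
// and store it in vector-sized chunks, finishing any remainder one label at a
// time.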
1215 const unsigned ShadowVecSize = 128 / DFS.ShadowWidth;
1216 uint64_t Offset = 0;
1217 if (Size >= ShadowVecSize) {
1218 VectorType *ShadowVecTy = VectorType::get(DFS.ShadowTy, ShadowVecSize);
1219 Value *ShadowVec = UndefValue::get(ShadowVecTy);
1220 for (unsigned i = 0; i != ShadowVecSize; ++i) {
1221 ShadowVec = IRB.CreateInsertElement(
1222 ShadowVec, Shadow, ConstantInt::get(Type::getInt32Ty(*DFS.Ctx), i));
1223 }
1224 Value *ShadowVecAddr =
1225 IRB.CreateBitCast(ShadowAddr, PointerType::getUnqual(ShadowVecTy));
1226 do {
1227 Value *CurShadowVecAddr =
1228 IRB.CreateConstGEP1_32(ShadowVecTy, ShadowVecAddr, Offset);
1229 IRB.CreateAlignedStore(ShadowVec, CurShadowVecAddr, ShadowAlign);
1230 Size -= ShadowVecSize;
1231 ++Offset;
1232 } while (Size >= ShadowVecSize);
1233 Offset *= ShadowVecSize;
1234 }
1235 while (Size > 0) {
1236 Value *CurShadowAddr =
1237 IRB.CreateConstGEP1_32(DFS.ShadowTy, ShadowAddr, Offset);
1238 IRB.CreateAlignedStore(Shadow, CurShadowAddr, ShadowAlign);
1239 --Size;
1240 ++Offset;
1241 }
1242 }
1243
1244 void DFSanVisitor::visitStoreInst(StoreInst &SI) {
1245 auto &DL = SI.getModule()->getDataLayout();
1246 uint64_t Size = DL.getTypeStoreSize(SI.getValueOperand()->getType());
1247 if (Size == 0)
1248 return;
1249
1250 uint64_t Align;
1251 if (ClPreserveAlignment) {
1252 Align = SI.getAlignment();
1253 if (Align == 0)
1254 Align = DL.getABITypeAlignment(SI.getValueOperand()->getType());
1255 } else {
1256 Align = 1;
1257 }
1258
1259 Value* Shadow = DFSF.getShadow(SI.getValueOperand());
1260 if (ClCombinePointerLabelsOnStore) {
1261 Value *PtrShadow = DFSF.getShadow(SI.getPointerOperand());
1262 Shadow = DFSF.combineShadows(Shadow, PtrShadow, &SI);
1263 }
1264 DFSF.storeShadow(SI.getPointerOperand(), Size, Align, Shadow, &SI);
1265 }
1266
1267 void DFSanVisitor::visitBinaryOperator(BinaryOperator &BO) {
1268 visitOperandShadowInst(BO);
1269 }
1270
1271 void DFSanVisitor::visitCastInst(CastInst &CI) { visitOperandShadowInst(CI); }
1272
1273 void DFSanVisitor::visitCmpInst(CmpInst &CI) { visitOperandShadowInst(CI); }
1274
1275 void DFSanVisitor::visitGetElementPtrInst(GetElementPtrInst &GEPI) {
1276 visitOperandShadowInst(GEPI);
1277 }
1278
1279 void DFSanVisitor::visitExtractElementInst(ExtractElementInst &I) {
1280 visitOperandShadowInst(I);
1281 }
1282
1283 void DFSanVisitor::visitInsertElementInst(InsertElementInst &I) {
1284 visitOperandShadowInst(I);
1285 }
1286
1287 void DFSanVisitor::visitShuffleVectorInst(ShuffleVectorInst &I) {
1288 visitOperandShadowInst(I);
1289 }
1290
1291 void DFSanVisitor::visitExtractValueInst(ExtractValueInst &I) {
1292 visitOperandShadowInst(I);
1293 }
1294
1295 void DFSanVisitor::visitInsertValueInst(InsertValueInst &I) {
1296 visitOperandShadowInst(I);
1297 }
1298
1299 void DFSanVisitor::visitAllocaInst(AllocaInst &I) {
1300 bool AllLoadsStores = true;
1301 for (User *U : I.users()) {
1302 if (isa<LoadInst>(U))
1303 continue;
1304
1305 if (StoreInst *SI = dyn_cast<StoreInst>(U)) {
1306 if (SI->getPointerOperand() == &I)
1307 continue;
1308 }
1309
1310 AllLoadsStores = false;
1311 break;
1312 }
1313 if (AllLoadsStores) {
1314 IRBuilder<> IRB(&I);
1315 DFSF.AllocaShadowMap[&I] = IRB.CreateAlloca(DFSF.DFS.ShadowTy);
1316 }
1317 DFSF.setShadow(&I, DFSF.DFS.ZeroShadow);
1318 }
1319
1320 void DFSanVisitor::visitSelectInst(SelectInst &I) {
1321 Value *CondShadow = DFSF.getShadow(I.getCondition());
1322 Value *TrueShadow = DFSF.getShadow(I.getTrueValue());
1323 Value *FalseShadow = DFSF.getShadow(I.getFalseValue());
1324
1325 if (isa<VectorType>(I.getCondition()->getType())) {
1326 DFSF.setShadow(
1327 &I,
1328 DFSF.combineShadows(
1329 CondShadow, DFSF.combineShadows(TrueShadow, FalseShadow, &I), &I));
1330 } else {
1331 Value *ShadowSel;
1332 if (TrueShadow == FalseShadow) {
1333 ShadowSel = TrueShadow;
1334 } else {
1335 ShadowSel =
1336 SelectInst::Create(I.getCondition(), TrueShadow, FalseShadow, "", &I);
1337 }
1338 DFSF.setShadow(&I, DFSF.combineShadows(CondShadow, ShadowSel, &I));
1339 }
1340 }
1341
1342 void DFSanVisitor::visitMemSetInst(MemSetInst &I) {
1343 IRBuilder<> IRB(&I);
1344 Value *ValShadow = DFSF.getShadow(I.getValue());
1345 IRB.CreateCall(DFSF.DFS.DFSanSetLabelFn,
1346 {ValShadow, IRB.CreateBitCast(I.getDest(), Type::getInt8PtrTy(
1347 *DFSF.DFS.Ctx)),
1348 IRB.CreateZExtOrTrunc(I.getLength(), DFSF.DFS.IntptrTy)});
1349 }
1350
1351 void DFSanVisitor::visitMemTransferInst(MemTransferInst &I) {
1352 IRBuilder<> IRB(&I);
1353 Value *DestShadow = DFSF.DFS.getShadowAddress(I.getDest(), &I);
1354 Value *SrcShadow = DFSF.DFS.getShadowAddress(I.getSource(), &I);
1355 Value *LenShadow = IRB.CreateMul(
1356 I.getLength(),
1357 ConstantInt::get(I.getLength()->getType(), DFSF.DFS.ShadowWidth / 8));
1358 Value *AlignShadow;
1359 if (ClPreserveAlignment) {
1360 AlignShadow = IRB.CreateMul(I.getAlignmentCst(),
1361 ConstantInt::get(I.getAlignmentCst()->getType(),
1362 DFSF.DFS.ShadowWidth / 8));
1363 } else {
1364 AlignShadow = ConstantInt::get(I.getAlignmentCst()->getType(),
1365 DFSF.DFS.ShadowWidth / 8);
1366 }
1367 Type *Int8Ptr = Type::getInt8PtrTy(*DFSF.DFS.Ctx);
1368 DestShadow = IRB.CreateBitCast(DestShadow, Int8Ptr);
1369 SrcShadow = IRB.CreateBitCast(SrcShadow, Int8Ptr);
1370 IRB.CreateCall(I.getCalledValue(), {DestShadow, SrcShadow, LenShadow,
1371 AlignShadow, I.getVolatileCst()});
1372 }
1373
1374 void DFSanVisitor::visitReturnInst(ReturnInst &RI) {
1375 if (!DFSF.IsNativeABI && RI.getReturnValue()) {
1376 switch (DFSF.IA) {
1377 case DataFlowSanitizer::IA_TLS: {
1378 Value *S = DFSF.getShadow(RI.getReturnValue());
1379 IRBuilder<> IRB(&RI);
1380 IRB.CreateStore(S, DFSF.getRetvalTLS());
1381 break;
1382 }
1383 case DataFlowSanitizer::IA_Args: {
1384 IRBuilder<> IRB(&RI);
1385 Type *RT = DFSF.F->getFunctionType()->getReturnType();
1386 Value *InsVal =
1387 IRB.CreateInsertValue(UndefValue::get(RT), RI.getReturnValue(), 0);
1388 Value *InsShadow =
1389 IRB.CreateInsertValue(InsVal, DFSF.getShadow(RI.getReturnValue()), 1);
1390 RI.setOperand(0, InsShadow);
1391 break;
1392 }
1393 }
1394 }
1395 }
1396
1397 void DFSanVisitor::visitCallSite(CallSite CS) {
1398 Function *F = CS.getCalledFunction();
1399 if ((F && F->isIntrinsic()) || isa<InlineAsm>(CS.getCalledValue())) {
1400 visitOperandShadowInst(*CS.getInstruction());
1401 return;
1402 }
1403
1404 // Calls to this function are synthesized in wrappers, and we shouldn't
1405 // instrument them.
1406 if (F == DFSF.DFS.DFSanVarargWrapperFn)
1407 return;
1408
1409 IRBuilder<> IRB(CS.getInstruction());
1410
1411 DenseMap<Value *, Function *>::iterator i =
1412 DFSF.DFS.UnwrappedFnMap.find(CS.getCalledValue());
1413 if (i != DFSF.DFS.UnwrappedFnMap.end()) {
1414 Function *F = i->second;
1415 switch (DFSF.DFS.getWrapperKind(F)) {
1416 case DataFlowSanitizer::WK_Warning: {
1417 CS.setCalledFunction(F);
1418 IRB.CreateCall(DFSF.DFS.DFSanUnimplementedFn,
1419 IRB.CreateGlobalStringPtr(F->getName()));
1420 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1421 return;
1422 }
1423 case DataFlowSanitizer::WK_Discard: {
1424 CS.setCalledFunction(F);
1425 DFSF.setShadow(CS.getInstruction(), DFSF.DFS.ZeroShadow);
1426 return;
1427 }
1428 case DataFlowSanitizer::WK_Functional: {
1429 CS.setCalledFunction(F);
1430 visitOperandShadowInst(*CS.getInstruction());
1431 return;
1432 }
1433 case DataFlowSanitizer::WK_Custom: {
1434 // Don't try to handle invokes of custom functions, it's too complicated.
1435 // Instead, invoke the dfsw$ wrapper, which will in turn call the __dfsw_
1436 // wrapper.
1437 if (CallInst *CI = dyn_cast<CallInst>(CS.getInstruction())) {
1438 FunctionType *FT = F->getFunctionType();
1439 FunctionType *CustomFT = DFSF.DFS.getCustomFunctionType(FT);
1440 std::string CustomFName = "__dfsw_";
1441 CustomFName += F->getName();
1442 Constant *CustomF =
1443 DFSF.DFS.Mod->getOrInsertFunction(CustomFName, CustomFT);
1444 if (Function *CustomFn = dyn_cast<Function>(CustomF)) {
1445 CustomFn->copyAttributesFrom(F);
1446
1447 // Custom functions returning non-void will write to the return label.
1448 if (!FT->getReturnType()->isVoidTy()) {
1449 CustomFn->removeAttributes(AttributeSet::FunctionIndex,
1450 DFSF.DFS.ReadOnlyNoneAttrs);
1451 }
1452 }
1453
1454 std::vector<Value *> Args;
1455
1456 CallSite::arg_iterator i = CS.arg_begin();
1457 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n) {
1458 Type *T = (*i)->getType();
1459 FunctionType *ParamFT;
1460 if (isa<PointerType>(T) &&
1461 (ParamFT = dyn_cast<FunctionType>(
1462 cast<PointerType>(T)->getElementType()))) {
1463 std::string TName = "dfst";
1464 TName += utostr(FT->getNumParams() - n);
1465 TName += "$";
1466 TName += F->getName();
1467 Constant *T = DFSF.DFS.getOrBuildTrampolineFunction(ParamFT, TName);
1468 Args.push_back(T);
1469 Args.push_back(
1470 IRB.CreateBitCast(*i, Type::getInt8PtrTy(*DFSF.DFS.Ctx)));
1471 } else {
1472 Args.push_back(*i);
1473 }
1474 }
1475
1476 i = CS.arg_begin();
1477 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1478 Args.push_back(DFSF.getShadow(*i));
1479
1480 if (FT->isVarArg()) {
1481 auto *LabelVATy = ArrayType::get(DFSF.DFS.ShadowTy,
1482 CS.arg_size() - FT->getNumParams());
1483 auto *LabelVAAlloca = new AllocaInst(
1484 LabelVATy, "labelva", &DFSF.F->getEntryBlock().front());
1485
1486 for (unsigned n = 0; i != CS.arg_end(); ++i, ++n) {
1487 auto LabelVAPtr = IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, n);
1488 IRB.CreateStore(DFSF.getShadow(*i), LabelVAPtr);
1489 }
1490
1491 Args.push_back(IRB.CreateStructGEP(LabelVATy, LabelVAAlloca, 0));
1492 }
1493
1494 if (!FT->getReturnType()->isVoidTy()) {
1495 if (!DFSF.LabelReturnAlloca) {
1496 DFSF.LabelReturnAlloca =
1497 new AllocaInst(DFSF.DFS.ShadowTy, "labelreturn",
1498 &DFSF.F->getEntryBlock().front());
1499 }
1500 Args.push_back(DFSF.LabelReturnAlloca);
1501 }
1502
1503 for (i = CS.arg_begin() + FT->getNumParams(); i != CS.arg_end(); ++i)
1504 Args.push_back(*i);
1505
1506 CallInst *CustomCI = IRB.CreateCall(CustomF, Args);
1507 CustomCI->setCallingConv(CI->getCallingConv());
1508 CustomCI->setAttributes(CI->getAttributes());
1509
1510 if (!FT->getReturnType()->isVoidTy()) {
1511 LoadInst *LabelLoad = IRB.CreateLoad(DFSF.LabelReturnAlloca);
1512 DFSF.setShadow(CustomCI, LabelLoad);
1513 }
1514
1515 CI->replaceAllUsesWith(CustomCI);
1516 CI->eraseFromParent();
1517 return;
1518 }
1519 break;
1520 }
1521 }
1522 }
1523
1524 FunctionType *FT = cast<FunctionType>(
1525 CS.getCalledValue()->getType()->getPointerElementType());
1526 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1527 for (unsigned i = 0, n = FT->getNumParams(); i != n; ++i) {
1528 IRB.CreateStore(DFSF.getShadow(CS.getArgument(i)),
1529 DFSF.getArgTLS(i, CS.getInstruction()));
1530 }
1531 }
1532
1533 Instruction *Next = nullptr;
1534 if (!CS.getType()->isVoidTy()) {
1535 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1536 if (II->getNormalDest()->getSinglePredecessor()) {
1537 Next = &II->getNormalDest()->front();
1538 } else {
1539 BasicBlock *NewBB =
1540 SplitEdge(II->getParent(), II->getNormalDest(), &DFSF.DT);
1541 Next = &NewBB->front();
1542 }
1543 } else {
1544 assert(CS->getIterator() != CS->getParent()->end());
1545 Next = CS->getNextNode();
1546 }
1547
1548 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_TLS) {
1549 IRBuilder<> NextIRB(Next);
1550 LoadInst *LI = NextIRB.CreateLoad(DFSF.getRetvalTLS());
1551 DFSF.SkipInsts.insert(LI);
1552 DFSF.setShadow(CS.getInstruction(), LI);
1553 DFSF.NonZeroChecks.push_back(LI);
1554 }
1555 }
1556
1557 // Do all instrumentation for IA_Args down here to defer tampering with the
1558 // CFG in a way that SplitEdge may be able to detect.
1559 if (DFSF.DFS.getInstrumentedABI() == DataFlowSanitizer::IA_Args) {
1560 FunctionType *NewFT = DFSF.DFS.getArgsFunctionType(FT);
1561 Value *Func =
1562 IRB.CreateBitCast(CS.getCalledValue(), PointerType::getUnqual(NewFT));
1563 std::vector<Value *> Args;
1564
1565 CallSite::arg_iterator i = CS.arg_begin(), e = CS.arg_end();
1566 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1567 Args.push_back(*i);
1568
1569 i = CS.arg_begin();
1570 for (unsigned n = FT->getNumParams(); n != 0; ++i, --n)
1571 Args.push_back(DFSF.getShadow(*i));
1572
1573 if (FT->isVarArg()) {
1574 unsigned VarArgSize = CS.arg_size() - FT->getNumParams();
1575 ArrayType *VarArgArrayTy = ArrayType::get(DFSF.DFS.ShadowTy, VarArgSize);
1576 AllocaInst *VarArgShadow =
1577 new AllocaInst(VarArgArrayTy, "", &DFSF.F->getEntryBlock().front());
1578 Args.push_back(IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, 0));
1579 for (unsigned n = 0; i != e; ++i, ++n) {
1580 IRB.CreateStore(
1581 DFSF.getShadow(*i),
1582 IRB.CreateConstGEP2_32(VarArgArrayTy, VarArgShadow, 0, n));
1583 Args.push_back(*i);
1584 }
1585 }
1586
1587 CallSite NewCS;
1588 if (InvokeInst *II = dyn_cast<InvokeInst>(CS.getInstruction())) {
1589 NewCS = IRB.CreateInvoke(Func, II->getNormalDest(), II->getUnwindDest(),
1590 Args);
1591 } else {
1592 NewCS = IRB.CreateCall(Func, Args);
1593 }
1594 NewCS.setCallingConv(CS.getCallingConv());
1595 NewCS.setAttributes(CS.getAttributes().removeAttributes(
1596 *DFSF.DFS.Ctx, AttributeSet::ReturnIndex,
1597 AttributeFuncs::typeIncompatible(NewCS.getInstruction()->getType())));
1598
1599 if (Next) {
1600 ExtractValueInst *ExVal =
1601 ExtractValueInst::Create(NewCS.getInstruction(), 0, "", Next);
1602 DFSF.SkipInsts.insert(ExVal);
1603 ExtractValueInst *ExShadow =
1604 ExtractValueInst::Create(NewCS.getInstruction(), 1, "", Next);
1605 DFSF.SkipInsts.insert(ExShadow);
1606 DFSF.setShadow(ExVal, ExShadow);
1607 DFSF.NonZeroChecks.push_back(ExShadow);
1608
1609 CS.getInstruction()->replaceAllUsesWith(ExVal);
1610 }
1611
1612 CS.getInstruction()->eraseFromParent();
1613 }
1614 }
1615
1616 void DFSanVisitor::visitPHINode(PHINode &PN) {
1617 PHINode *ShadowPN =
1618 PHINode::Create(DFSF.DFS.ShadowTy, PN.getNumIncomingValues(), "", &PN);
1619
1620 // Give the shadow phi node valid predecessors to fool SplitEdge into working.
1621 Value *UndefShadow = UndefValue::get(DFSF.DFS.ShadowTy);
1622 for (PHINode::block_iterator i = PN.block_begin(), e = PN.block_end(); i != e;
1623 ++i) {
1624 ShadowPN->addIncoming(UndefShadow, *i);
1625 }
1626
1627 DFSF.PHIFixups.push_back(std::make_pair(&PN, ShadowPN));
1628 DFSF.setShadow(&PN, ShadowPN);
1629 }
1630