//===- MemorySanitizer.cpp - detector of uninitialized reads --------------===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
//
/// \file
/// This file is a part of MemorySanitizer, a detector of uninitialized
/// reads.
///
/// The algorithm of the tool is similar to Memcheck
/// (http://goo.gl/QKbem). We associate a few shadow bits with every
/// byte of the application memory, poison the shadow of the malloc-ed
/// or alloca-ed memory, load the shadow bits on every memory read,
/// propagate the shadow bits through some of the arithmetic
/// instructions (including MOV), store the shadow bits on every memory
/// write, and report a bug on some other instructions (e.g. JMP) if the
/// associated shadow is poisoned.
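///
/// For instance (an illustrative approximation): shadow propagation through
/// an integer add can be thought of as an OR of the argument shadows: for
/// c = a + b the instrumented code computes c' = a' | b', so c is treated as
/// poisoned if any bit of a or b is. The exact propagation rule is chosen
/// per instruction by the visitor below.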
///
/// But there are differences too. The first and the major one:
/// compiler instrumentation instead of binary instrumentation. This
/// gives us much better register allocation, possible compiler
/// optimizations and a fast start-up. But this brings the major issue
/// as well: msan needs to see all program events, including system
/// calls and reads/writes in system libraries, so we either need to
/// compile *everything* with msan or use a binary translation
/// component (e.g. DynamoRIO) to instrument pre-built libraries.
/// Another difference from Memcheck is that we use 8 shadow bits per
/// byte of application memory and use a direct shadow mapping. This
/// greatly simplifies the instrumentation code and avoids races on
/// shadow updates (Memcheck is single-threaded so races are not a
/// concern there. Memcheck uses 2 shadow bits per byte with a slow
/// path storage that uses 8 bits per byte).
///
/// The default value of shadow is 0, which means "clean" (not poisoned).
///
/// Every module initializer should call __msan_init to ensure that the
/// shadow memory is ready. On error, __msan_warning is called. Since
/// parameters and return values may be passed via registers, we have a
/// specialized thread-local shadow for return values
/// (__msan_retval_tls) and parameters (__msan_param_tls).
///
/// Origin tracking.
///
/// MemorySanitizer can track origins (allocation points) of all uninitialized
/// values. This behavior is controlled with a flag (msan-track-origins) and is
/// disabled by default.
///
/// Origins are 4-byte values created and interpreted by the runtime library.
/// They are stored in a second shadow mapping, one 4-byte value for 4 bytes
/// of application memory. Propagation of origins is basically a bunch of
/// "select" instructions that pick the origin of a dirty argument, if an
/// instruction has one.
///
/// Every 4 aligned, consecutive bytes of application memory have one origin
/// value associated with them. If these bytes contain uninitialized data
/// coming from 2 different allocations, the last store wins. Because of this,
/// MemorySanitizer reports can show unrelated origins, but this is unlikely in
/// practice.
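///
/// For example (illustrative): if byte 0 of a 4-byte aligned region is
/// poisoned by a store of uninitialized data from allocation A, and byte 2
/// is later poisoned by a store from allocation B, the region's single
/// origin slot holds B's origin; a report on byte 0 would then (rarely, and
/// harmlessly in practice) point at B.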
///
/// Origins are meaningless for fully initialized values, so MemorySanitizer
/// avoids storing origin to memory when a fully initialized value is stored.
/// This way it avoids needlessly overwriting the origin of the 4-byte region
/// on a short (i.e. 1-byte) clean store, and it is also good for performance.
///
/// Atomic handling.
///
/// Ideally, every atomic store of an application value should update the
/// corresponding shadow location in an atomic way. Unfortunately, an atomic
/// store to two disjoint locations cannot be done without severe slowdown.
///
/// Therefore, we implement an approximation that may err on the safe side.
/// In this implementation, every atomically accessed location in the program
/// may only change from (partially) uninitialized to fully initialized, but
/// not the other way around. We load the shadow _after_ the application load,
/// and we store the shadow _before_ the app store. Also, we always store clean
/// shadow (if the application store is atomic). This way, if the store-load
/// pair constitutes a happens-before arc, shadow store and load are correctly
/// ordered such that the load will get either the value that was stored, or
/// some later value (which is always clean).
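///
/// A minimal sketch of the resulting ordering (illustrative only):
///
///   Thread 1 (writer)                Thread 2 (reader)
///   store clean shadow of *p         load *p          (atomic, acquire)
///   store *p (atomic, release)       load shadow of *p
///
/// If the reader observes the writer's store, the happens-before edge
/// guarantees it also observes the clean shadow written before it.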
///
/// This does not work very well with Compare-And-Swap (CAS) and
/// Read-Modify-Write (RMW) operations. To follow the above logic, CAS and RMW
/// must store the new shadow before the app operation, and load the shadow
/// after the app operation. Computers don't work this way. The current
/// implementation ignores the load aspect of CAS/RMW, always returning a clean
/// value. It implements the store part as a simple atomic store by storing a
/// clean shadow.
///
/// Instrumenting inline assembly.
///
/// For inline assembly code LLVM has little idea about which memory locations
/// become initialized depending on the arguments. It may be possible to figure
/// out which arguments are meant to point to inputs and outputs, but the
/// actual semantics can only be visible at runtime. In the Linux kernel it's
/// also possible that the arguments only indicate the offset for a base taken
/// from a segment register, so it's dangerous to treat any asm() arguments as
/// pointers. We take a conservative approach generating calls to
///   __msan_instrument_asm_store(ptr, size),
/// which defer the memory unpoisoning to the runtime library.
/// The latter can perform more complex address checks to figure out whether
/// it's safe to touch the shadow memory.
/// Like with atomic operations, we call __msan_instrument_asm_store() before
/// the assembly call, so that changes to the shadow memory will be seen by
/// other threads together with main memory initialization.
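///
/// As a hypothetical example: for
///   asm("rep stosb" : : "D"(p), "c"(n), "a"(0) : "memory");
/// where p is declared as char *p, only the pointer's static type is known,
/// so the conservative handler emits
///   __msan_instrument_asm_store(p, sizeof(char));
/// before the asm and lets the runtime decide whether unpoisoning is safe.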
///
/// KernelMemorySanitizer (KMSAN) implementation.
///
/// The major differences between KMSAN and MSan instrumentation are:
/// - KMSAN always tracks the origins and implies msan-keep-going=true;
/// - KMSAN allocates shadow and origin memory for each page separately, so
///   there are no explicit accesses to shadow and origin in the
///   instrumentation.
///   Shadow and origin values for a particular X-byte memory location
///   (X=1,2,4,8) are accessed through pointers obtained via the
///     __msan_metadata_ptr_for_load_X(ptr)
///     __msan_metadata_ptr_for_store_X(ptr)
///   functions. The corresponding functions check that the X-byte accesses
///   are possible and return the pointers to shadow and origin memory (see
///   the sketch after this list).
///   Arbitrary sized accesses are handled with:
///     __msan_metadata_ptr_for_load_n(ptr, size)
///     __msan_metadata_ptr_for_store_n(ptr, size);
/// - TLS variables are stored in a single per-task struct. A call to a
///   function __msan_get_context_state() returning a pointer to that struct
///   is inserted into every instrumented function before the entry block;
/// - __msan_warning() takes a 32-bit origin parameter;
/// - local variables are poisoned with __msan_poison_alloca() upon function
///   entry and unpoisoned with __msan_unpoison_alloca() before leaving the
///   function;
/// - the pass doesn't declare any global variables or add global constructors
///   to the translation unit.
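///
/// The sketch promised above (illustrative): a 4-byte KMSAN load of *p is
/// instrumented roughly as
///   { shadow_ptr, origin_ptr } = __msan_metadata_ptr_for_load_4(p)
/// followed by loads through the returned pointers, instead of computing the
/// shadow address inline as userspace MSan does.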
///
/// Also, KMSAN currently ignores uninitialized memory passed into inline asm
/// calls, making sure we're on the safe side wrt. possible false positives.
///
/// KernelMemorySanitizer only supports X86_64 at the moment.
///
//===----------------------------------------------------------------------===//

#include "llvm/Transforms/Instrumentation/MemorySanitizer.h"
#include "llvm/ADT/APInt.h"
#include "llvm/ADT/ArrayRef.h"
#include "llvm/ADT/DepthFirstIterator.h"
#include "llvm/ADT/SmallSet.h"
#include "llvm/ADT/SmallString.h"
#include "llvm/ADT/SmallVector.h"
#include "llvm/ADT/StringExtras.h"
#include "llvm/ADT/StringRef.h"
#include "llvm/ADT/Triple.h"
#include "llvm/Analysis/TargetLibraryInfo.h"
#include "llvm/IR/Argument.h"
#include "llvm/IR/Attributes.h"
#include "llvm/IR/BasicBlock.h"
#include "llvm/IR/CallSite.h"
#include "llvm/IR/CallingConv.h"
#include "llvm/IR/Constant.h"
#include "llvm/IR/Constants.h"
#include "llvm/IR/DataLayout.h"
#include "llvm/IR/DerivedTypes.h"
#include "llvm/IR/Function.h"
#include "llvm/IR/GlobalValue.h"
#include "llvm/IR/GlobalVariable.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/InlineAsm.h"
#include "llvm/IR/InstVisitor.h"
#include "llvm/IR/InstrTypes.h"
#include "llvm/IR/Instruction.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/IntrinsicInst.h"
#include "llvm/IR/Intrinsics.h"
#include "llvm/IR/IntrinsicsX86.h"
#include "llvm/IR/LLVMContext.h"
#include "llvm/IR/MDBuilder.h"
#include "llvm/IR/Module.h"
#include "llvm/IR/Type.h"
#include "llvm/IR/Value.h"
#include "llvm/IR/ValueMap.h"
#include "llvm/InitializePasses.h"
#include "llvm/Pass.h"
#include "llvm/Support/AtomicOrdering.h"
#include "llvm/Support/Casting.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Compiler.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/ErrorHandling.h"
#include "llvm/Support/MathExtras.h"
#include "llvm/Support/raw_ostream.h"
#include "llvm/Transforms/Instrumentation.h"
#include "llvm/Transforms/Utils/BasicBlockUtils.h"
#include "llvm/Transforms/Utils/Local.h"
#include "llvm/Transforms/Utils/ModuleUtils.h"
#include <algorithm>
#include <cassert>
#include <cstddef>
#include <cstdint>
#include <memory>
#include <string>
#include <tuple>

using namespace llvm;

#define DEBUG_TYPE "msan"

static const unsigned kOriginSize = 4;
static const Align kMinOriginAlignment = Align(4);
static const Align kShadowTLSAlignment = Align(8);

// These constants must be kept in sync with the ones in msan.h.
static const unsigned kParamTLSSize = 800;
static const unsigned kRetvalTLSSize = 800;
// Access sizes are powers of two: 1, 2, 4, 8.
static const size_t kNumberOfAccessSizes = 4;

/// Track origins of uninitialized values.
///
/// Adds a section to MemorySanitizer report that points to the allocation
/// (stack or heap) the uninitialized bits came from originally.
static cl::opt<int> ClTrackOrigins("msan-track-origins",
    cl::desc("Track origins (allocation sites) of poisoned memory"),
    cl::Hidden, cl::init(0));

static cl::opt<bool> ClKeepGoing("msan-keep-going",
    cl::desc("keep going after reporting a UMR"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClPoisonStack("msan-poison-stack",
    cl::desc("poison uninitialized stack variables"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClPoisonStackWithCall("msan-poison-stack-with-call",
    cl::desc("poison uninitialized stack variables with a call"),
    cl::Hidden, cl::init(false));

static cl::opt<int> ClPoisonStackPattern("msan-poison-stack-pattern",
    cl::desc("poison uninitialized stack variables with the given pattern"),
    cl::Hidden, cl::init(0xff));

static cl::opt<bool> ClPoisonUndef("msan-poison-undef",
    cl::desc("poison undef temps"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmp("msan-handle-icmp",
    cl::desc("propagate shadow through ICmpEQ and ICmpNE"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClHandleICmpExact("msan-handle-icmp-exact",
    cl::desc("exact handling of relational integer ICmp"),
    cl::Hidden, cl::init(false));

static cl::opt<bool> ClHandleLifetimeIntrinsics(
    "msan-handle-lifetime-intrinsics",
    cl::desc(
        "when possible, poison scoped variables at the beginning of the scope "
        "(slower, but more precise)"),
    cl::Hidden, cl::init(true));

// When compiling the Linux kernel, we sometimes see false positives related to
// MSan being unable to understand that inline assembly calls may initialize
// local variables.
// This flag makes the compiler conservatively unpoison every memory location
// passed into an assembly call. Note that this may cause false positives.
// Because it's impossible to figure out the array sizes, we can only unpoison
// the first sizeof(type) bytes for each type* pointer.
// The instrumentation is only enabled in KMSAN builds, and only if
// -msan-handle-asm-conservative is on. This is done because we may want to
// quickly disable assembly instrumentation when it breaks.
static cl::opt<bool> ClHandleAsmConservative(
    "msan-handle-asm-conservative",
    cl::desc("conservative handling of inline assembly"), cl::Hidden,
    cl::init(true));

// This flag controls whether we check the shadow of the address
// operand of load or store. Such bugs are very rare, since load from
// a garbage address typically results in SEGV, but still happen
// (e.g. only lower bits of address are garbage, or the access happens
// early at program startup where malloc-ed memory is more likely to
// be zeroed). As of 2012-08-28 this flag adds 20% slowdown.
static cl::opt<bool> ClCheckAccessAddress("msan-check-access-address",
    cl::desc("report accesses through a pointer which has poisoned shadow"),
    cl::Hidden, cl::init(true));

static cl::opt<bool> ClDumpStrictInstructions("msan-dump-strict-instructions",
    cl::desc("print out instructions with default strict semantics"),
    cl::Hidden, cl::init(false));

static cl::opt<int> ClInstrumentationWithCallThreshold(
    "msan-instrumentation-with-call-threshold",
    cl::desc(
        "If the function being instrumented requires more than "
        "this number of checks and origin stores, use callbacks instead of "
        "inline checks (-1 means never use callbacks)."),
    cl::Hidden, cl::init(3500));

static cl::opt<bool>
    ClEnableKmsan("msan-kernel",
                  cl::desc("Enable KernelMemorySanitizer instrumentation"),
                  cl::Hidden, cl::init(false));

// This is an experiment to enable handling of cases where shadow is a non-zero
// compile-time constant. For some unexplainable reason they were silently
// ignored in the instrumentation.
static cl::opt<bool> ClCheckConstantShadow("msan-check-constant-shadow",
    cl::desc("Insert checks for constant shadow values"),
    cl::Hidden, cl::init(false));

// This is off by default because of a bug in gold:
// https://sourceware.org/bugzilla/show_bug.cgi?id=19002
static cl::opt<bool> ClWithComdat("msan-with-comdat",
    cl::desc("Place MSan constructors in comdat sections"),
    cl::Hidden, cl::init(false));

// These options allow the user to specify custom memory map parameters.
// See MemoryMapParams for details.
static cl::opt<uint64_t> ClAndMask("msan-and-mask",
    cl::desc("Define custom MSan AndMask"),
    cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClXorMask("msan-xor-mask",
    cl::desc("Define custom MSan XorMask"),
    cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClShadowBase("msan-shadow-base",
    cl::desc("Define custom MSan ShadowBase"),
    cl::Hidden, cl::init(0));

static cl::opt<uint64_t> ClOriginBase("msan-origin-base",
    cl::desc("Define custom MSan OriginBase"),
    cl::Hidden, cl::init(0));

static const char *const kMsanModuleCtorName = "msan.module_ctor";
static const char *const kMsanInitName = "__msan_init";

namespace {

// Memory map parameters used in application-to-shadow address calculation.
// Offset = (Addr & ~AndMask) ^ XorMask
// Shadow = ShadowBase + Offset
// Origin = OriginBase + Offset
struct MemoryMapParams {
  uint64_t AndMask;
  uint64_t XorMask;
  uint64_t ShadowBase;
  uint64_t OriginBase;
};

struct PlatformMemoryMapParams {
  const MemoryMapParams *bits32;
  const MemoryMapParams *bits64;
};

} // end anonymous namespace

// i386 Linux
static const MemoryMapParams Linux_I386_MemoryMapParams = {
    0x000080000000, // AndMask
    0,              // XorMask (not used)
    0,              // ShadowBase (not used)
    0x000040000000, // OriginBase
};

// x86_64 Linux
static const MemoryMapParams Linux_X86_64_MemoryMapParams = {
#ifdef MSAN_LINUX_X86_64_OLD_MAPPING
    0x400000000000, // AndMask
    0,              // XorMask (not used)
    0,              // ShadowBase (not used)
    0x200000000000, // OriginBase
#else
    0,              // AndMask (not used)
    0x500000000000, // XorMask
    0,              // ShadowBase (not used)
    0x100000000000, // OriginBase
#endif
};
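
// Worked example (illustrative) for the default x86_64 Linux mapping above:
// with AndMask == 0 and XorMask == 0x500000000000,
//   Offset = Addr ^ 0x500000000000
// so for Addr = 0x700000001000 the shadow lives at 0x200000001000 and, with
// origin tracking on, the origin at 0x100000000000 + 0x200000001000 =
// 0x300000001000 (per the Offset/Shadow/Origin formulas above MemoryMapParams).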

// mips64 Linux
static const MemoryMapParams Linux_MIPS64_MemoryMapParams = {
    0,              // AndMask (not used)
    0x008000000000, // XorMask
    0,              // ShadowBase (not used)
    0x002000000000, // OriginBase
};

// ppc64 Linux
static const MemoryMapParams Linux_PowerPC64_MemoryMapParams = {
    0xE00000000000, // AndMask
    0x100000000000, // XorMask
    0x080000000000, // ShadowBase
    0x1C0000000000, // OriginBase
};

// aarch64 Linux
static const MemoryMapParams Linux_AArch64_MemoryMapParams = {
    0,             // AndMask (not used)
    0x06000000000, // XorMask
    0,             // ShadowBase (not used)
    0x01000000000, // OriginBase
};

// i386 FreeBSD
static const MemoryMapParams FreeBSD_I386_MemoryMapParams = {
    0x000180000000, // AndMask
    0x000040000000, // XorMask
    0x000020000000, // ShadowBase
    0x000700000000, // OriginBase
};

// x86_64 FreeBSD
static const MemoryMapParams FreeBSD_X86_64_MemoryMapParams = {
    0xc00000000000, // AndMask
    0x200000000000, // XorMask
    0x100000000000, // ShadowBase
    0x380000000000, // OriginBase
};

// x86_64 NetBSD
static const MemoryMapParams NetBSD_X86_64_MemoryMapParams = {
    0,              // AndMask
    0x500000000000, // XorMask
    0,              // ShadowBase
    0x100000000000, // OriginBase
};

static const PlatformMemoryMapParams Linux_X86_MemoryMapParams = {
    &Linux_I386_MemoryMapParams,
    &Linux_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_MIPS_MemoryMapParams = {
    nullptr,
    &Linux_MIPS64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_PowerPC_MemoryMapParams = {
    nullptr,
    &Linux_PowerPC64_MemoryMapParams,
};

static const PlatformMemoryMapParams Linux_ARM_MemoryMapParams = {
    nullptr,
    &Linux_AArch64_MemoryMapParams,
};

static const PlatformMemoryMapParams FreeBSD_X86_MemoryMapParams = {
    &FreeBSD_I386_MemoryMapParams,
    &FreeBSD_X86_64_MemoryMapParams,
};

static const PlatformMemoryMapParams NetBSD_X86_MemoryMapParams = {
    nullptr,
    &NetBSD_X86_64_MemoryMapParams,
};

namespace {

/// Instrument functions of a module to detect uninitialized reads.
///
/// Instantiating MemorySanitizer inserts the msan runtime library API function
/// declarations into the module if they don't exist already. Instantiating
/// ensures the __msan_init function is in the list of global constructors for
/// the module.
class MemorySanitizer {
public:
  MemorySanitizer(Module &M, MemorySanitizerOptions Options)
      : CompileKernel(Options.Kernel), TrackOrigins(Options.TrackOrigins),
        Recover(Options.Recover) {
    initializeModule(M);
  }

  // MSan cannot be moved or copied because of MapParams.
  MemorySanitizer(MemorySanitizer &&) = delete;
  MemorySanitizer &operator=(MemorySanitizer &&) = delete;
  MemorySanitizer(const MemorySanitizer &) = delete;
  MemorySanitizer &operator=(const MemorySanitizer &) = delete;

  bool sanitizeFunction(Function &F, TargetLibraryInfo &TLI);

private:
  friend struct MemorySanitizerVisitor;
  friend struct VarArgAMD64Helper;
  friend struct VarArgMIPS64Helper;
  friend struct VarArgAArch64Helper;
  friend struct VarArgPowerPC64Helper;

  void initializeModule(Module &M);
  void initializeCallbacks(Module &M);
  void createKernelApi(Module &M);
  void createUserspaceApi(Module &M);

  /// True if we're compiling the Linux kernel.
  bool CompileKernel;
  /// Track origins (allocation points) of uninitialized values.
  int TrackOrigins;
  bool Recover;

  LLVMContext *C;
  Type *IntptrTy;
  Type *OriginTy;

  // XxxTLS variables represent the per-thread state in MSan and per-task state
  // in KMSAN.
  // For the userspace these point to thread-local globals. In the kernel land
  // they point to the members of a per-task struct obtained via a call to
  // __msan_get_context_state().

  /// Thread-local shadow storage for function parameters.
  Value *ParamTLS;

  /// Thread-local origin storage for function parameters.
  Value *ParamOriginTLS;

  /// Thread-local shadow storage for function return value.
  Value *RetvalTLS;

  /// Thread-local origin storage for function return value.
  Value *RetvalOriginTLS;

  /// Thread-local shadow storage for in-register va_arg function
  /// parameters (x86_64-specific).
  Value *VAArgTLS;

  /// Thread-local origin storage for in-register va_arg function
  /// parameters (x86_64-specific).
  Value *VAArgOriginTLS;

  /// Thread-local shadow storage for va_arg overflow area
  /// (x86_64-specific).
  Value *VAArgOverflowSizeTLS;

  /// Thread-local space used to pass origin value to the UMR reporting
  /// function.
  Value *OriginTLS;

  /// Are the instrumentation callbacks set up?
  bool CallbacksInitialized = false;

  /// The run-time callback to print a warning.
  FunctionCallee WarningFn;

  // These arrays are indexed by log2(AccessSize).
  FunctionCallee MaybeWarningFn[kNumberOfAccessSizes];
  FunctionCallee MaybeStoreOriginFn[kNumberOfAccessSizes];

  /// Run-time helper that generates a new origin value for a stack
  /// allocation.
  FunctionCallee MsanSetAllocaOrigin4Fn;

  /// Run-time helper that poisons stack on function entry.
  FunctionCallee MsanPoisonStackFn;

  /// Run-time helper that records a store (or any event) of an
  /// uninitialized value and returns an updated origin id encoding this info.
  FunctionCallee MsanChainOriginFn;

  /// MSan runtime replacements for memmove, memcpy and memset.
  FunctionCallee MemmoveFn, MemcpyFn, MemsetFn;

  /// KMSAN callback for task-local function argument shadow.
  StructType *MsanContextStateTy;
  FunctionCallee MsanGetContextStateFn;

  /// Functions for poisoning/unpoisoning local variables.
  FunctionCallee MsanPoisonAllocaFn, MsanUnpoisonAllocaFn;

  /// Each of the MsanMetadataPtrXxx functions returns a pair of shadow/origin
  /// pointers.
  FunctionCallee MsanMetadataPtrForLoadN, MsanMetadataPtrForStoreN;
  FunctionCallee MsanMetadataPtrForLoad_1_8[4];
  FunctionCallee MsanMetadataPtrForStore_1_8[4];
  FunctionCallee MsanInstrumentAsmStoreFn;

  /// Helper to choose between different MsanMetadataPtrXxx().
  FunctionCallee getKmsanShadowOriginAccessFn(bool isStore, int size);

  /// Memory map parameters used in application-to-shadow calculation.
  const MemoryMapParams *MapParams;

  /// Custom memory map parameters used when -msan-shadow-base or
  /// -msan-origin-base is provided.
  MemoryMapParams CustomMapParams;

  MDNode *ColdCallWeights;

  /// Branch weights for origin store.
  MDNode *OriginStoreWeights;

  /// An empty volatile inline asm that prevents callback merge.
  InlineAsm *EmptyAsm;
};

void insertModuleCtor(Module &M) {
  getOrCreateSanitizerCtorAndInitFunctions(
      M, kMsanModuleCtorName, kMsanInitName,
      /*InitArgTypes=*/{},
      /*InitArgs=*/{},
      // This callback is invoked when the functions are created the first
      // time. Hook them into the global ctors list in that case:
      [&](Function *Ctor, FunctionCallee) {
        if (!ClWithComdat) {
          appendToGlobalCtors(M, Ctor, 0);
          return;
        }
        Comdat *MsanCtorComdat = M.getOrInsertComdat(kMsanModuleCtorName);
        Ctor->setComdat(MsanCtorComdat);
        appendToGlobalCtors(M, Ctor, 0, Ctor);
      });
}

/// A legacy function pass for msan instrumentation.
///
/// Instruments functions to detect uninitialized reads.
struct MemorySanitizerLegacyPass : public FunctionPass {
  // Pass identification, replacement for typeid.
  static char ID;

  MemorySanitizerLegacyPass(MemorySanitizerOptions Options = {})
      : FunctionPass(ID), Options(Options) {}
  StringRef getPassName() const override { return "MemorySanitizerLegacyPass"; }

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    AU.addRequired<TargetLibraryInfoWrapperPass>();
  }

  bool runOnFunction(Function &F) override {
    return MSan->sanitizeFunction(
        F, getAnalysis<TargetLibraryInfoWrapperPass>().getTLI(F));
  }
  bool doInitialization(Module &M) override;

  Optional<MemorySanitizer> MSan;
  MemorySanitizerOptions Options;
};

template <class T> T getOptOrDefault(const cl::opt<T> &Opt, T Default) {
  return (Opt.getNumOccurrences() > 0) ? Opt : Default;
}

} // end anonymous namespace

MemorySanitizerOptions::MemorySanitizerOptions(int TO, bool R, bool K)
    : Kernel(getOptOrDefault(ClEnableKmsan, K)),
      TrackOrigins(getOptOrDefault(ClTrackOrigins, Kernel ? 2 : TO)),
      Recover(getOptOrDefault(ClKeepGoing, Kernel || R)) {}
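
// For instance (illustrative): MemorySanitizerOptions(/*TO=*/0, /*R=*/false,
// /*K=*/true) with no -msan-* flags on the command line resolves to
// Kernel = true, TrackOrigins = 2 and Recover = true, matching the KMSAN
// behavior described in the file comment; explicit flags win via
// getOptOrDefault().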

PreservedAnalyses MemorySanitizerPass::run(Function &F,
                                           FunctionAnalysisManager &FAM) {
  MemorySanitizer Msan(*F.getParent(), Options);
  if (Msan.sanitizeFunction(F, FAM.getResult<TargetLibraryAnalysis>(F)))
    return PreservedAnalyses::none();
  return PreservedAnalyses::all();
}

PreservedAnalyses MemorySanitizerPass::run(Module &M,
                                           ModuleAnalysisManager &AM) {
  if (Options.Kernel)
    return PreservedAnalyses::all();
  insertModuleCtor(M);
  return PreservedAnalyses::none();
}

char MemorySanitizerLegacyPass::ID = 0;

INITIALIZE_PASS_BEGIN(MemorySanitizerLegacyPass, "msan",
                      "MemorySanitizer: detects uninitialized reads.", false,
                      false)
INITIALIZE_PASS_DEPENDENCY(TargetLibraryInfoWrapperPass)
INITIALIZE_PASS_END(MemorySanitizerLegacyPass, "msan",
                    "MemorySanitizer: detects uninitialized reads.", false,
                    false)

FunctionPass *
llvm::createMemorySanitizerLegacyPassPass(MemorySanitizerOptions Options) {
  return new MemorySanitizerLegacyPass(Options);
}

/// Create a non-const global initialized with the given string.
///
/// Creates a writable global for Str so that we can pass it to the
/// run-time lib. Runtime uses first 4 bytes of the string to store the
/// frame ID, so the string needs to be mutable.
static GlobalVariable *createPrivateNonConstGlobalForString(Module &M,
                                                            StringRef Str) {
  Constant *StrConst = ConstantDataArray::getString(M.getContext(), Str);
  return new GlobalVariable(M, StrConst->getType(), /*isConstant=*/false,
                            GlobalValue::PrivateLinkage, StrConst, "");
}

/// Create KMSAN API callbacks.
void MemorySanitizer::createKernelApi(Module &M) {
  IRBuilder<> IRB(*C);

  // These will be initialized in insertKmsanPrologue().
  RetvalTLS = nullptr;
  RetvalOriginTLS = nullptr;
  ParamTLS = nullptr;
  ParamOriginTLS = nullptr;
  VAArgTLS = nullptr;
  VAArgOriginTLS = nullptr;
  VAArgOverflowSizeTLS = nullptr;
  // OriginTLS is unused in the kernel.
  OriginTLS = nullptr;

  // __msan_warning() in the kernel takes an origin.
  WarningFn = M.getOrInsertFunction("__msan_warning", IRB.getVoidTy(),
                                    IRB.getInt32Ty());
  // Requests the per-task context state (kmsan_context_state*) from the
  // runtime library.
  MsanContextStateTy = StructType::get(
      ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
      ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8),
      ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8),
      ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8), /* va_arg_origin */
      IRB.getInt64Ty(), ArrayType::get(OriginTy, kParamTLSSize / 4), OriginTy,
      OriginTy);
  MsanGetContextStateFn = M.getOrInsertFunction(
      "__msan_get_context_state", PointerType::get(MsanContextStateTy, 0));

  Type *RetTy = StructType::get(PointerType::get(IRB.getInt8Ty(), 0),
                                PointerType::get(IRB.getInt32Ty(), 0));

  for (int ind = 0, size = 1; ind < 4; ind++, size <<= 1) {
    std::string name_load =
        "__msan_metadata_ptr_for_load_" + std::to_string(size);
    std::string name_store =
        "__msan_metadata_ptr_for_store_" + std::to_string(size);
    MsanMetadataPtrForLoad_1_8[ind] = M.getOrInsertFunction(
        name_load, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
    MsanMetadataPtrForStore_1_8[ind] = M.getOrInsertFunction(
        name_store, RetTy, PointerType::get(IRB.getInt8Ty(), 0));
  }

  MsanMetadataPtrForLoadN = M.getOrInsertFunction(
      "__msan_metadata_ptr_for_load_n", RetTy,
      PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());
  MsanMetadataPtrForStoreN = M.getOrInsertFunction(
      "__msan_metadata_ptr_for_store_n", RetTy,
      PointerType::get(IRB.getInt8Ty(), 0), IRB.getInt64Ty());

  // Functions for poisoning and unpoisoning memory.
  MsanPoisonAllocaFn =
      M.getOrInsertFunction("__msan_poison_alloca", IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IntptrTy, IRB.getInt8PtrTy());
  MsanUnpoisonAllocaFn = M.getOrInsertFunction(
      "__msan_unpoison_alloca", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy);
}

static Constant *getOrInsertGlobal(Module &M, StringRef Name, Type *Ty) {
  return M.getOrInsertGlobal(Name, Ty, [&] {
    return new GlobalVariable(M, Ty, false, GlobalVariable::ExternalLinkage,
                              nullptr, Name, nullptr,
                              GlobalVariable::InitialExecTLSModel);
  });
}

/// Insert declarations for userspace-specific functions and globals.
void MemorySanitizer::createUserspaceApi(Module &M) {
  IRBuilder<> IRB(*C);
  // Create the callback.
  // FIXME: this function should have "Cold" calling conv,
  // which is not yet implemented.
  StringRef WarningFnName = Recover ? "__msan_warning"
                                    : "__msan_warning_noreturn";
  WarningFn = M.getOrInsertFunction(WarningFnName, IRB.getVoidTy());

  // Create the global TLS variables.
  RetvalTLS =
      getOrInsertGlobal(M, "__msan_retval_tls",
                        ArrayType::get(IRB.getInt64Ty(), kRetvalTLSSize / 8));

  RetvalOriginTLS = getOrInsertGlobal(M, "__msan_retval_origin_tls", OriginTy);

  ParamTLS =
      getOrInsertGlobal(M, "__msan_param_tls",
                        ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));

  ParamOriginTLS =
      getOrInsertGlobal(M, "__msan_param_origin_tls",
                        ArrayType::get(OriginTy, kParamTLSSize / 4));

  VAArgTLS =
      getOrInsertGlobal(M, "__msan_va_arg_tls",
                        ArrayType::get(IRB.getInt64Ty(), kParamTLSSize / 8));

  VAArgOriginTLS =
      getOrInsertGlobal(M, "__msan_va_arg_origin_tls",
                        ArrayType::get(OriginTy, kParamTLSSize / 4));

  VAArgOverflowSizeTLS =
      getOrInsertGlobal(M, "__msan_va_arg_overflow_size_tls", IRB.getInt64Ty());
  OriginTLS = getOrInsertGlobal(M, "__msan_origin_tls", IRB.getInt32Ty());

  for (size_t AccessSizeIndex = 0; AccessSizeIndex < kNumberOfAccessSizes;
       AccessSizeIndex++) {
    unsigned AccessSize = 1 << AccessSizeIndex;
    std::string FunctionName = "__msan_maybe_warning_" + itostr(AccessSize);
    MaybeWarningFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt32Ty());

    FunctionName = "__msan_maybe_store_origin_" + itostr(AccessSize);
    MaybeStoreOriginFn[AccessSizeIndex] = M.getOrInsertFunction(
        FunctionName, IRB.getVoidTy(), IRB.getIntNTy(AccessSize * 8),
        IRB.getInt8PtrTy(), IRB.getInt32Ty());
  }

  MsanSetAllocaOrigin4Fn = M.getOrInsertFunction(
      "__msan_set_alloca_origin4", IRB.getVoidTy(), IRB.getInt8PtrTy(), IntptrTy,
      IRB.getInt8PtrTy(), IntptrTy);
  MsanPoisonStackFn =
      M.getOrInsertFunction("__msan_poison_stack", IRB.getVoidTy(),
                            IRB.getInt8PtrTy(), IntptrTy);
}

/// Insert extern declaration of runtime-provided functions and globals.
void MemorySanitizer::initializeCallbacks(Module &M) {
  // Only do this once.
  if (CallbacksInitialized)
    return;

  IRBuilder<> IRB(*C);
  // Initialize callbacks that are common for kernel and userspace
  // instrumentation.
  MsanChainOriginFn = M.getOrInsertFunction(
      "__msan_chain_origin", IRB.getInt32Ty(), IRB.getInt32Ty());
  MemmoveFn = M.getOrInsertFunction(
      "__msan_memmove", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy);
  MemcpyFn = M.getOrInsertFunction(
      "__msan_memcpy", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(),
      IRB.getInt8PtrTy(), IntptrTy);
  MemsetFn = M.getOrInsertFunction(
      "__msan_memset", IRB.getInt8PtrTy(), IRB.getInt8PtrTy(), IRB.getInt32Ty(),
      IntptrTy);
  // We insert an empty inline asm after __msan_report* to avoid callback merge.
  EmptyAsm = InlineAsm::get(FunctionType::get(IRB.getVoidTy(), false),
                            StringRef(""), StringRef(""),
                            /*hasSideEffects=*/true);

  MsanInstrumentAsmStoreFn =
      M.getOrInsertFunction("__msan_instrument_asm_store", IRB.getVoidTy(),
                            PointerType::get(IRB.getInt8Ty(), 0), IntptrTy);

  if (CompileKernel) {
    createKernelApi(M);
  } else {
    createUserspaceApi(M);
  }
  CallbacksInitialized = true;
}

FunctionCallee MemorySanitizer::getKmsanShadowOriginAccessFn(bool isStore,
                                                             int size) {
  FunctionCallee *Fns =
      isStore ? MsanMetadataPtrForStore_1_8 : MsanMetadataPtrForLoad_1_8;
  switch (size) {
  case 1:
    return Fns[0];
  case 2:
    return Fns[1];
  case 4:
    return Fns[2];
  case 8:
    return Fns[3];
  default:
    return nullptr;
  }
}

/// Module-level initialization.
///
/// Inserts a call to __msan_init into the module's constructor list.
void MemorySanitizer::initializeModule(Module &M) {
  auto &DL = M.getDataLayout();

  bool ShadowPassed = ClShadowBase.getNumOccurrences() > 0;
  bool OriginPassed = ClOriginBase.getNumOccurrences() > 0;
  // Check the overrides first
  if (ShadowPassed || OriginPassed) {
    CustomMapParams.AndMask = ClAndMask;
    CustomMapParams.XorMask = ClXorMask;
    CustomMapParams.ShadowBase = ClShadowBase;
    CustomMapParams.OriginBase = ClOriginBase;
    MapParams = &CustomMapParams;
  } else {
    Triple TargetTriple(M.getTargetTriple());
    switch (TargetTriple.getOS()) {
    case Triple::FreeBSD:
      switch (TargetTriple.getArch()) {
      case Triple::x86_64:
        MapParams = FreeBSD_X86_MemoryMapParams.bits64;
        break;
      case Triple::x86:
        MapParams = FreeBSD_X86_MemoryMapParams.bits32;
        break;
      default:
        report_fatal_error("unsupported architecture");
      }
      break;
    case Triple::NetBSD:
      switch (TargetTriple.getArch()) {
      case Triple::x86_64:
        MapParams = NetBSD_X86_MemoryMapParams.bits64;
        break;
      default:
        report_fatal_error("unsupported architecture");
      }
      break;
    case Triple::Linux:
      switch (TargetTriple.getArch()) {
      case Triple::x86_64:
        MapParams = Linux_X86_MemoryMapParams.bits64;
        break;
      case Triple::x86:
        MapParams = Linux_X86_MemoryMapParams.bits32;
        break;
      case Triple::mips64:
      case Triple::mips64el:
        MapParams = Linux_MIPS_MemoryMapParams.bits64;
        break;
      case Triple::ppc64:
      case Triple::ppc64le:
        MapParams = Linux_PowerPC_MemoryMapParams.bits64;
        break;
      case Triple::aarch64:
      case Triple::aarch64_be:
        MapParams = Linux_ARM_MemoryMapParams.bits64;
        break;
      default:
        report_fatal_error("unsupported architecture");
      }
      break;
    default:
      report_fatal_error("unsupported operating system");
    }
  }

  C = &(M.getContext());
  IRBuilder<> IRB(*C);
  IntptrTy = IRB.getIntPtrTy(DL);
  OriginTy = IRB.getInt32Ty();

  ColdCallWeights = MDBuilder(*C).createBranchWeights(1, 1000);
  OriginStoreWeights = MDBuilder(*C).createBranchWeights(1, 1000);

  if (!CompileKernel) {
    if (TrackOrigins)
      M.getOrInsertGlobal("__msan_track_origins", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(
            M, IRB.getInt32Ty(), true, GlobalValue::WeakODRLinkage,
            IRB.getInt32(TrackOrigins), "__msan_track_origins");
      });

    if (Recover)
      M.getOrInsertGlobal("__msan_keep_going", IRB.getInt32Ty(), [&] {
        return new GlobalVariable(M, IRB.getInt32Ty(), true,
                                  GlobalValue::WeakODRLinkage,
                                  IRB.getInt32(Recover), "__msan_keep_going");
      });
  }
}

bool MemorySanitizerLegacyPass::doInitialization(Module &M) {
  if (!Options.Kernel)
    insertModuleCtor(M);
  MSan.emplace(M, Options);
  return true;
}

namespace {

/// A helper class that handles instrumentation of VarArg
/// functions on a particular platform.
///
/// Implementations are expected to insert the instrumentation
/// necessary to propagate argument shadow through VarArg function
/// calls. Visit* methods are called during an InstVisitor pass over
/// the function, and should avoid creating new basic blocks. A new
/// instance of this class is created for each instrumented function.
struct VarArgHelper {
  virtual ~VarArgHelper() = default;

  /// Visit a CallSite.
  virtual void visitCallSite(CallSite &CS, IRBuilder<> &IRB) = 0;

  /// Visit a va_start call.
  virtual void visitVAStartInst(VAStartInst &I) = 0;

  /// Visit a va_copy call.
  virtual void visitVACopyInst(VACopyInst &I) = 0;

  /// Finalize function instrumentation.
  ///
  /// This method is called after visiting all interesting (see above)
  /// instructions in a function.
  virtual void finalizeInstrumentation() = 0;
};

struct MemorySanitizerVisitor;

} // end anonymous namespace

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor);

static unsigned TypeSizeToSizeIndex(unsigned TypeSize) {
  if (TypeSize <= 8) return 0;
  return Log2_32_Ceil((TypeSize + 7) / 8);
}
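
// Illustrative mapping for TypeSizeToSizeIndex (TypeSize is in bits):
//   1..8   -> 0 (1-byte access)
//   9..16  -> 1 (2-byte access)
//   17..32 -> 2 (4-byte access)
//   33..64 -> 3 (8-byte access)
// Larger types produce indices >= kNumberOfAccessSizes, which makes the
// callers below fall back to the generic (inline) instrumentation path.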

namespace {

/// This class does all the work for a given function. Store and Load
/// instructions store and load corresponding shadow and origin
/// values. Most instructions propagate shadow from arguments to their
/// return values. Certain instructions (most importantly, BranchInst)
/// test their argument shadow and print reports (with a runtime call) if it's
/// non-zero.
struct MemorySanitizerVisitor : public InstVisitor<MemorySanitizerVisitor> {
  Function &F;
  MemorySanitizer &MS;
  SmallVector<PHINode *, 16> ShadowPHINodes, OriginPHINodes;
  ValueMap<Value*, Value*> ShadowMap, OriginMap;
  std::unique_ptr<VarArgHelper> VAHelper;
  const TargetLibraryInfo *TLI;
  BasicBlock *ActualFnStart;

  // The following flags disable parts of MSan instrumentation based on
  // blacklist contents and command-line options.
  bool InsertChecks;
  bool PropagateShadow;
  bool PoisonStack;
  bool PoisonUndef;
  bool CheckReturnValue;

  struct ShadowOriginAndInsertPoint {
    Value *Shadow;
    Value *Origin;
    Instruction *OrigIns;

    ShadowOriginAndInsertPoint(Value *S, Value *O, Instruction *I)
        : Shadow(S), Origin(O), OrigIns(I) {}
  };
  SmallVector<ShadowOriginAndInsertPoint, 16> InstrumentationList;
  bool InstrumentLifetimeStart = ClHandleLifetimeIntrinsics;
  SmallSet<AllocaInst *, 16> AllocaSet;
  SmallVector<std::pair<IntrinsicInst *, AllocaInst *>, 16> LifetimeStartList;
  SmallVector<StoreInst *, 16> StoreList;

  MemorySanitizerVisitor(Function &F, MemorySanitizer &MS,
                         const TargetLibraryInfo &TLI)
      : F(F), MS(MS), VAHelper(CreateVarArgHelper(F, MS, *this)), TLI(&TLI) {
    bool SanitizeFunction = F.hasFnAttribute(Attribute::SanitizeMemory);
    InsertChecks = SanitizeFunction;
    PropagateShadow = SanitizeFunction;
    PoisonStack = SanitizeFunction && ClPoisonStack;
    PoisonUndef = SanitizeFunction && ClPoisonUndef;
    // FIXME: Consider using SpecialCaseList to specify a list of functions that
    // must always return fully initialized values. For now, we hardcode "main".
    CheckReturnValue = SanitizeFunction && (F.getName() == "main");

    MS.initializeCallbacks(*F.getParent());
    if (MS.CompileKernel)
      ActualFnStart = insertKmsanPrologue(F);
    else
      ActualFnStart = &F.getEntryBlock();

    LLVM_DEBUG(if (!InsertChecks) dbgs()
               << "MemorySanitizer is not inserting checks into '"
               << F.getName() << "'\n");
  }

  Value *updateOrigin(Value *V, IRBuilder<> &IRB) {
    if (MS.TrackOrigins <= 1) return V;
    return IRB.CreateCall(MS.MsanChainOriginFn, V);
  }

  Value *originToIntptr(IRBuilder<> &IRB, Value *Origin) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    if (IntptrSize == kOriginSize) return Origin;
    assert(IntptrSize == kOriginSize * 2);
    Origin = IRB.CreateIntCast(Origin, MS.IntptrTy, /* isSigned */ false);
    return IRB.CreateOr(Origin, IRB.CreateShl(Origin, kOriginSize * 8));
  }
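
  // E.g. (illustrative) on a 64-bit target with origin 0xABCD, originToIntptr
  // yields 0x0000ABCD0000ABCD, so one intptr-sized store in paintOrigin()
  // below fills two adjacent 4-byte origin slots at once.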

  /// Fill memory range with the given origin value.
  void paintOrigin(IRBuilder<> &IRB, Value *Origin, Value *OriginPtr,
                   unsigned Size, Align Alignment) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    const Align IntptrAlignment = Align(DL.getABITypeAlignment(MS.IntptrTy));
    unsigned IntptrSize = DL.getTypeStoreSize(MS.IntptrTy);
    assert(IntptrAlignment >= kMinOriginAlignment);
    assert(IntptrSize >= kOriginSize);

    unsigned Ofs = 0;
    Align CurrentAlignment = Alignment;
    if (Alignment >= IntptrAlignment && IntptrSize > kOriginSize) {
      Value *IntptrOrigin = originToIntptr(IRB, Origin);
      Value *IntptrOriginPtr =
          IRB.CreatePointerCast(OriginPtr, PointerType::get(MS.IntptrTy, 0));
      for (unsigned i = 0; i < Size / IntptrSize; ++i) {
        Value *Ptr = i ? IRB.CreateConstGEP1_32(MS.IntptrTy, IntptrOriginPtr, i)
                       : IntptrOriginPtr;
        IRB.CreateAlignedStore(IntptrOrigin, Ptr, CurrentAlignment.value());
        Ofs += IntptrSize / kOriginSize;
        CurrentAlignment = IntptrAlignment;
      }
    }

    for (unsigned i = Ofs; i < (Size + kOriginSize - 1) / kOriginSize; ++i) {
      Value *GEP =
          i ? IRB.CreateConstGEP1_32(MS.OriginTy, OriginPtr, i) : OriginPtr;
      IRB.CreateAlignedStore(Origin, GEP, CurrentAlignment.value());
      CurrentAlignment = kMinOriginAlignment;
    }
  }

  void storeOrigin(IRBuilder<> &IRB, Value *Addr, Value *Shadow, Value *Origin,
                   Value *OriginPtr, Align Alignment, bool AsCall) {
    const DataLayout &DL = F.getParent()->getDataLayout();
    const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
    unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
    if (Shadow->getType()->isAggregateType()) {
      paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
                  OriginAlignment);
    } else {
      Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
      if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
        if (ClCheckConstantShadow && !ConstantShadow->isZeroValue())
          paintOrigin(IRB, updateOrigin(Origin, IRB), OriginPtr, StoreSize,
                      OriginAlignment);
        return;
      }

      unsigned TypeSizeInBits =
          DL.getTypeSizeInBits(ConvertedShadow->getType());
      unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
      if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
        FunctionCallee Fn = MS.MaybeStoreOriginFn[SizeIndex];
        Value *ConvertedShadow2 = IRB.CreateZExt(
            ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
        IRB.CreateCall(Fn, {ConvertedShadow2,
                            IRB.CreatePointerCast(Addr, IRB.getInt8PtrTy()),
                            Origin});
      } else {
        Value *Cmp = IRB.CreateICmpNE(
            ConvertedShadow, getCleanShadow(ConvertedShadow), "_mscmp");
        Instruction *CheckTerm = SplitBlockAndInsertIfThen(
            Cmp, &*IRB.GetInsertPoint(), false, MS.OriginStoreWeights);
        IRBuilder<> IRBNew(CheckTerm);
        paintOrigin(IRBNew, updateOrigin(Origin, IRBNew), OriginPtr, StoreSize,
                    OriginAlignment);
      }
    }
  }

  void materializeStores(bool InstrumentWithCalls) {
    for (StoreInst *SI : StoreList) {
      IRBuilder<> IRB(SI);
      Value *Val = SI->getValueOperand();
      Value *Addr = SI->getPointerOperand();
      Value *Shadow = SI->isAtomic() ? getCleanShadow(Val) : getShadow(Val);
      Value *ShadowPtr, *OriginPtr;
      Type *ShadowTy = Shadow->getType();
      const Align Alignment = assumeAligned(SI->getAlignment());
      const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
      std::tie(ShadowPtr, OriginPtr) =
          getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ true);

      StoreInst *NewSI =
          IRB.CreateAlignedStore(Shadow, ShadowPtr, Alignment.value());
      LLVM_DEBUG(dbgs() << "  STORE: " << *NewSI << "\n");
      (void)NewSI;

      if (SI->isAtomic())
        SI->setOrdering(addReleaseOrdering(SI->getOrdering()));

      if (MS.TrackOrigins && !SI->isAtomic())
        storeOrigin(IRB, Addr, Shadow, getOrigin(Val), OriginPtr,
                    OriginAlignment, InstrumentWithCalls);
    }
  }

  /// Helper function to insert a warning at IRB's current insert point.
  void insertWarningFn(IRBuilder<> &IRB, Value *Origin) {
    if (!Origin)
      Origin = (Value *)IRB.getInt32(0);
    if (MS.CompileKernel) {
      IRB.CreateCall(MS.WarningFn, Origin);
    } else {
      if (MS.TrackOrigins) {
        IRB.CreateStore(Origin, MS.OriginTLS);
      }
      IRB.CreateCall(MS.WarningFn, {});
    }
    IRB.CreateCall(MS.EmptyAsm, {});
    // FIXME: Insert UnreachableInst if !MS.Recover?
    // This may invalidate some of the following checks and needs to be done
    // at the very end.
  }

  void materializeOneCheck(Instruction *OrigIns, Value *Shadow, Value *Origin,
                           bool AsCall) {
    IRBuilder<> IRB(OrigIns);
    LLVM_DEBUG(dbgs() << "  SHAD0 : " << *Shadow << "\n");
    Value *ConvertedShadow = convertToShadowTyNoVec(Shadow, IRB);
    LLVM_DEBUG(dbgs() << "  SHAD1 : " << *ConvertedShadow << "\n");

    if (auto *ConstantShadow = dyn_cast<Constant>(ConvertedShadow)) {
      if (ClCheckConstantShadow && !ConstantShadow->isZeroValue()) {
        insertWarningFn(IRB, Origin);
      }
      return;
    }

    const DataLayout &DL = OrigIns->getModule()->getDataLayout();

    unsigned TypeSizeInBits = DL.getTypeSizeInBits(ConvertedShadow->getType());
    unsigned SizeIndex = TypeSizeToSizeIndex(TypeSizeInBits);
    if (AsCall && SizeIndex < kNumberOfAccessSizes && !MS.CompileKernel) {
      FunctionCallee Fn = MS.MaybeWarningFn[SizeIndex];
      Value *ConvertedShadow2 =
          IRB.CreateZExt(ConvertedShadow, IRB.getIntNTy(8 * (1 << SizeIndex)));
      IRB.CreateCall(Fn, {ConvertedShadow2, MS.TrackOrigins && Origin
                                                ? Origin
                                                : (Value *)IRB.getInt32(0)});
    } else {
      Value *Cmp = IRB.CreateICmpNE(ConvertedShadow,
                                    getCleanShadow(ConvertedShadow), "_mscmp");
      Instruction *CheckTerm = SplitBlockAndInsertIfThen(
          Cmp, OrigIns,
          /* Unreachable */ !MS.Recover, MS.ColdCallWeights);

      IRB.SetInsertPoint(CheckTerm);
      insertWarningFn(IRB, Origin);
      LLVM_DEBUG(dbgs() << "  CHECK: " << *Cmp << "\n");
    }
  }

  void materializeChecks(bool InstrumentWithCalls) {
    for (const auto &ShadowData : InstrumentationList) {
      Instruction *OrigIns = ShadowData.OrigIns;
      Value *Shadow = ShadowData.Shadow;
      Value *Origin = ShadowData.Origin;
      materializeOneCheck(OrigIns, Shadow, Origin, InstrumentWithCalls);
    }
    LLVM_DEBUG(dbgs() << "DONE:\n" << F);
  }

  BasicBlock *insertKmsanPrologue(Function &F) {
    BasicBlock *ret =
        SplitBlock(&F.getEntryBlock(), F.getEntryBlock().getFirstNonPHI());
    IRBuilder<> IRB(F.getEntryBlock().getFirstNonPHI());
    Value *ContextState = IRB.CreateCall(MS.MsanGetContextStateFn, {});
    Constant *Zero = IRB.getInt32(0);
    MS.ParamTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(0)}, "param_shadow");
    MS.RetvalTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                 {Zero, IRB.getInt32(1)}, "retval_shadow");
    MS.VAArgTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                {Zero, IRB.getInt32(2)}, "va_arg_shadow");
    MS.VAArgOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(3)}, "va_arg_origin");
    MS.VAArgOverflowSizeTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(4)}, "va_arg_overflow_size");
    MS.ParamOriginTLS = IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                                      {Zero, IRB.getInt32(5)}, "param_origin");
    MS.RetvalOriginTLS =
        IRB.CreateGEP(MS.MsanContextStateTy, ContextState,
                      {Zero, IRB.getInt32(6)}, "retval_origin");
    return ret;
  }

  /// Add MemorySanitizer instrumentation to a function.
  bool runOnFunction() {
    // In the presence of unreachable blocks, we may see Phi nodes with
    // incoming nodes from such blocks. Since InstVisitor skips unreachable
    // blocks, such nodes will not have any shadow value associated with them.
    // It's easier to remove unreachable blocks than deal with missing shadow.
    removeUnreachableBlocks(F);

    // Iterate all BBs in depth-first order and create shadow instructions
    // for all instructions (where applicable).
    // For PHI nodes we create dummy shadow PHIs which will be finalized later.
    for (BasicBlock *BB : depth_first(ActualFnStart))
      visit(*BB);

    // Finalize PHI nodes.
    for (PHINode *PN : ShadowPHINodes) {
      PHINode *PNS = cast<PHINode>(getShadow(PN));
      PHINode *PNO = MS.TrackOrigins ? cast<PHINode>(getOrigin(PN)) : nullptr;
      size_t NumValues = PN->getNumIncomingValues();
      for (size_t v = 0; v < NumValues; v++) {
        PNS->addIncoming(getShadow(PN, v), PN->getIncomingBlock(v));
        if (PNO) PNO->addIncoming(getOrigin(PN, v), PN->getIncomingBlock(v));
      }
    }

    VAHelper->finalizeInstrumentation();

    // Poison llvm.lifetime.start intrinsics, if we haven't fallen back to
    // instrumenting only allocas.
    if (InstrumentLifetimeStart) {
      for (auto Item : LifetimeStartList) {
        instrumentAlloca(*Item.second, Item.first);
        AllocaSet.erase(Item.second);
      }
    }
    // Poison the allocas for which we didn't instrument the corresponding
    // lifetime intrinsics.
    for (AllocaInst *AI : AllocaSet)
      instrumentAlloca(*AI);

    bool InstrumentWithCalls = ClInstrumentationWithCallThreshold >= 0 &&
                               InstrumentationList.size() + StoreList.size() >
                                   (unsigned)ClInstrumentationWithCallThreshold;

    // Insert shadow value checks.
    materializeChecks(InstrumentWithCalls);

    // Delayed instrumentation of StoreInst.
    // This may not add new address checks.
    materializeStores(InstrumentWithCalls);

    return true;
  }

  /// Compute the shadow type that corresponds to a given Value.
  Type *getShadowTy(Value *V) {
    return getShadowTy(V->getType());
  }

  /// Compute the shadow type that corresponds to a given Type.
  Type *getShadowTy(Type *OrigTy) {
    if (!OrigTy->isSized()) {
      return nullptr;
    }
    // For integer type, shadow is the same as the original type.
    // This may return weird-sized types like i1.
    if (IntegerType *IT = dyn_cast<IntegerType>(OrigTy))
      return IT;
    const DataLayout &DL = F.getParent()->getDataLayout();
    if (VectorType *VT = dyn_cast<VectorType>(OrigTy)) {
      uint32_t EltSize = DL.getTypeSizeInBits(VT->getElementType());
      return VectorType::get(IntegerType::get(*MS.C, EltSize),
                             VT->getNumElements());
    }
    if (ArrayType *AT = dyn_cast<ArrayType>(OrigTy)) {
      return ArrayType::get(getShadowTy(AT->getElementType()),
                            AT->getNumElements());
    }
    if (StructType *ST = dyn_cast<StructType>(OrigTy)) {
      SmallVector<Type*, 4> Elements;
      for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
        Elements.push_back(getShadowTy(ST->getElementType(i)));
      StructType *Res = StructType::get(*MS.C, Elements, ST->isPacked());
      LLVM_DEBUG(dbgs() << "getShadowTy: " << *ST << " ===> " << *Res << "\n");
      return Res;
    }
    uint32_t TypeSize = DL.getTypeSizeInBits(OrigTy);
    return IntegerType::get(*MS.C, TypeSize);
  }

  /// Flatten a vector type.
  Type *getShadowTyNoVec(Type *ty) {
    if (VectorType *vt = dyn_cast<VectorType>(ty))
      return IntegerType::get(*MS.C, vt->getBitWidth());
    return ty;
  }

  /// Convert a shadow value to its flattened variant.
  Value *convertToShadowTyNoVec(Value *V, IRBuilder<> &IRB) {
    Type *Ty = V->getType();
    Type *NoVecTy = getShadowTyNoVec(Ty);
    if (Ty == NoVecTy) return V;
    return IRB.CreateBitCast(V, NoVecTy);
  }

  /// Compute the integer shadow offset that corresponds to a given
  /// application address.
  ///
  /// Offset = (Addr & ~AndMask) ^ XorMask
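  ///
  /// E.g. (illustrative) with the default x86_64 Linux mapping (AndMask == 0,
  /// XorMask == 0x500000000000) this reduces to Addr ^ 0x500000000000.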
  Value *getShadowPtrOffset(Value *Addr, IRBuilder<> &IRB) {
    Value *OffsetLong = IRB.CreatePointerCast(Addr, MS.IntptrTy);

    uint64_t AndMask = MS.MapParams->AndMask;
    if (AndMask)
      OffsetLong =
          IRB.CreateAnd(OffsetLong, ConstantInt::get(MS.IntptrTy, ~AndMask));

    uint64_t XorMask = MS.MapParams->XorMask;
    if (XorMask)
      OffsetLong =
          IRB.CreateXor(OffsetLong, ConstantInt::get(MS.IntptrTy, XorMask));
    return OffsetLong;
  }
1401
1402 /// Compute the shadow and origin addresses corresponding to a given
1403 /// application address.
1404 ///
1405 /// Shadow = ShadowBase + Offset
1406 /// Origin = (OriginBase + Offset) & ~3ULL
1407 std::pair<Value *, Value *>
1408 getShadowOriginPtrUserspace(Value *Addr, IRBuilder<> &IRB, Type *ShadowTy,
1409 MaybeAlign Alignment) {
1410 Value *ShadowOffset = getShadowPtrOffset(Addr, IRB);
1411 Value *ShadowLong = ShadowOffset;
1412 uint64_t ShadowBase = MS.MapParams->ShadowBase;
1413 if (ShadowBase != 0) {
1414 ShadowLong =
1415 IRB.CreateAdd(ShadowLong,
1416 ConstantInt::get(MS.IntptrTy, ShadowBase));
1417 }
1418 Value *ShadowPtr =
1419 IRB.CreateIntToPtr(ShadowLong, PointerType::get(ShadowTy, 0));
1420 Value *OriginPtr = nullptr;
1421 if (MS.TrackOrigins) {
1422 Value *OriginLong = ShadowOffset;
1423 uint64_t OriginBase = MS.MapParams->OriginBase;
1424 if (OriginBase != 0)
1425 OriginLong = IRB.CreateAdd(OriginLong,
1426 ConstantInt::get(MS.IntptrTy, OriginBase));
1427 if (!Alignment || *Alignment < kMinOriginAlignment) {
1428 uint64_t Mask = kMinOriginAlignment.value() - 1;
1429 OriginLong =
1430 IRB.CreateAnd(OriginLong, ConstantInt::get(MS.IntptrTy, ~Mask));
1431 }
1432 OriginPtr =
1433 IRB.CreateIntToPtr(OriginLong, PointerType::get(MS.OriginTy, 0));
1434 }
1435 return std::make_pair(ShadowPtr, OriginPtr);
1436 }
1437
1438 std::pair<Value *, Value *> getShadowOriginPtrKernel(Value *Addr,
1439 IRBuilder<> &IRB,
1440 Type *ShadowTy,
1441 bool isStore) {
1442 Value *ShadowOriginPtrs;
1443 const DataLayout &DL = F.getParent()->getDataLayout();
1444 int Size = DL.getTypeStoreSize(ShadowTy);
1445
1446 FunctionCallee Getter = MS.getKmsanShadowOriginAccessFn(isStore, Size);
1447 Value *AddrCast =
1448 IRB.CreatePointerCast(Addr, PointerType::get(IRB.getInt8Ty(), 0));
1449 if (Getter) {
1450 ShadowOriginPtrs = IRB.CreateCall(Getter, AddrCast);
1451 } else {
1452 Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
1453 ShadowOriginPtrs = IRB.CreateCall(isStore ? MS.MsanMetadataPtrForStoreN
1454 : MS.MsanMetadataPtrForLoadN,
1455 {AddrCast, SizeVal});
1456 }
1457 Value *ShadowPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 0);
1458 ShadowPtr = IRB.CreatePointerCast(ShadowPtr, PointerType::get(ShadowTy, 0));
1459 Value *OriginPtr = IRB.CreateExtractValue(ShadowOriginPtrs, 1);
1460
1461 return std::make_pair(ShadowPtr, OriginPtr);
1462 }
1463
1464 std::pair<Value *, Value *> getShadowOriginPtr(Value *Addr, IRBuilder<> &IRB,
1465 Type *ShadowTy,
1466 MaybeAlign Alignment,
1467 bool isStore) {
1468 if (MS.CompileKernel)
1469 return getShadowOriginPtrKernel(Addr, IRB, ShadowTy, isStore);
1470 return getShadowOriginPtrUserspace(Addr, IRB, ShadowTy, Alignment);
1471 }
1472
1473 /// Compute the shadow address for a given function argument.
1474 ///
1475 /// Shadow = ParamTLS+ArgOffset.
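///
/// For example (a sketch): for f(i32 %a, double %b), the shadow of %a lives
/// at ParamTLS + 0 and the shadow of %b at ParamTLS + 8, since every slot is
/// rounded up to kShadowTLSAlignment (see getShadow() below).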
1476 Value *getShadowPtrForArgument(Value *A, IRBuilder<> &IRB,
1477 int ArgOffset) {
1478 Value *Base = IRB.CreatePointerCast(MS.ParamTLS, MS.IntptrTy);
1479 if (ArgOffset)
1480 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1481 return IRB.CreateIntToPtr(Base, PointerType::get(getShadowTy(A), 0),
1482 "_msarg");
1483 }
1484
1485 /// Compute the origin address for a given function argument.
1486 Value *getOriginPtrForArgument(Value *A, IRBuilder<> &IRB,
1487 int ArgOffset) {
1488 if (!MS.TrackOrigins)
1489 return nullptr;
1490 Value *Base = IRB.CreatePointerCast(MS.ParamOriginTLS, MS.IntptrTy);
1491 if (ArgOffset)
1492 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
1493 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
1494 "_msarg_o");
1495 }
1496
1497 /// Compute the shadow address for a retval.
1498 Value *getShadowPtrForRetval(Value *A, IRBuilder<> &IRB) {
1499 return IRB.CreatePointerCast(MS.RetvalTLS,
1500 PointerType::get(getShadowTy(A), 0),
1501 "_msret");
1502 }
1503
1504 /// Compute the origin address for a retval.
1505 Value *getOriginPtrForRetval(IRBuilder<> &IRB) {
1506 // We keep a single origin for the entire retval. Might be too optimistic.
1507 return MS.RetvalOriginTLS;
1508 }
1509
1510 /// Set SV to be the shadow value for V.
1511 void setShadow(Value *V, Value *SV) {
1512 assert(!ShadowMap.count(V) && "Values may only have one shadow");
1513 ShadowMap[V] = PropagateShadow ? SV : getCleanShadow(V);
1514 }
1515
1516 /// Set Origin to be the origin value for V.
1517 void setOrigin(Value *V, Value *Origin) {
1518 if (!MS.TrackOrigins) return;
1519 assert(!OriginMap.count(V) && "Values may only have one origin");
1520 LLVM_DEBUG(dbgs() << "ORIGIN: " << *V << " ==> " << *Origin << "\n");
1521 OriginMap[V] = Origin;
1522 }
1523
1524 Constant *getCleanShadow(Type *OrigTy) {
1525 Type *ShadowTy = getShadowTy(OrigTy);
1526 if (!ShadowTy)
1527 return nullptr;
1528 return Constant::getNullValue(ShadowTy);
1529 }
1530
1531 /// Create a clean shadow value for a given value.
1532 ///
1533 /// Clean shadow (all zeroes) means all bits of the value are defined
1534 /// (initialized).
1535 Constant *getCleanShadow(Value *V) {
1536 return getCleanShadow(V->getType());
1537 }
1538
1539 /// Create a dirty shadow of a given shadow type.
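///
/// e.g. for the struct type {i32, [2 x i8]} the poisoned shadow is the
/// all-ones constant { i32 -1, [2 x i8] [i8 -1, i8 -1] }.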
1540 Constant *getPoisonedShadow(Type *ShadowTy) {
1541 assert(ShadowTy);
1542 if (isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy))
1543 return Constant::getAllOnesValue(ShadowTy);
1544 if (ArrayType *AT = dyn_cast<ArrayType>(ShadowTy)) {
1545 SmallVector<Constant *, 4> Vals(AT->getNumElements(),
1546 getPoisonedShadow(AT->getElementType()));
1547 return ConstantArray::get(AT, Vals);
1548 }
1549 if (StructType *ST = dyn_cast<StructType>(ShadowTy)) {
1550 SmallVector<Constant *, 4> Vals;
1551 for (unsigned i = 0, n = ST->getNumElements(); i < n; i++)
1552 Vals.push_back(getPoisonedShadow(ST->getElementType(i)));
1553 return ConstantStruct::get(ST, Vals);
1554 }
1555 llvm_unreachable("Unexpected shadow type");
1556 }
1557
1558 /// Create a dirty shadow for a given value.
1559 Constant *getPoisonedShadow(Value *V) {
1560 Type *ShadowTy = getShadowTy(V);
1561 if (!ShadowTy)
1562 return nullptr;
1563 return getPoisonedShadow(ShadowTy);
1564 }
1565
1566 /// Create a clean (zero) origin.
1567 Value *getCleanOrigin() {
1568 return Constant::getNullValue(MS.OriginTy);
1569 }
1570
1571 /// Get the shadow value for a given Value.
1572 ///
1573 /// This function either returns the value set earlier with setShadow,
1574 /// or extracts it from ParamTLS (for function arguments).
1575 Value *getShadow(Value *V) {
1576 if (!PropagateShadow) return getCleanShadow(V);
1577 if (Instruction *I = dyn_cast<Instruction>(V)) {
1578 if (I->getMetadata("nosanitize"))
1579 return getCleanShadow(V);
1580 // For instructions the shadow is already stored in the map.
1581 Value *Shadow = ShadowMap[V];
1582 if (!Shadow) {
1583 LLVM_DEBUG(dbgs() << "No shadow: " << *V << "\n" << *(I->getParent()));
1584 (void)I;
1585 assert(Shadow && "No shadow for a value");
1586 }
1587 return Shadow;
1588 }
1589 if (UndefValue *U = dyn_cast<UndefValue>(V)) {
1590 Value *AllOnes = PoisonUndef ? getPoisonedShadow(V) : getCleanShadow(V);
1591 LLVM_DEBUG(dbgs() << "Undef: " << *U << " ==> " << *AllOnes << "\n");
1592 (void)U;
1593 return AllOnes;
1594 }
1595 if (Argument *A = dyn_cast<Argument>(V)) {
1596 // For arguments we compute the shadow on demand and store it in the map.
1597 Value **ShadowPtr = &ShadowMap[V];
1598 if (*ShadowPtr)
1599 return *ShadowPtr;
1600 Function *F = A->getParent();
1601 IRBuilder<> EntryIRB(ActualFnStart->getFirstNonPHI());
1602 unsigned ArgOffset = 0;
1603 const DataLayout &DL = F->getParent()->getDataLayout();
1604 for (auto &FArg : F->args()) {
1605 if (!FArg.getType()->isSized()) {
1606 LLVM_DEBUG(dbgs() << "Arg is not sized\n");
1607 continue;
1608 }
1609 unsigned Size =
1610 FArg.hasByValAttr()
1611 ? DL.getTypeAllocSize(FArg.getType()->getPointerElementType())
1612 : DL.getTypeAllocSize(FArg.getType());
1613 if (A == &FArg) {
1614 bool Overflow = ArgOffset + Size > kParamTLSSize;
1615 Value *Base = getShadowPtrForArgument(&FArg, EntryIRB, ArgOffset);
1616 if (FArg.hasByValAttr()) {
1617 // ByVal pointer itself has clean shadow. We copy the actual
1618 // argument shadow to the underlying memory.
1619 // Figure out maximal valid memcpy alignment.
1620 const Align ArgAlign = DL.getValueOrABITypeAlignment(
1621 MaybeAlign(FArg.getParamAlignment()),
1622 A->getType()->getPointerElementType());
1623 Value *CpShadowPtr =
1624 getShadowOriginPtr(V, EntryIRB, EntryIRB.getInt8Ty(), ArgAlign,
1625 /*isStore*/ true)
1626 .first;
1627 // TODO(glider): need to copy origins.
1628 if (Overflow) {
1629 // ParamTLS overflow.
1630 EntryIRB.CreateMemSet(
1631 CpShadowPtr, Constant::getNullValue(EntryIRB.getInt8Ty()),
1632 Size, ArgAlign);
1633 } else {
1634 const Align CopyAlign = std::min(ArgAlign, kShadowTLSAlignment);
1635 Value *Cpy = EntryIRB.CreateMemCpy(CpShadowPtr, CopyAlign, Base,
1636 CopyAlign, Size);
1637 LLVM_DEBUG(dbgs() << " ByValCpy: " << *Cpy << "\n");
1638 (void)Cpy;
1639 }
1640 *ShadowPtr = getCleanShadow(V);
1641 } else {
1642 if (Overflow) {
1643 // ParamTLS overflow.
1644 *ShadowPtr = getCleanShadow(V);
1645 } else {
1646 *ShadowPtr = EntryIRB.CreateAlignedLoad(
1647 getShadowTy(&FArg), Base, kShadowTLSAlignment.value());
1648 }
1649 }
1650 LLVM_DEBUG(dbgs()
1651 << " ARG: " << FArg << " ==> " << **ShadowPtr << "\n");
1652 if (MS.TrackOrigins && !Overflow) {
1653 Value *OriginPtr =
1654 getOriginPtrForArgument(&FArg, EntryIRB, ArgOffset);
1655 setOrigin(A, EntryIRB.CreateLoad(MS.OriginTy, OriginPtr));
1656 } else {
1657 setOrigin(A, getCleanOrigin());
1658 }
1659 }
1660 ArgOffset += alignTo(Size, kShadowTLSAlignment);
1661 }
1662 assert(*ShadowPtr && "Could not find shadow for an argument");
1663 return *ShadowPtr;
1664 }
1665 // For everything else the shadow is zero.
1666 return getCleanShadow(V);
1667 }
1668
1669 /// Get the shadow for i-th argument of the instruction I.
1670 Value *getShadow(Instruction *I, int i) {
1671 return getShadow(I->getOperand(i));
1672 }
1673
1674 /// Get the origin for a value.
1675 Value *getOrigin(Value *V) {
1676 if (!MS.TrackOrigins) return nullptr;
1677 if (!PropagateShadow) return getCleanOrigin();
1678 if (isa<Constant>(V)) return getCleanOrigin();
1679 assert((isa<Instruction>(V) || isa<Argument>(V)) &&
1680 "Unexpected value type in getOrigin()");
1681 if (Instruction *I = dyn_cast<Instruction>(V)) {
1682 if (I->getMetadata("nosanitize"))
1683 return getCleanOrigin();
1684 }
1685 Value *Origin = OriginMap[V];
1686 assert(Origin && "Missing origin");
1687 return Origin;
1688 }
1689
1690 /// Get the origin for i-th argument of the instruction I.
1691 Value *getOrigin(Instruction *I, int i) {
1692 return getOrigin(I->getOperand(i));
1693 }
1694
1695 /// Remember the place where a shadow check should be inserted.
1696 ///
1697 /// This location will be later instrumented with a check that will print a
1698 /// UMR warning at runtime if the shadow value is not 0.
1699 void insertShadowCheck(Value *Shadow, Value *Origin, Instruction *OrigIns) {
1700 assert(Shadow);
1701 if (!InsertChecks) return;
1702 #ifndef NDEBUG
1703 Type *ShadowTy = Shadow->getType();
1704 assert((isa<IntegerType>(ShadowTy) || isa<VectorType>(ShadowTy)) &&
1705 "Can only insert checks for integer and vector shadow types");
1706 #endif
1707 InstrumentationList.push_back(
1708 ShadowOriginAndInsertPoint(Shadow, Origin, OrigIns));
1709 }
1710
1711 /// Remember the place where a shadow check should be inserted.
1712 ///
1713 /// This location will be later instrumented with a check that will print a
1714 /// UMR warning at runtime if the value is not fully defined.
1715 void insertShadowCheck(Value *Val, Instruction *OrigIns) {
1716 assert(Val);
1717 Value *Shadow, *Origin;
1718 if (ClCheckConstantShadow) {
1719 Shadow = getShadow(Val);
1720 if (!Shadow) return;
1721 Origin = getOrigin(Val);
1722 } else {
1723 Shadow = dyn_cast_or_null<Instruction>(getShadow(Val));
1724 if (!Shadow) return;
1725 Origin = dyn_cast_or_null<Instruction>(getOrigin(Val));
1726 }
1727 insertShadowCheck(Shadow, Origin, OrigIns);
1728 }
1729
1730 AtomicOrdering addReleaseOrdering(AtomicOrdering a) {
1731 switch (a) {
1732 case AtomicOrdering::NotAtomic:
1733 return AtomicOrdering::NotAtomic;
1734 case AtomicOrdering::Unordered:
1735 case AtomicOrdering::Monotonic:
1736 case AtomicOrdering::Release:
1737 return AtomicOrdering::Release;
1738 case AtomicOrdering::Acquire:
1739 case AtomicOrdering::AcquireRelease:
1740 return AtomicOrdering::AcquireRelease;
1741 case AtomicOrdering::SequentiallyConsistent:
1742 return AtomicOrdering::SequentiallyConsistent;
1743 }
1744 llvm_unreachable("Unknown ordering");
1745 }
1746
1747 AtomicOrdering addAcquireOrdering(AtomicOrdering a) {
1748 switch (a) {
1749 case AtomicOrdering::NotAtomic:
1750 return AtomicOrdering::NotAtomic;
1751 case AtomicOrdering::Unordered:
1752 case AtomicOrdering::Monotonic:
1753 case AtomicOrdering::Acquire:
1754 return AtomicOrdering::Acquire;
1755 case AtomicOrdering::Release:
1756 case AtomicOrdering::AcquireRelease:
1757 return AtomicOrdering::AcquireRelease;
1758 case AtomicOrdering::SequentiallyConsistent:
1759 return AtomicOrdering::SequentiallyConsistent;
1760 }
1761 llvm_unreachable("Unknown ordering");
1762 }
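
// A sketch of why the two helpers above upgrade orderings: we store the
// shadow before an atomic application store and load the shadow after an
// atomic application load (e.g. visitLoadInst below reads the shadow after
// the load), so making the application access at least Release/Acquire
// keeps the shadow access ordered with the data access across threads.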
1763
1764 // ------------------- Visitors.
1765 using InstVisitor<MemorySanitizerVisitor>::visit;
1766 void visit(Instruction &I) {
1767 if (!I.getMetadata("nosanitize"))
1768 InstVisitor<MemorySanitizerVisitor>::visit(I);
1769 }
1770
1771 /// Instrument LoadInst
1772 ///
1773 /// Loads the corresponding shadow and (optionally) origin.
1774 /// Optionally, checks that the load address is fully defined.
1775 void visitLoadInst(LoadInst &I) {
1776 assert(I.getType()->isSized() && "Load type must have size");
1777 assert(!I.getMetadata("nosanitize"));
1778 IRBuilder<> IRB(I.getNextNode());
1779 Type *ShadowTy = getShadowTy(&I);
1780 Value *Addr = I.getPointerOperand();
1781 Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
1782 const Align Alignment = assumeAligned(I.getAlignment());
1783 if (PropagateShadow) {
1784 std::tie(ShadowPtr, OriginPtr) =
1785 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
1786 setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
1787 Alignment.value(), "_msld"));
1788 } else {
1789 setShadow(&I, getCleanShadow(&I));
1790 }
1791
1792 if (ClCheckAccessAddress)
1793 insertShadowCheck(I.getPointerOperand(), &I);
1794
1795 if (I.isAtomic())
1796 I.setOrdering(addAcquireOrdering(I.getOrdering()));
1797
1798 if (MS.TrackOrigins) {
1799 if (PropagateShadow) {
1800 const Align OriginAlignment = std::max(kMinOriginAlignment, Alignment);
1801 setOrigin(&I, IRB.CreateAlignedLoad(MS.OriginTy, OriginPtr,
1802 OriginAlignment.value()));
1803 } else {
1804 setOrigin(&I, getCleanOrigin());
1805 }
1806 }
1807 }
1808
1809 /// Instrument StoreInst
1810 ///
1811 /// Stores the corresponding shadow and (optionally) origin.
1812 /// Optionally, checks that the store address is fully defined.
1813 void visitStoreInst(StoreInst &I) {
1814 StoreList.push_back(&I);
1815 if (ClCheckAccessAddress)
1816 insertShadowCheck(I.getPointerOperand(), &I);
1817 }
1818
1819 void handleCASOrRMW(Instruction &I) {
1820 assert(isa<AtomicRMWInst>(I) || isa<AtomicCmpXchgInst>(I));
1821
1822 IRBuilder<> IRB(&I);
1823 Value *Addr = I.getOperand(0);
1824 Value *ShadowPtr = getShadowOriginPtr(Addr, IRB, I.getType(), Align::None(),
1825 /*isStore*/ true)
1826 .first;
1827
1828 if (ClCheckAccessAddress)
1829 insertShadowCheck(Addr, &I);
1830
1831 // Only test the conditional argument of cmpxchg instruction.
1832 // The other argument can potentially be uninitialized, but we cannot
1833 // detect this situation reliably without risking false positives.
1834 if (isa<AtomicCmpXchgInst>(I))
1835 insertShadowCheck(I.getOperand(1), &I);
1836
1837 IRB.CreateStore(getCleanShadow(&I), ShadowPtr);
1838
1839 setShadow(&I, getCleanShadow(&I));
1840 setOrigin(&I, getCleanOrigin());
1841 }
1842
1843 void visitAtomicRMWInst(AtomicRMWInst &I) {
1844 handleCASOrRMW(I);
1845 I.setOrdering(addReleaseOrdering(I.getOrdering()));
1846 }
1847
1848 void visitAtomicCmpXchgInst(AtomicCmpXchgInst &I) {
1849 handleCASOrRMW(I);
1850 I.setSuccessOrdering(addReleaseOrdering(I.getSuccessOrdering()));
1851 }
1852
1853 // Vector manipulation.
1854 void visitExtractElementInst(ExtractElementInst &I) {
1855 insertShadowCheck(I.getOperand(1), &I);
1856 IRBuilder<> IRB(&I);
1857 setShadow(&I, IRB.CreateExtractElement(getShadow(&I, 0), I.getOperand(1),
1858 "_msprop"));
1859 setOrigin(&I, getOrigin(&I, 0));
1860 }
1861
1862 void visitInsertElementInst(InsertElementInst &I) {
1863 insertShadowCheck(I.getOperand(2), &I);
1864 IRBuilder<> IRB(&I);
1865 setShadow(&I, IRB.CreateInsertElement(getShadow(&I, 0), getShadow(&I, 1),
1866 I.getOperand(2), "_msprop"));
1867 setOriginForNaryOp(I);
1868 }
1869
1870 void visitShuffleVectorInst(ShuffleVectorInst &I) {
1871 insertShadowCheck(I.getOperand(2), &I);
1872 IRBuilder<> IRB(&I);
1873 setShadow(&I, IRB.CreateShuffleVector(getShadow(&I, 0), getShadow(&I, 1),
1874 I.getOperand(2), "_msprop"));
1875 setOriginForNaryOp(I);
1876 }
1877
1878 // Casts.
1879 void visitSExtInst(SExtInst &I) {
1880 IRBuilder<> IRB(&I);
1881 setShadow(&I, IRB.CreateSExt(getShadow(&I, 0), I.getType(), "_msprop"));
1882 setOrigin(&I, getOrigin(&I, 0));
1883 }
1884
1885 void visitZExtInst(ZExtInst &I) {
1886 IRBuilder<> IRB(&I);
1887 setShadow(&I, IRB.CreateZExt(getShadow(&I, 0), I.getType(), "_msprop"));
1888 setOrigin(&I, getOrigin(&I, 0));
1889 }
1890
1891 void visitTruncInst(TruncInst &I) {
1892 IRBuilder<> IRB(&I);
1893 setShadow(&I, IRB.CreateTrunc(getShadow(&I, 0), I.getType(), "_msprop"));
1894 setOrigin(&I, getOrigin(&I, 0));
1895 }
1896
1897 void visitBitCastInst(BitCastInst &I) {
1898 // Special case: if this is the bitcast (there is exactly 1 allowed) between
1899 // a musttail call and a ret, don't instrument. New instructions are not
1900 // allowed after a musttail call.
1901 if (auto *CI = dyn_cast<CallInst>(I.getOperand(0)))
1902 if (CI->isMustTailCall())
1903 return;
1904 IRBuilder<> IRB(&I);
1905 setShadow(&I, IRB.CreateBitCast(getShadow(&I, 0), getShadowTy(&I)));
1906 setOrigin(&I, getOrigin(&I, 0));
1907 }
1908
1909 void visitPtrToIntInst(PtrToIntInst &I) {
1910 IRBuilder<> IRB(&I);
1911 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1912 "_msprop_ptrtoint"));
1913 setOrigin(&I, getOrigin(&I, 0));
1914 }
1915
1916 void visitIntToPtrInst(IntToPtrInst &I) {
1917 IRBuilder<> IRB(&I);
1918 setShadow(&I, IRB.CreateIntCast(getShadow(&I, 0), getShadowTy(&I), false,
1919 "_msprop_inttoptr"));
1920 setOrigin(&I, getOrigin(&I, 0));
1921 }
1922
1923 void visitFPToSIInst(CastInst& I) { handleShadowOr(I); }
1924 void visitFPToUIInst(CastInst& I) { handleShadowOr(I); }
1925 void visitSIToFPInst(CastInst& I) { handleShadowOr(I); }
1926 void visitUIToFPInst(CastInst& I) { handleShadowOr(I); }
1927 void visitFPExtInst(CastInst& I) { handleShadowOr(I); }
1928 void visitFPTruncInst(CastInst& I) { handleShadowOr(I); }
1929
1930 /// Propagate shadow for bitwise AND.
1931 ///
1932 /// This code is exact, i.e. if, for example, a bit in the left argument
1933 /// is defined and 0, then neither the value nor the definedness of the
1934 /// corresponding bit in the right argument affects the resulting shadow.
1935 void visitAnd(BinaryOperator &I) {
1936 IRBuilder<> IRB(&I);
1937 // "And" of 0 and a poisoned value results in unpoisoned value.
1938 // 1&1 => 1; 0&1 => 0; p&1 => p;
1939 // 1&0 => 0; 0&0 => 0; p&0 => 0;
1940 // 1&p => p; 0&p => 0; p&p => p;
1941 // S = (S1 & S2) | (V1 & S2) | (S1 & V2)
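// e.g. if a bit of V1 is a defined 0 (S1 == 0, V1 == 0 for that bit), all
// three terms are 0 there, so the result bit is defined regardless of V2/S2.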
1942 Value *S1 = getShadow(&I, 0);
1943 Value *S2 = getShadow(&I, 1);
1944 Value *V1 = I.getOperand(0);
1945 Value *V2 = I.getOperand(1);
1946 if (V1->getType() != S1->getType()) {
1947 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1948 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1949 }
1950 Value *S1S2 = IRB.CreateAnd(S1, S2);
1951 Value *V1S2 = IRB.CreateAnd(V1, S2);
1952 Value *S1V2 = IRB.CreateAnd(S1, V2);
1953 setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
1954 setOriginForNaryOp(I);
1955 }
1956
1957 void visitOr(BinaryOperator &I) {
1958 IRBuilder<> IRB(&I);
1959 // "Or" of 1 and a poisoned value results in unpoisoned value.
1960 // 1|1 => 1; 0|1 => 1; p|1 => 1;
1961 // 1|0 => 1; 0|0 => 0; p|0 => p;
1962 // 1|p => 1; 0|p => p; p|p => p;
1963 // S = (S1 & S2) | (~V1 & S2) | (S1 & ~V2)
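// e.g. a defined 1 in V1 (S1 == 0, so ~V1 == 0 for that bit) zeroes all
// three terms: the result bit is a defined 1 regardless of V2/S2.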
1964 Value *S1 = getShadow(&I, 0);
1965 Value *S2 = getShadow(&I, 1);
1966 Value *V1 = IRB.CreateNot(I.getOperand(0));
1967 Value *V2 = IRB.CreateNot(I.getOperand(1));
1968 if (V1->getType() != S1->getType()) {
1969 V1 = IRB.CreateIntCast(V1, S1->getType(), false);
1970 V2 = IRB.CreateIntCast(V2, S2->getType(), false);
1971 }
1972 Value *S1S2 = IRB.CreateAnd(S1, S2);
1973 Value *V1S2 = IRB.CreateAnd(V1, S2);
1974 Value *S1V2 = IRB.CreateAnd(S1, V2);
1975 setShadow(&I, IRB.CreateOr({S1S2, V1S2, S1V2}));
1976 setOriginForNaryOp(I);
1977 }
1978
1979 /// Default propagation of shadow and/or origin.
1980 ///
1981 /// This class implements the general case of shadow propagation, used in all
1982 /// cases where we don't know and/or don't care about what the operation
1983 /// actually does. It converts all input shadow values to a common type
1984 /// (extending or truncating as necessary), and bitwise OR's them.
1985 ///
1986 /// This is much cheaper than inserting checks (i.e. requiring inputs to be
1987 /// fully initialized), and less prone to false positives.
1988 ///
1989 /// This class also implements the general case of origin propagation. For a
1990 /// Nary operation, result origin is set to the origin of an argument that is
1991 not entirely initialized. If there is more than one such argument, the
1992 /// rightmost of them is picked. It does not matter which one is picked if all
1993 /// arguments are initialized.
1994 template <bool CombineShadow>
1995 class Combiner {
1996 Value *Shadow = nullptr;
1997 Value *Origin = nullptr;
1998 IRBuilder<> &IRB;
1999 MemorySanitizerVisitor *MSV;
2000
2001 public:
2002 Combiner(MemorySanitizerVisitor *MSV, IRBuilder<> &IRB)
2003 : IRB(IRB), MSV(MSV) {}
2004
2005 /// Add a pair of shadow and origin values to the mix.
2006 Combiner &Add(Value *OpShadow, Value *OpOrigin) {
2007 if (CombineShadow) {
2008 assert(OpShadow);
2009 if (!Shadow)
2010 Shadow = OpShadow;
2011 else {
2012 OpShadow = MSV->CreateShadowCast(IRB, OpShadow, Shadow->getType());
2013 Shadow = IRB.CreateOr(Shadow, OpShadow, "_msprop");
2014 }
2015 }
2016
2017 if (MSV->MS.TrackOrigins) {
2018 assert(OpOrigin);
2019 if (!Origin) {
2020 Origin = OpOrigin;
2021 } else {
2022 Constant *ConstOrigin = dyn_cast<Constant>(OpOrigin);
2023 // No point in adding something that might result in 0 origin value.
2024 if (!ConstOrigin || !ConstOrigin->isNullValue()) {
2025 Value *FlatShadow = MSV->convertToShadowTyNoVec(OpShadow, IRB);
2026 Value *Cond =
2027 IRB.CreateICmpNE(FlatShadow, MSV->getCleanShadow(FlatShadow));
2028 Origin = IRB.CreateSelect(Cond, OpOrigin, Origin);
2029 }
2030 }
2031 }
2032 return *this;
2033 }
2034
2035 /// Add an application value to the mix.
2036 Combiner &Add(Value *V) {
2037 Value *OpShadow = MSV->getShadow(V);
2038 Value *OpOrigin = MSV->MS.TrackOrigins ? MSV->getOrigin(V) : nullptr;
2039 return Add(OpShadow, OpOrigin);
2040 }
2041
2042 /// Set the current combined values as the given instruction's shadow
2043 /// and origin.
2044 void Done(Instruction *I) {
2045 if (CombineShadow) {
2046 assert(Shadow);
2047 Shadow = MSV->CreateShadowCast(IRB, Shadow, MSV->getShadowTy(I));
2048 MSV->setShadow(I, Shadow);
2049 }
2050 if (MSV->MS.TrackOrigins) {
2051 assert(Origin);
2052 MSV->setOrigin(I, Origin);
2053 }
2054 }
2055 };
2056
2057 using ShadowAndOriginCombiner = Combiner<true>;
2058 using OriginCombiner = Combiner<false>;
2059
2060 /// Propagate origin for arbitrary operation.
2061 void setOriginForNaryOp(Instruction &I) {
2062 if (!MS.TrackOrigins) return;
2063 IRBuilder<> IRB(&I);
2064 OriginCombiner OC(this, IRB);
2065 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2066 OC.Add(OI->get());
2067 OC.Done(&I);
2068 }
2069
2070 size_t VectorOrPrimitiveTypeSizeInBits(Type *Ty) {
2071 assert(!(Ty->isVectorTy() && Ty->getScalarType()->isPointerTy()) &&
2072 "Vector of pointers is not a valid shadow type");
2073 return Ty->isVectorTy() ?
2074 Ty->getVectorNumElements() * Ty->getScalarSizeInBits() :
2075 Ty->getPrimitiveSizeInBits();
2076 }
2077
2078 /// Cast between two shadow types, extending or truncating as
2079 /// necessary.
2080 Value *CreateShadowCast(IRBuilder<> &IRB, Value *V, Type *dstTy,
2081 bool Signed = false) {
2082 Type *srcTy = V->getType();
2083 size_t srcSizeInBits = VectorOrPrimitiveTypeSizeInBits(srcTy);
2084 size_t dstSizeInBits = VectorOrPrimitiveTypeSizeInBits(dstTy);
2085 if (srcSizeInBits > 1 && dstSizeInBits == 1)
2086 return IRB.CreateICmpNE(V, getCleanShadow(V));
2087
2088 if (dstTy->isIntegerTy() && srcTy->isIntegerTy())
2089 return IRB.CreateIntCast(V, dstTy, Signed);
2090 if (dstTy->isVectorTy() && srcTy->isVectorTy() &&
2091 dstTy->getVectorNumElements() == srcTy->getVectorNumElements())
2092 return IRB.CreateIntCast(V, dstTy, Signed);
2093 Value *V1 = IRB.CreateBitCast(V, Type::getIntNTy(*MS.C, srcSizeInBits));
2094 Value *V2 =
2095 IRB.CreateIntCast(V1, Type::getIntNTy(*MS.C, dstSizeInBits), Signed);
2096 return IRB.CreateBitCast(V2, dstTy);
2097 // TODO: handle struct types.
2098 }
2099
2100 /// Cast an application value to the type of its own shadow.
2101 Value *CreateAppToShadowCast(IRBuilder<> &IRB, Value *V) {
2102 Type *ShadowTy = getShadowTy(V);
2103 if (V->getType() == ShadowTy)
2104 return V;
2105 if (V->getType()->isPtrOrPtrVectorTy())
2106 return IRB.CreatePtrToInt(V, ShadowTy);
2107 else
2108 return IRB.CreateBitCast(V, ShadowTy);
2109 }
2110
2111 /// Propagate shadow for arbitrary operation.
2112 void handleShadowOr(Instruction &I) {
2113 IRBuilder<> IRB(&I);
2114 ShadowAndOriginCombiner SC(this, IRB);
2115 for (Instruction::op_iterator OI = I.op_begin(); OI != I.op_end(); ++OI)
2116 SC.Add(OI->get());
2117 SC.Done(&I);
2118 }
2119
2120 void visitFNeg(UnaryOperator &I) { handleShadowOr(I); }
2121
2122 // Handle multiplication by constant.
2123 //
2124 // Handle a special case of multiplication by constant that may have one or
2125 // more zeros in the lower bits. This makes the corresponding number of lower bits
2126 // of the result zero as well. We model it by shifting the other operand
2127 // shadow left by the required number of bits. Effectively, we transform
2128 // (X * (A * 2**B)) to ((X << B) * A) and instrument (X << B) as (Sx << B).
2129 // We use multiplication by 2**N instead of shift to cover the case of
2130 // multiplication by 0, which may occur in some elements of a vector operand.
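// For example (a sketch): for X * 12 == X * (3 * 2**2), the two low bits of
// the result are always 0, so the result shadow is Sx * 4, i.e. Sx << 2 with
// the two low shadow bits cleared.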
2131 void handleMulByConstant(BinaryOperator &I, Constant *ConstArg,
2132 Value *OtherArg) {
2133 Constant *ShadowMul;
2134 Type *Ty = ConstArg->getType();
2135 if (Ty->isVectorTy()) {
2136 unsigned NumElements = Ty->getVectorNumElements();
2137 Type *EltTy = Ty->getSequentialElementType();
2138 SmallVector<Constant *, 16> Elements;
2139 for (unsigned Idx = 0; Idx < NumElements; ++Idx) {
2140 if (ConstantInt *Elt =
2141 dyn_cast<ConstantInt>(ConstArg->getAggregateElement(Idx))) {
2142 const APInt &V = Elt->getValue();
2143 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2144 Elements.push_back(ConstantInt::get(EltTy, V2));
2145 } else {
2146 Elements.push_back(ConstantInt::get(EltTy, 1));
2147 }
2148 }
2149 ShadowMul = ConstantVector::get(Elements);
2150 } else {
2151 if (ConstantInt *Elt = dyn_cast<ConstantInt>(ConstArg)) {
2152 const APInt &V = Elt->getValue();
2153 APInt V2 = APInt(V.getBitWidth(), 1) << V.countTrailingZeros();
2154 ShadowMul = ConstantInt::get(Ty, V2);
2155 } else {
2156 ShadowMul = ConstantInt::get(Ty, 1);
2157 }
2158 }
2159
2160 IRBuilder<> IRB(&I);
2161 setShadow(&I,
2162 IRB.CreateMul(getShadow(OtherArg), ShadowMul, "msprop_mul_cst"));
2163 setOrigin(&I, getOrigin(OtherArg));
2164 }
2165
2166 void visitMul(BinaryOperator &I) {
2167 Constant *constOp0 = dyn_cast<Constant>(I.getOperand(0));
2168 Constant *constOp1 = dyn_cast<Constant>(I.getOperand(1));
2169 if (constOp0 && !constOp1)
2170 handleMulByConstant(I, constOp0, I.getOperand(1));
2171 else if (constOp1 && !constOp0)
2172 handleMulByConstant(I, constOp1, I.getOperand(0));
2173 else
2174 handleShadowOr(I);
2175 }
2176
2177 void visitFAdd(BinaryOperator &I) { handleShadowOr(I); }
2178 void visitFSub(BinaryOperator &I) { handleShadowOr(I); }
2179 void visitFMul(BinaryOperator &I) { handleShadowOr(I); }
2180 void visitAdd(BinaryOperator &I) { handleShadowOr(I); }
2181 void visitSub(BinaryOperator &I) { handleShadowOr(I); }
2182 void visitXor(BinaryOperator &I) { handleShadowOr(I); }
2183
2184 void handleIntegerDiv(Instruction &I) {
2185 IRBuilder<> IRB(&I);
2186 // Strict on the second argument.
2187 insertShadowCheck(I.getOperand(1), &I);
2188 setShadow(&I, getShadow(&I, 0));
2189 setOrigin(&I, getOrigin(&I, 0));
2190 }
2191
2192 void visitUDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2193 void visitSDiv(BinaryOperator &I) { handleIntegerDiv(I); }
2194 void visitURem(BinaryOperator &I) { handleIntegerDiv(I); }
2195 void visitSRem(BinaryOperator &I) { handleIntegerDiv(I); }
2196
2197 // Floating point division is side-effect free, so we cannot require that the
2198 // divisor is fully initialized; we propagate shadow instead. See PR37523.
2199 void visitFDiv(BinaryOperator &I) { handleShadowOr(I); }
2200 void visitFRem(BinaryOperator &I) { handleShadowOr(I); }
2201
2202 /// Instrument == and != comparisons.
2203 ///
2204 /// Sometimes the comparison result is known even if some of the bits of the
2205 /// arguments are not.
2206 void handleEqualityComparison(ICmpInst &I) {
2207 IRBuilder<> IRB(&I);
2208 Value *A = I.getOperand(0);
2209 Value *B = I.getOperand(1);
2210 Value *Sa = getShadow(A);
2211 Value *Sb = getShadow(B);
2212
2213 // Get rid of pointers and vectors of pointers.
2214 // For ints (and vectors of ints), types of A and Sa match,
2215 // and this is a no-op.
2216 A = IRB.CreatePointerCast(A, Sa->getType());
2217 B = IRB.CreatePointerCast(B, Sb->getType());
2218
2219 // A == B <==> (C = A^B) == 0
2220 // A != B <==> (C = A^B) != 0
2221 // Sc = Sa | Sb
2222 Value *C = IRB.CreateXor(A, B);
2223 Value *Sc = IRB.CreateOr(Sa, Sb);
2224 // Now we deal with the i = (C == 0) comparison (C != 0 is handled the same way).
2225 // Result is defined if one of the following is true
2226 // * there is a defined 1 bit in C
2227 // * C is fully defined
2228 // Si = !(C & ~Sc) && Sc
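// e.g. (a sketch) for A == 0b10?? and B == 0b0000 ('?' poisoned): C has a
// defined 1 in its top bit, so (~Sc & C) != 0 and the result of A != B is
// fully defined even though the low bits of C are poisoned.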
2229 Value *Zero = Constant::getNullValue(Sc->getType());
2230 Value *MinusOne = Constant::getAllOnesValue(Sc->getType());
2231 Value *Si =
2232 IRB.CreateAnd(IRB.CreateICmpNE(Sc, Zero),
2233 IRB.CreateICmpEQ(
2234 IRB.CreateAnd(IRB.CreateXor(Sc, MinusOne), C), Zero));
2235 Si->setName("_msprop_icmp");
2236 setShadow(&I, Si);
2237 setOriginForNaryOp(I);
2238 }
2239
2240 /// Build the lowest possible value of V, taking into account V's
2241 /// uninitialized bits.
2242 Value *getLowestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2243 bool isSigned) {
2244 if (isSigned) {
2245 // Split shadow into sign bit and other bits.
2246 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2247 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2248 // Maximize the undefined sign bit, minimize other undefined bits.
2249 return
2250 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaOtherBits)), SaSignBit);
2251 } else {
2252 // Minimize undefined bits.
2253 return IRB.CreateAnd(A, IRB.CreateNot(Sa));
2254 }
2255 }
2256
2257 /// Build the highest possible value of V, taking into account V's
2258 /// uninitialized bits.
2259 Value *getHighestPossibleValue(IRBuilder<> &IRB, Value *A, Value *Sa,
2260 bool isSigned) {
2261 if (isSigned) {
2262 // Split shadow into sign bit and other bits.
2263 Value *SaOtherBits = IRB.CreateLShr(IRB.CreateShl(Sa, 1), 1);
2264 Value *SaSignBit = IRB.CreateXor(Sa, SaOtherBits);
2265 // Minimize the undefined sign bit, maximize other undefined bits.
2266 return
2267 IRB.CreateOr(IRB.CreateAnd(A, IRB.CreateNot(SaSignBit)), SaOtherBits);
2268 } else {
2269 // Maximize undefined bits.
2270 return IRB.CreateOr(A, Sa);
2271 }
2272 }
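
// e.g. (unsigned, a sketch): A == 0b1011 with Sa == 0b0010 really means
// A == 0b10?1, so getLowestPossibleValue yields 0b1001 and
// getHighestPossibleValue yields 0b1011.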
2273
2274 /// Instrument relational comparisons.
2275 ///
2276 /// This function does exact shadow propagation for all relational
2277 /// comparisons of integers, pointers and vectors of those.
2278 /// FIXME: output seems suboptimal when one of the operands is a constant
2279 void handleRelationalComparisonExact(ICmpInst &I) {
2280 IRBuilder<> IRB(&I);
2281 Value *A = I.getOperand(0);
2282 Value *B = I.getOperand(1);
2283 Value *Sa = getShadow(A);
2284 Value *Sb = getShadow(B);
2285
2286 // Get rid of pointers and vectors of pointers.
2287 // For ints (and vectors of ints), types of A and Sa match,
2288 // and this is a no-op.
2289 A = IRB.CreatePointerCast(A, Sa->getType());
2290 B = IRB.CreatePointerCast(B, Sb->getType());
2291
2292 // Let [a0, a1] be the interval of possible values of A, taking into account
2293 // its undefined bits. Let [b0, b1] be the interval of possible values of B.
2294 // Then (A cmp B) is defined iff (a0 cmp b1) == (a1 cmp b0).
2295 bool IsSigned = I.isSigned();
2296 Value *S1 = IRB.CreateICmp(I.getPredicate(),
2297 getLowestPossibleValue(IRB, A, Sa, IsSigned),
2298 getHighestPossibleValue(IRB, B, Sb, IsSigned));
2299 Value *S2 = IRB.CreateICmp(I.getPredicate(),
2300 getHighestPossibleValue(IRB, A, Sa, IsSigned),
2301 getLowestPossibleValue(IRB, B, Sb, IsSigned));
2302 Value *Si = IRB.CreateXor(S1, S2);
2303 setShadow(&I, Si);
2304 setOriginForNaryOp(I);
2305 }
2306
2307 /// Instrument signed relational comparisons.
2308 ///
2309 /// Handle sign bit tests: x<0, x>=0, x<=-1, x>-1 by propagating the highest
2310 /// bit of the shadow. Everything else is delegated to handleShadowOr().
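///
/// e.g. for (x < 0) only the sign bit of x matters, so the shadow of the
/// result is just the sign bit of Sx: Shadow = (Sx s< 0).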
2311 void handleSignedRelationalComparison(ICmpInst &I) {
2312 Constant *constOp;
2313 Value *op = nullptr;
2314 CmpInst::Predicate pre;
2315 if ((constOp = dyn_cast<Constant>(I.getOperand(1)))) {
2316 op = I.getOperand(0);
2317 pre = I.getPredicate();
2318 } else if ((constOp = dyn_cast<Constant>(I.getOperand(0)))) {
2319 op = I.getOperand(1);
2320 pre = I.getSwappedPredicate();
2321 } else {
2322 handleShadowOr(I);
2323 return;
2324 }
2325
2326 if ((constOp->isNullValue() &&
2327 (pre == CmpInst::ICMP_SLT || pre == CmpInst::ICMP_SGE)) ||
2328 (constOp->isAllOnesValue() &&
2329 (pre == CmpInst::ICMP_SGT || pre == CmpInst::ICMP_SLE))) {
2330 IRBuilder<> IRB(&I);
2331 Value *Shadow = IRB.CreateICmpSLT(getShadow(op), getCleanShadow(op),
2332 "_msprop_icmp_s");
2333 setShadow(&I, Shadow);
2334 setOrigin(&I, getOrigin(op));
2335 } else {
2336 handleShadowOr(I);
2337 }
2338 }
2339
2340 void visitICmpInst(ICmpInst &I) {
2341 if (!ClHandleICmp) {
2342 handleShadowOr(I);
2343 return;
2344 }
2345 if (I.isEquality()) {
2346 handleEqualityComparison(I);
2347 return;
2348 }
2349
2350 assert(I.isRelational());
2351 if (ClHandleICmpExact) {
2352 handleRelationalComparisonExact(I);
2353 return;
2354 }
2355 if (I.isSigned()) {
2356 handleSignedRelationalComparison(I);
2357 return;
2358 }
2359
2360 assert(I.isUnsigned());
2361 if ((isa<Constant>(I.getOperand(0)) || isa<Constant>(I.getOperand(1)))) {
2362 handleRelationalComparisonExact(I);
2363 return;
2364 }
2365
2366 handleShadowOr(I);
2367 }
2368
2369 void visitFCmpInst(FCmpInst &I) {
2370 handleShadowOr(I);
2371 }
2372
2373 void handleShift(BinaryOperator &I) {
2374 IRBuilder<> IRB(&I);
2375 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2376 // Otherwise perform the same shift on S1.
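// e.g. (a sketch) for %r = shl i8 %x, 3: with a clean shift amount
// (S2 == 0) the result shadow is S1 << 3; if any bit of S2 is set, S2Conv
// becomes all-ones and the whole result is poisoned.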
2377 Value *S1 = getShadow(&I, 0);
2378 Value *S2 = getShadow(&I, 1);
2379 Value *S2Conv = IRB.CreateSExt(IRB.CreateICmpNE(S2, getCleanShadow(S2)),
2380 S2->getType());
2381 Value *V2 = I.getOperand(1);
2382 Value *Shift = IRB.CreateBinOp(I.getOpcode(), S1, V2);
2383 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2384 setOriginForNaryOp(I);
2385 }
2386
2387 void visitShl(BinaryOperator &I) { handleShift(I); }
2388 void visitAShr(BinaryOperator &I) { handleShift(I); }
2389 void visitLShr(BinaryOperator &I) { handleShift(I); }
2390
2391 /// Instrument llvm.memmove
2392 ///
2393 /// At this point we don't know if llvm.memmove will be inlined or not.
2394 /// If we don't instrument it and it gets inlined,
2395 /// our interceptor will not kick in and we will lose the memmove.
2396 /// If we instrument the call here, but it does not get inlined,
2397 /// we will memmove the shadow twice, which is bad in the case
2398 /// of overlapping regions. So, we simply lower the intrinsic to a call.
2399 ///
2400 /// Similar situation exists for memcpy and memset.
2401 void visitMemMoveInst(MemMoveInst &I) {
2402 IRBuilder<> IRB(&I);
2403 IRB.CreateCall(
2404 MS.MemmoveFn,
2405 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2406 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2407 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2408 I.eraseFromParent();
2409 }
2410
2411 // Similar to memmove: avoid copying shadow twice.
2412 // This is somewhat unfortunate as it may slow down small constant memcpys.
2413 // FIXME: consider doing manual inline for small constant sizes and proper
2414 // alignment.
2415 void visitMemCpyInst(MemCpyInst &I) {
2416 IRBuilder<> IRB(&I);
2417 IRB.CreateCall(
2418 MS.MemcpyFn,
2419 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2420 IRB.CreatePointerCast(I.getArgOperand(1), IRB.getInt8PtrTy()),
2421 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2422 I.eraseFromParent();
2423 }
2424
2425 // Same as memcpy.
2426 void visitMemSetInst(MemSetInst &I) {
2427 IRBuilder<> IRB(&I);
2428 IRB.CreateCall(
2429 MS.MemsetFn,
2430 {IRB.CreatePointerCast(I.getArgOperand(0), IRB.getInt8PtrTy()),
2431 IRB.CreateIntCast(I.getArgOperand(1), IRB.getInt32Ty(), false),
2432 IRB.CreateIntCast(I.getArgOperand(2), MS.IntptrTy, false)});
2433 I.eraseFromParent();
2434 }
2435
2436 void visitVAStartInst(VAStartInst &I) {
2437 VAHelper->visitVAStartInst(I);
2438 }
2439
2440 void visitVACopyInst(VACopyInst &I) {
2441 VAHelper->visitVACopyInst(I);
2442 }
2443
2444 /// Handle vector store-like intrinsics.
2445 ///
2446 /// Instrument intrinsics that look like a simple SIMD store: writes memory,
2447 /// has 1 pointer argument and 1 vector argument, returns void.
2448 bool handleVectorStoreIntrinsic(IntrinsicInst &I) {
2449 IRBuilder<> IRB(&I);
2450 Value* Addr = I.getArgOperand(0);
2451 Value *Shadow = getShadow(&I, 1);
2452 Value *ShadowPtr, *OriginPtr;
2453
2454 // We don't know the pointer alignment (could be unaligned SSE store!).
2455 // Have to assume the worst case.
2456 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2457 Addr, IRB, Shadow->getType(), Align::None(), /*isStore*/ true);
2458 IRB.CreateAlignedStore(Shadow, ShadowPtr, 1);
2459
2460 if (ClCheckAccessAddress)
2461 insertShadowCheck(Addr, &I);
2462
2463 // FIXME: factor out common code from materializeStores
2464 if (MS.TrackOrigins) IRB.CreateStore(getOrigin(&I, 1), OriginPtr);
2465 return true;
2466 }
2467
2468 /// Handle vector load-like intrinsics.
2469 ///
2470 /// Instrument intrinsics that look like a simple SIMD load: reads memory,
2471 /// has 1 pointer argument, returns a vector.
2472 bool handleVectorLoadIntrinsic(IntrinsicInst &I) {
2473 IRBuilder<> IRB(&I);
2474 Value *Addr = I.getArgOperand(0);
2475
2476 Type *ShadowTy = getShadowTy(&I);
2477 Value *ShadowPtr = nullptr, *OriginPtr = nullptr;
2478 if (PropagateShadow) {
2479 // We don't know the pointer alignment (could be unaligned SSE load!).
2480 // Have to assume the worst case.
2481 const Align Alignment = Align::None();
2482 std::tie(ShadowPtr, OriginPtr) =
2483 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2484 setShadow(&I, IRB.CreateAlignedLoad(ShadowTy, ShadowPtr,
2485 Alignment.value(), "_msld"));
2486 } else {
2487 setShadow(&I, getCleanShadow(&I));
2488 }
2489
2490 if (ClCheckAccessAddress)
2491 insertShadowCheck(Addr, &I);
2492
2493 if (MS.TrackOrigins) {
2494 if (PropagateShadow)
2495 setOrigin(&I, IRB.CreateLoad(MS.OriginTy, OriginPtr));
2496 else
2497 setOrigin(&I, getCleanOrigin());
2498 }
2499 return true;
2500 }
2501
2502 /// Handle (SIMD arithmetic)-like intrinsics.
2503 ///
2504 /// Instrument intrinsics with any number of arguments of the same type,
2505 /// equal to the return type. The type should be simple (no aggregates or
2506 /// pointers; vectors are fine).
2507 /// Caller guarantees that this intrinsic does not access memory.
2508 bool maybeHandleSimpleNomemIntrinsic(IntrinsicInst &I) {
2509 Type *RetTy = I.getType();
2510 if (!(RetTy->isIntOrIntVectorTy() ||
2511 RetTy->isFPOrFPVectorTy() ||
2512 RetTy->isX86_MMXTy()))
2513 return false;
2514
2515 unsigned NumArgOperands = I.getNumArgOperands();
2516
2517 for (unsigned i = 0; i < NumArgOperands; ++i) {
2518 Type *Ty = I.getArgOperand(i)->getType();
2519 if (Ty != RetTy)
2520 return false;
2521 }
2522
2523 IRBuilder<> IRB(&I);
2524 ShadowAndOriginCombiner SC(this, IRB);
2525 for (unsigned i = 0; i < NumArgOperands; ++i)
2526 SC.Add(I.getArgOperand(i));
2527 SC.Done(&I);
2528
2529 return true;
2530 }
2531
2532 /// Heuristically instrument unknown intrinsics.
2533 ///
2534 /// The main purpose of this code is to do something reasonable with all
2535 /// random intrinsics we might encounter, most importantly - SIMD intrinsics.
2536 /// We recognize several classes of intrinsics by their argument types and
2537 /// ModRefBehavior and apply special instrumentation when we are reasonably
2538 /// sure that we know what the intrinsic does.
2539 ///
2540 /// We special-case intrinsics where this approach fails. See llvm.bswap
2541 /// handling as an example of that.
2542 bool handleUnknownIntrinsic(IntrinsicInst &I) {
2543 unsigned NumArgOperands = I.getNumArgOperands();
2544 if (NumArgOperands == 0)
2545 return false;
2546
2547 if (NumArgOperands == 2 &&
2548 I.getArgOperand(0)->getType()->isPointerTy() &&
2549 I.getArgOperand(1)->getType()->isVectorTy() &&
2550 I.getType()->isVoidTy() &&
2551 !I.onlyReadsMemory()) {
2552 // This looks like a vector store.
2553 return handleVectorStoreIntrinsic(I);
2554 }
2555
2556 if (NumArgOperands == 1 &&
2557 I.getArgOperand(0)->getType()->isPointerTy() &&
2558 I.getType()->isVectorTy() &&
2559 I.onlyReadsMemory()) {
2560 // This looks like a vector load.
2561 return handleVectorLoadIntrinsic(I);
2562 }
2563
2564 if (I.doesNotAccessMemory())
2565 if (maybeHandleSimpleNomemIntrinsic(I))
2566 return true;
2567
2568 // FIXME: detect and handle SSE maskstore/maskload
2569 return false;
2570 }
2571
2572 void handleInvariantGroup(IntrinsicInst &I) {
2573 setShadow(&I, getShadow(&I, 0));
2574 setOrigin(&I, getOrigin(&I, 0));
2575 }
2576
2577 void handleLifetimeStart(IntrinsicInst &I) {
2578 if (!PoisonStack)
2579 return;
2580 DenseMap<Value *, AllocaInst *> AllocaForValue;
2581 AllocaInst *AI =
2582 llvm::findAllocaForValue(I.getArgOperand(1), AllocaForValue);
2583 if (!AI)
2584 InstrumentLifetimeStart = false;
2585 LifetimeStartList.push_back(std::make_pair(&I, AI));
2586 }
2587
2588 void handleBswap(IntrinsicInst &I) {
2589 IRBuilder<> IRB(&I);
2590 Value *Op = I.getArgOperand(0);
2591 Type *OpType = Op->getType();
2592 Function *BswapFunc = Intrinsic::getDeclaration(
2593 F.getParent(), Intrinsic::bswap, makeArrayRef(&OpType, 1));
2594 setShadow(&I, IRB.CreateCall(BswapFunc, getShadow(Op)));
2595 setOrigin(&I, getOrigin(Op));
2596 }
2597
2598 // Instrument vector convert intrinsic.
2599 //
2600 // This function instruments intrinsics like cvtsi2ss:
2601 // %Out = int_xxx_cvtyyy(%ConvertOp)
2602 // or
2603 // %Out = int_xxx_cvtyyy(%CopyOp, %ConvertOp)
2604 // Intrinsic converts \p NumUsedElements elements of \p ConvertOp to the same
2605 // number of \p Out elements, and (if it has 2 arguments) copies the rest of the
2606 // elements from \p CopyOp.
2607 // In most cases conversion involves floating-point value which may trigger a
2608 // hardware exception when not fully initialized. For this reason we require
2609 // \p ConvertOp[0:NumUsedElements] to be fully initialized and trap otherwise.
2610 // We copy the shadow of \p CopyOp[NumUsedElements:] to \p
2611 // Out[NumUsedElements:]. This means that intrinsics without \p CopyOp always
2612 // return a fully initialized value.
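// e.g. (a sketch) a 1-argument cvt like cvtss2si uses only element 0 of its
// vector operand: we check S(ConvertOp)[0] and, with no CopyOp, the scalar
// result gets a fully clean shadow.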
2613 void handleVectorConvertIntrinsic(IntrinsicInst &I, int NumUsedElements) {
2614 IRBuilder<> IRB(&I);
2615 Value *CopyOp, *ConvertOp;
2616
2617 switch (I.getNumArgOperands()) {
2618 case 3:
2619 assert(isa<ConstantInt>(I.getArgOperand(2)) && "Invalid rounding mode");
2620 LLVM_FALLTHROUGH;
2621 case 2:
2622 CopyOp = I.getArgOperand(0);
2623 ConvertOp = I.getArgOperand(1);
2624 break;
2625 case 1:
2626 ConvertOp = I.getArgOperand(0);
2627 CopyOp = nullptr;
2628 break;
2629 default:
2630 llvm_unreachable("Cvt intrinsic with unsupported number of arguments.");
2631 }
2632
2633 // The first *NumUsedElements* elements of ConvertOp are converted to the
2634 // same number of output elements. The rest of the output is copied from
2635 // CopyOp, or (if not available) filled with zeroes.
2636 // Combine shadow for elements of ConvertOp that are used in this operation,
2637 // and insert a check.
2638 // FIXME: consider propagating shadow of ConvertOp, at least in the case of
2639 // int->any conversion.
2640 Value *ConvertShadow = getShadow(ConvertOp);
2641 Value *AggShadow = nullptr;
2642 if (ConvertOp->getType()->isVectorTy()) {
2643 AggShadow = IRB.CreateExtractElement(
2644 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2645 for (int i = 1; i < NumUsedElements; ++i) {
2646 Value *MoreShadow = IRB.CreateExtractElement(
2647 ConvertShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2648 AggShadow = IRB.CreateOr(AggShadow, MoreShadow);
2649 }
2650 } else {
2651 AggShadow = ConvertShadow;
2652 }
2653 assert(AggShadow->getType()->isIntegerTy());
2654 insertShadowCheck(AggShadow, getOrigin(ConvertOp), &I);
2655
2656 // Build result shadow by zero-filling parts of CopyOp shadow that come from
2657 // ConvertOp.
2658 if (CopyOp) {
2659 assert(CopyOp->getType() == I.getType());
2660 assert(CopyOp->getType()->isVectorTy());
2661 Value *ResultShadow = getShadow(CopyOp);
2662 Type *EltTy = ResultShadow->getType()->getVectorElementType();
2663 for (int i = 0; i < NumUsedElements; ++i) {
2664 ResultShadow = IRB.CreateInsertElement(
2665 ResultShadow, ConstantInt::getNullValue(EltTy),
2666 ConstantInt::get(IRB.getInt32Ty(), i));
2667 }
2668 setShadow(&I, ResultShadow);
2669 setOrigin(&I, getOrigin(CopyOp));
2670 } else {
2671 setShadow(&I, getCleanShadow(&I));
2672 setOrigin(&I, getCleanOrigin());
2673 }
2674 }
2675
2676 // Given a scalar or vector, extract lower 64 bits (or less), and return all
2677 // zeroes if it is zero, and all ones otherwise.
2678 Value *Lower64ShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2679 if (S->getType()->isVectorTy())
2680 S = CreateShadowCast(IRB, S, IRB.getInt64Ty(), /* Signed */ true);
2681 assert(S->getType()->getPrimitiveSizeInBits() <= 64);
2682 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2683 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2684 }
2685
2686 // Given a vector, extract its first element, and return all
2687 // zeroes if it is zero, and all ones otherwise.
2688 Value *LowerElementShadowExtend(IRBuilder<> &IRB, Value *S, Type *T) {
2689 Value *S1 = IRB.CreateExtractElement(S, (uint64_t)0);
2690 Value *S2 = IRB.CreateICmpNE(S1, getCleanShadow(S1));
2691 return CreateShadowCast(IRB, S2, T, /* Signed */ true);
2692 }
2693
2694 Value *VariableShadowExtend(IRBuilder<> &IRB, Value *S) {
2695 Type *T = S->getType();
2696 assert(T->isVectorTy());
2697 Value *S2 = IRB.CreateICmpNE(S, getCleanShadow(S));
2698 return IRB.CreateSExt(S2, T);
2699 }
2700
2701 // Instrument vector shift intrinsic.
2702 //
2703 // This function instruments intrinsics like int_x86_avx2_psll_w.
2704 // Intrinsic shifts %In by %ShiftSize bits.
2705 // %ShiftSize may be a vector. In that case the lower 64 bits determine shift
2706 // size, and the rest is ignored. Behavior is defined even if shift size is
2707 // greater than register (or field) width.
2708 void handleVectorShiftIntrinsic(IntrinsicInst &I, bool Variable) {
2709 assert(I.getNumArgOperands() == 2);
2710 IRBuilder<> IRB(&I);
2711 // If any of the S2 bits are poisoned, the whole thing is poisoned.
2712 // Otherwise perform the same shift on S1.
2713 Value *S1 = getShadow(&I, 0);
2714 Value *S2 = getShadow(&I, 1);
2715 Value *S2Conv = Variable ? VariableShadowExtend(IRB, S2)
2716 : Lower64ShadowExtend(IRB, S2, getShadowTy(&I));
2717 Value *V1 = I.getOperand(0);
2718 Value *V2 = I.getOperand(1);
2719 Value *Shift = IRB.CreateCall(I.getFunctionType(), I.getCalledValue(),
2720 {IRB.CreateBitCast(S1, V1->getType()), V2});
2721 Shift = IRB.CreateBitCast(Shift, getShadowTy(&I));
2722 setShadow(&I, IRB.CreateOr(Shift, S2Conv));
2723 setOriginForNaryOp(I);
2724 }
2725
2726 // Get an X86_MMX-sized vector type.
2727 Type *getMMXVectorTy(unsigned EltSizeInBits) {
2728 const unsigned X86_MMXSizeInBits = 64;
2729 assert(EltSizeInBits != 0 && (X86_MMXSizeInBits % EltSizeInBits) == 0 &&
2730 "Illegal MMX vector element size");
2731 return VectorType::get(IntegerType::get(*MS.C, EltSizeInBits),
2732 X86_MMXSizeInBits / EltSizeInBits);
2733 }
2734
2735 // Returns a signed counterpart for an (un)signed-saturate-and-pack
2736 // intrinsic.
2737 Intrinsic::ID getSignedPackIntrinsic(Intrinsic::ID id) {
2738 switch (id) {
2739 case Intrinsic::x86_sse2_packsswb_128:
2740 case Intrinsic::x86_sse2_packuswb_128:
2741 return Intrinsic::x86_sse2_packsswb_128;
2742
2743 case Intrinsic::x86_sse2_packssdw_128:
2744 case Intrinsic::x86_sse41_packusdw:
2745 return Intrinsic::x86_sse2_packssdw_128;
2746
2747 case Intrinsic::x86_avx2_packsswb:
2748 case Intrinsic::x86_avx2_packuswb:
2749 return Intrinsic::x86_avx2_packsswb;
2750
2751 case Intrinsic::x86_avx2_packssdw:
2752 case Intrinsic::x86_avx2_packusdw:
2753 return Intrinsic::x86_avx2_packssdw;
2754
2755 case Intrinsic::x86_mmx_packsswb:
2756 case Intrinsic::x86_mmx_packuswb:
2757 return Intrinsic::x86_mmx_packsswb;
2758
2759 case Intrinsic::x86_mmx_packssdw:
2760 return Intrinsic::x86_mmx_packssdw;
2761 default:
2762 llvm_unreachable("unexpected intrinsic id");
2763 }
2764 }
2765
2766 // Instrument vector pack intrinsic.
2767 //
2768 // This function instruments intrinsics like x86_mmx_packsswb, that
2769 // packs elements of 2 input vectors into half as many bits with saturation.
2770 // Shadow is propagated with the signed variant of the same intrinsic applied
2771 // to sext(Sa != zeroinitializer), sext(Sb != zeroinitializer).
2772 // EltSizeInBits is used only for x86mmx arguments.
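// Using the signed variant matters here (a sketch): packing sext(Si != 0)
// gives source lanes that are 0 or -1, and signed saturation maps -1 to an
// all-ones (poisoned) narrow lane, while unsigned saturation would clamp -1
// to 0 and lose the poison.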
2773 void handleVectorPackIntrinsic(IntrinsicInst &I, unsigned EltSizeInBits = 0) {
2774 assert(I.getNumArgOperands() == 2);
2775 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2776 IRBuilder<> IRB(&I);
2777 Value *S1 = getShadow(&I, 0);
2778 Value *S2 = getShadow(&I, 1);
2779 assert(isX86_MMX || S1->getType()->isVectorTy());
2780
2781 // SExt and ICmpNE below must apply to individual elements of input vectors.
2782 // In case of x86mmx arguments, cast them to appropriate vector types and
2783 // back.
2784 Type *T = isX86_MMX ? getMMXVectorTy(EltSizeInBits) : S1->getType();
2785 if (isX86_MMX) {
2786 S1 = IRB.CreateBitCast(S1, T);
2787 S2 = IRB.CreateBitCast(S2, T);
2788 }
2789 Value *S1_ext = IRB.CreateSExt(
2790 IRB.CreateICmpNE(S1, Constant::getNullValue(T)), T);
2791 Value *S2_ext = IRB.CreateSExt(
2792 IRB.CreateICmpNE(S2, Constant::getNullValue(T)), T);
2793 if (isX86_MMX) {
2794 Type *X86_MMXTy = Type::getX86_MMXTy(*MS.C);
2795 S1_ext = IRB.CreateBitCast(S1_ext, X86_MMXTy);
2796 S2_ext = IRB.CreateBitCast(S2_ext, X86_MMXTy);
2797 }
2798
2799 Function *ShadowFn = Intrinsic::getDeclaration(
2800 F.getParent(), getSignedPackIntrinsic(I.getIntrinsicID()));
2801
2802 Value *S =
2803 IRB.CreateCall(ShadowFn, {S1_ext, S2_ext}, "_msprop_vector_pack");
2804 if (isX86_MMX) S = IRB.CreateBitCast(S, getShadowTy(&I));
2805 setShadow(&I, S);
2806 setOriginForNaryOp(I);
2807 }
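// For illustration (schematic IR), instrumenting
//   %r = call <16 x i8> @llvm.x86.sse2.packuswb.128(<8 x i16> %a, <8 x i16> %b)
// produces roughly
//   %Sr = packsswb.128(sext(%Sa != 0), sext(%Sb != 0))
// The *signed* saturating pack maps an all-ones (poisoned) input element to
// an all-ones output element and a zero (clean) element to zero, which is
// exactly the per-element shadow we want; the unsigned variant would clamp
// -1 to 0 and lose the poison.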
2808
2809 // Instrument sum-of-absolute-differences intrinsic.
2810 void handleVectorSadIntrinsic(IntrinsicInst &I) {
2811 const unsigned SignificantBitsPerResultElement = 16;
2812 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2813 Type *ResTy = isX86_MMX ? IntegerType::get(*MS.C, 64) : I.getType();
2814 unsigned ZeroBitsPerResultElement =
2815 ResTy->getScalarSizeInBits() - SignificantBitsPerResultElement;
2816
2817 IRBuilder<> IRB(&I);
2818 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2819 S = IRB.CreateBitCast(S, ResTy);
2820 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2821 ResTy);
2822 S = IRB.CreateLShr(S, ZeroBitsPerResultElement);
2823 S = IRB.CreateBitCast(S, getShadowTy(&I));
2824 setShadow(&I, S);
2825 setOriginForNaryOp(I);
2826 }
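// Rationale (a sketch): psadbw sums eight 8-bit absolute differences into
// the low 16 bits of each 64-bit result element, and the upper 48 bits are
// always zero. So if any input byte feeding a result element is poisoned,
// we poison that element's low 16 bits (the LShr clears the known-zero
// high bits); otherwise the element is fully clean.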
2827
2828 // Instrument multiply-add intrinsic.
2829 void handleVectorPmaddIntrinsic(IntrinsicInst &I,
2830 unsigned EltSizeInBits = 0) {
2831 bool isX86_MMX = I.getOperand(0)->getType()->isX86_MMXTy();
2832 Type *ResTy = isX86_MMX ? getMMXVectorTy(EltSizeInBits * 2) : I.getType();
2833 IRBuilder<> IRB(&I);
2834 Value *S = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2835 S = IRB.CreateBitCast(S, ResTy);
2836 S = IRB.CreateSExt(IRB.CreateICmpNE(S, Constant::getNullValue(ResTy)),
2837 ResTy);
2838 S = IRB.CreateBitCast(S, getShadowTy(&I));
2839 setShadow(&I, S);
2840 setOriginForNaryOp(I);
2841 }
2842
2843 // Instrument compare-packed intrinsic.
2844 // Basically, an or followed by sext(icmp ne 0) to end up with all-zeros or
2845 // all-ones shadow.
2846 void handleVectorComparePackedIntrinsic(IntrinsicInst &I) {
2847 IRBuilder<> IRB(&I);
2848 Type *ResTy = getShadowTy(&I);
2849 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2850 Value *S = IRB.CreateSExt(
2851 IRB.CreateICmpNE(S0, Constant::getNullValue(ResTy)), ResTy);
2852 setShadow(&I, S);
2853 setOriginForNaryOp(I);
2854 }
2855
2856 // Instrument compare-scalar intrinsic.
2857 // This handles both cmp* intrinsics which return the result in the first
2858 // element of a vector, and comi* which return the result as i32.
2859 void handleVectorCompareScalarIntrinsic(IntrinsicInst &I) {
2860 IRBuilder<> IRB(&I);
2861 Value *S0 = IRB.CreateOr(getShadow(&I, 0), getShadow(&I, 1));
2862 Value *S = LowerElementShadowExtend(IRB, S0, getShadowTy(&I));
2863 setShadow(&I, S);
2864 setOriginForNaryOp(I);
2865 }
2866
2867 void handleStmxcsr(IntrinsicInst &I) {
2868 IRBuilder<> IRB(&I);
2869 Value* Addr = I.getArgOperand(0);
2870 Type *Ty = IRB.getInt32Ty();
2871 Value *ShadowPtr =
2872 getShadowOriginPtr(Addr, IRB, Ty, Align::None(), /*isStore*/ true)
2873 .first;
2874
2875 IRB.CreateStore(getCleanShadow(Ty),
2876 IRB.CreatePointerCast(ShadowPtr, Ty->getPointerTo()));
2877
2878 if (ClCheckAccessAddress)
2879 insertShadowCheck(Addr, &I);
2880 }
2881
2882 void handleLdmxcsr(IntrinsicInst &I) {
2883 if (!InsertChecks) return;
2884
2885 IRBuilder<> IRB(&I);
2886 Value *Addr = I.getArgOperand(0);
2887 Type *Ty = IRB.getInt32Ty();
2888 const Align Alignment = Align::None();
2889 Value *ShadowPtr, *OriginPtr;
2890 std::tie(ShadowPtr, OriginPtr) =
2891 getShadowOriginPtr(Addr, IRB, Ty, Alignment, /*isStore*/ false);
2892
2893 if (ClCheckAccessAddress)
2894 insertShadowCheck(Addr, &I);
2895
2896 Value *Shadow =
2897 IRB.CreateAlignedLoad(Ty, ShadowPtr, Alignment.value(), "_ldmxcsr");
2898 Value *Origin = MS.TrackOrigins ? IRB.CreateLoad(MS.OriginTy, OriginPtr)
2899 : getCleanOrigin();
2900 insertShadowCheck(Shadow, Origin, &I);
2901 }
2902
2903 void handleMaskedStore(IntrinsicInst &I) {
2904 IRBuilder<> IRB(&I);
2905 Value *V = I.getArgOperand(0);
2906 Value *Addr = I.getArgOperand(1);
2907 const MaybeAlign Alignment(
2908 cast<ConstantInt>(I.getArgOperand(2))->getZExtValue());
2909 Value *Mask = I.getArgOperand(3);
2910 Value *Shadow = getShadow(V);
2911
2912 Value *ShadowPtr;
2913 Value *OriginPtr;
2914 std::tie(ShadowPtr, OriginPtr) = getShadowOriginPtr(
2915 Addr, IRB, Shadow->getType(), Alignment, /*isStore*/ true);
2916
2917 if (ClCheckAccessAddress) {
2918 insertShadowCheck(Addr, &I);
2919 // Uninitialized mask is kind of like uninitialized address, but not as
2920 // scary.
2921 insertShadowCheck(Mask, &I);
2922 }
2923
2924 IRB.CreateMaskedStore(Shadow, ShadowPtr, Alignment ? Alignment->value() : 0,
2925 Mask);
2926
2927 if (MS.TrackOrigins) {
2928 auto &DL = F.getParent()->getDataLayout();
2929 paintOrigin(IRB, getOrigin(V), OriginPtr,
2930 DL.getTypeStoreSize(Shadow->getType()),
2931 llvm::max(Alignment, kMinOriginAlignment));
2932 }
2933 }
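// Schematically: for llvm.masked.store(%v, %p, align, %mask) we emit
// llvm.masked.store(%Sv, shadow(%p), align, %mask), i.e. the shadow is
// stored under the very same mask, so shadow lanes that the application
// store skips are left untouched as well.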
2934
2935 bool handleMaskedLoad(IntrinsicInst &I) {
2936 IRBuilder<> IRB(&I);
2937 Value *Addr = I.getArgOperand(0);
2938 const MaybeAlign Alignment(
2939 cast<ConstantInt>(I.getArgOperand(1))->getZExtValue());
2940 Value *Mask = I.getArgOperand(2);
2941 Value *PassThru = I.getArgOperand(3);
2942
2943 Type *ShadowTy = getShadowTy(&I);
2944 Value *ShadowPtr, *OriginPtr;
2945 if (PropagateShadow) {
2946 std::tie(ShadowPtr, OriginPtr) =
2947 getShadowOriginPtr(Addr, IRB, ShadowTy, Alignment, /*isStore*/ false);
2948 setShadow(&I, IRB.CreateMaskedLoad(
2949 ShadowPtr, Alignment ? Alignment->value() : 0, Mask,
2950 getShadow(PassThru), "_msmaskedld"));
2951 } else {
2952 setShadow(&I, getCleanShadow(&I));
2953 }
2954
2955 if (ClCheckAccessAddress) {
2956 insertShadowCheck(Addr, &I);
2957 insertShadowCheck(Mask, &I);
2958 }
2959
2960 if (MS.TrackOrigins) {
2961 if (PropagateShadow) {
2962 // Choose between PassThru's and the loaded value's origins.
2963 Value *MaskedPassThruShadow = IRB.CreateAnd(
2964 getShadow(PassThru), IRB.CreateSExt(IRB.CreateNeg(Mask), ShadowTy));
2965
2966 Value *Acc = IRB.CreateExtractElement(
2967 MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), 0));
2968 for (int i = 1, N = PassThru->getType()->getVectorNumElements(); i < N;
2969 ++i) {
2970 Value *More = IRB.CreateExtractElement(
2971 MaskedPassThruShadow, ConstantInt::get(IRB.getInt32Ty(), i));
2972 Acc = IRB.CreateOr(Acc, More);
2973 }
2974
2975 Value *Origin = IRB.CreateSelect(
2976 IRB.CreateICmpNE(Acc, Constant::getNullValue(Acc->getType())),
2977 getOrigin(PassThru), IRB.CreateLoad(MS.OriginTy, OriginPtr));
2978
2979 setOrigin(&I, Origin);
2980 } else {
2981 setOrigin(&I, getCleanOrigin());
2982 }
2983 }
2984 return true;
2985 }
2986
2987 // Instrument BMI / BMI2 intrinsics.
2988 // All of these intrinsics are Z = I(X, Y)
2989 // where the types of all operands and the result match, and are either i32 or i64.
2990 // The following instrumentation happens to work for all of them:
2991 // Sz = I(Sx, Y) | (sext (Sy != 0))
2992 void handleBmiIntrinsic(IntrinsicInst &I) {
2993 IRBuilder<> IRB(&I);
2994 Type *ShadowTy = getShadowTy(&I);
2995
2996 // If any bit of the mask operand is poisoned, then the whole thing is.
2997 Value *SMask = getShadow(&I, 1);
2998 SMask = IRB.CreateSExt(IRB.CreateICmpNE(SMask, getCleanShadow(ShadowTy)),
2999 ShadowTy);
3000 // Apply the same intrinsic to the shadow of the first operand.
3001 Value *S = IRB.CreateCall(I.getCalledFunction(),
3002 {getShadow(&I, 0), I.getOperand(1)});
3003 S = IRB.CreateOr(SMask, S);
3004 setShadow(&I, S);
3005 setOriginForNaryOp(I);
3006 }
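// For illustration (a sketch), for
//   %r = call i32 @llvm.x86.bmi.pdep.32(i32 %x, i32 %y)
// the emitted shadow computation is roughly:
//   %SMask = sext (icmp ne i32 %Sy, 0)                ; poisoned mask
//   %S     = call i32 @llvm.x86.bmi.pdep.32(i32 %Sx, i32 %y)
//   %Sr    = or i32 %SMask, %S
// Applying the intrinsic itself to the shadow of %x moves the shadow bits
// to the same positions the data bits end up in.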
3007
3008 Constant *getPclmulMask(IRBuilder<> &IRB, unsigned Width, bool OddElements) {
3009 SmallVector<Constant *, 8> Mask;
3010 for (unsigned X = OddElements ? 1 : 0; X < Width; X += 2) {
3011 Constant *C = ConstantInt::get(IRB.getInt32Ty(), X);
3012 Mask.push_back(C);
3013 Mask.push_back(C);
3014 }
3015 return ConstantVector::get(Mask);
3016 }
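// E.g. getPclmulMask(IRB, /*Width=*/4, /*OddElements=*/false) builds the
// shuffle mask <0, 0, 2, 2>, and with OddElements == true it builds
// <1, 1, 3, 3>: each used element is duplicated over its unused neighbor.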
3017
3018 // Instrument pclmul intrinsics.
3019 // These intrinsics operate either on odd or on even elements of the input
3020 // vectors, depending on the constant in the 3rd argument, ignoring the rest.
3021 // Replace the unused elements with copies of the used ones, ex:
3022 // (0, 1, 2, 3) -> (0, 0, 2, 2) (even case)
3023 // or
3024 // (0, 1, 2, 3) -> (1, 1, 3, 3) (odd case)
3025 // and then apply the usual shadow combining logic.
3026 void handlePclmulIntrinsic(IntrinsicInst &I) {
3027 IRBuilder<> IRB(&I);
3028 Type *ShadowTy = getShadowTy(&I);
3029 unsigned Width = I.getArgOperand(0)->getType()->getVectorNumElements();
3030 assert(isa<ConstantInt>(I.getArgOperand(2)) &&
3031 "pclmul 3rd operand must be a constant");
3032 unsigned Imm = cast<ConstantInt>(I.getArgOperand(2))->getZExtValue();
3033 Value *Shuf0 =
3034 IRB.CreateShuffleVector(getShadow(&I, 0), UndefValue::get(ShadowTy),
3035 getPclmulMask(IRB, Width, Imm & 0x01));
3036 Value *Shuf1 =
3037 IRB.CreateShuffleVector(getShadow(&I, 1), UndefValue::get(ShadowTy),
3038 getPclmulMask(IRB, Width, Imm & 0x10));
3039 ShadowAndOriginCombiner SOC(this, IRB);
3040 SOC.Add(Shuf0, getOrigin(&I, 0));
3041 SOC.Add(Shuf1, getOrigin(&I, 1));
3042 SOC.Done(&I);
3043 }
3044
3045 void visitIntrinsicInst(IntrinsicInst &I) {
3046 switch (I.getIntrinsicID()) {
3047 case Intrinsic::lifetime_start:
3048 handleLifetimeStart(I);
3049 break;
3050 case Intrinsic::launder_invariant_group:
3051 case Intrinsic::strip_invariant_group:
3052 handleInvariantGroup(I);
3053 break;
3054 case Intrinsic::bswap:
3055 handleBswap(I);
3056 break;
3057 case Intrinsic::masked_store:
3058 handleMaskedStore(I);
3059 break;
3060 case Intrinsic::masked_load:
3061 handleMaskedLoad(I);
3062 break;
3063 case Intrinsic::x86_sse_stmxcsr:
3064 handleStmxcsr(I);
3065 break;
3066 case Intrinsic::x86_sse_ldmxcsr:
3067 handleLdmxcsr(I);
3068 break;
3069 case Intrinsic::x86_avx512_vcvtsd2usi64:
3070 case Intrinsic::x86_avx512_vcvtsd2usi32:
3071 case Intrinsic::x86_avx512_vcvtss2usi64:
3072 case Intrinsic::x86_avx512_vcvtss2usi32:
3073 case Intrinsic::x86_avx512_cvttss2usi64:
3074 case Intrinsic::x86_avx512_cvttss2usi:
3075 case Intrinsic::x86_avx512_cvttsd2usi64:
3076 case Intrinsic::x86_avx512_cvttsd2usi:
3077 case Intrinsic::x86_avx512_cvtusi2ss:
3078 case Intrinsic::x86_avx512_cvtusi642sd:
3079 case Intrinsic::x86_avx512_cvtusi642ss:
3080 case Intrinsic::x86_sse2_cvtsd2si64:
3081 case Intrinsic::x86_sse2_cvtsd2si:
3082 case Intrinsic::x86_sse2_cvtsd2ss:
3083 case Intrinsic::x86_sse2_cvttsd2si64:
3084 case Intrinsic::x86_sse2_cvttsd2si:
3085 case Intrinsic::x86_sse_cvtss2si64:
3086 case Intrinsic::x86_sse_cvtss2si:
3087 case Intrinsic::x86_sse_cvttss2si64:
3088 case Intrinsic::x86_sse_cvttss2si:
3089 handleVectorConvertIntrinsic(I, 1);
3090 break;
3091 case Intrinsic::x86_sse_cvtps2pi:
3092 case Intrinsic::x86_sse_cvttps2pi:
3093 handleVectorConvertIntrinsic(I, 2);
3094 break;
3095
3096 case Intrinsic::x86_avx512_psll_w_512:
3097 case Intrinsic::x86_avx512_psll_d_512:
3098 case Intrinsic::x86_avx512_psll_q_512:
3099 case Intrinsic::x86_avx512_pslli_w_512:
3100 case Intrinsic::x86_avx512_pslli_d_512:
3101 case Intrinsic::x86_avx512_pslli_q_512:
3102 case Intrinsic::x86_avx512_psrl_w_512:
3103 case Intrinsic::x86_avx512_psrl_d_512:
3104 case Intrinsic::x86_avx512_psrl_q_512:
3105 case Intrinsic::x86_avx512_psra_w_512:
3106 case Intrinsic::x86_avx512_psra_d_512:
3107 case Intrinsic::x86_avx512_psra_q_512:
3108 case Intrinsic::x86_avx512_psrli_w_512:
3109 case Intrinsic::x86_avx512_psrli_d_512:
3110 case Intrinsic::x86_avx512_psrli_q_512:
3111 case Intrinsic::x86_avx512_psrai_w_512:
3112 case Intrinsic::x86_avx512_psrai_d_512:
3113 case Intrinsic::x86_avx512_psrai_q_512:
3114 case Intrinsic::x86_avx512_psra_q_256:
3115 case Intrinsic::x86_avx512_psra_q_128:
3116 case Intrinsic::x86_avx512_psrai_q_256:
3117 case Intrinsic::x86_avx512_psrai_q_128:
3118 case Intrinsic::x86_avx2_psll_w:
3119 case Intrinsic::x86_avx2_psll_d:
3120 case Intrinsic::x86_avx2_psll_q:
3121 case Intrinsic::x86_avx2_pslli_w:
3122 case Intrinsic::x86_avx2_pslli_d:
3123 case Intrinsic::x86_avx2_pslli_q:
3124 case Intrinsic::x86_avx2_psrl_w:
3125 case Intrinsic::x86_avx2_psrl_d:
3126 case Intrinsic::x86_avx2_psrl_q:
3127 case Intrinsic::x86_avx2_psra_w:
3128 case Intrinsic::x86_avx2_psra_d:
3129 case Intrinsic::x86_avx2_psrli_w:
3130 case Intrinsic::x86_avx2_psrli_d:
3131 case Intrinsic::x86_avx2_psrli_q:
3132 case Intrinsic::x86_avx2_psrai_w:
3133 case Intrinsic::x86_avx2_psrai_d:
3134 case Intrinsic::x86_sse2_psll_w:
3135 case Intrinsic::x86_sse2_psll_d:
3136 case Intrinsic::x86_sse2_psll_q:
3137 case Intrinsic::x86_sse2_pslli_w:
3138 case Intrinsic::x86_sse2_pslli_d:
3139 case Intrinsic::x86_sse2_pslli_q:
3140 case Intrinsic::x86_sse2_psrl_w:
3141 case Intrinsic::x86_sse2_psrl_d:
3142 case Intrinsic::x86_sse2_psrl_q:
3143 case Intrinsic::x86_sse2_psra_w:
3144 case Intrinsic::x86_sse2_psra_d:
3145 case Intrinsic::x86_sse2_psrli_w:
3146 case Intrinsic::x86_sse2_psrli_d:
3147 case Intrinsic::x86_sse2_psrli_q:
3148 case Intrinsic::x86_sse2_psrai_w:
3149 case Intrinsic::x86_sse2_psrai_d:
3150 case Intrinsic::x86_mmx_psll_w:
3151 case Intrinsic::x86_mmx_psll_d:
3152 case Intrinsic::x86_mmx_psll_q:
3153 case Intrinsic::x86_mmx_pslli_w:
3154 case Intrinsic::x86_mmx_pslli_d:
3155 case Intrinsic::x86_mmx_pslli_q:
3156 case Intrinsic::x86_mmx_psrl_w:
3157 case Intrinsic::x86_mmx_psrl_d:
3158 case Intrinsic::x86_mmx_psrl_q:
3159 case Intrinsic::x86_mmx_psra_w:
3160 case Intrinsic::x86_mmx_psra_d:
3161 case Intrinsic::x86_mmx_psrli_w:
3162 case Intrinsic::x86_mmx_psrli_d:
3163 case Intrinsic::x86_mmx_psrli_q:
3164 case Intrinsic::x86_mmx_psrai_w:
3165 case Intrinsic::x86_mmx_psrai_d:
3166 handleVectorShiftIntrinsic(I, /* Variable */ false);
3167 break;
3168 case Intrinsic::x86_avx2_psllv_d:
3169 case Intrinsic::x86_avx2_psllv_d_256:
3170 case Intrinsic::x86_avx512_psllv_d_512:
3171 case Intrinsic::x86_avx2_psllv_q:
3172 case Intrinsic::x86_avx2_psllv_q_256:
3173 case Intrinsic::x86_avx512_psllv_q_512:
3174 case Intrinsic::x86_avx2_psrlv_d:
3175 case Intrinsic::x86_avx2_psrlv_d_256:
3176 case Intrinsic::x86_avx512_psrlv_d_512:
3177 case Intrinsic::x86_avx2_psrlv_q:
3178 case Intrinsic::x86_avx2_psrlv_q_256:
3179 case Intrinsic::x86_avx512_psrlv_q_512:
3180 case Intrinsic::x86_avx2_psrav_d:
3181 case Intrinsic::x86_avx2_psrav_d_256:
3182 case Intrinsic::x86_avx512_psrav_d_512:
3183 case Intrinsic::x86_avx512_psrav_q_128:
3184 case Intrinsic::x86_avx512_psrav_q_256:
3185 case Intrinsic::x86_avx512_psrav_q_512:
3186 handleVectorShiftIntrinsic(I, /* Variable */ true);
3187 break;
3188
3189 case Intrinsic::x86_sse2_packsswb_128:
3190 case Intrinsic::x86_sse2_packssdw_128:
3191 case Intrinsic::x86_sse2_packuswb_128:
3192 case Intrinsic::x86_sse41_packusdw:
3193 case Intrinsic::x86_avx2_packsswb:
3194 case Intrinsic::x86_avx2_packssdw:
3195 case Intrinsic::x86_avx2_packuswb:
3196 case Intrinsic::x86_avx2_packusdw:
3197 handleVectorPackIntrinsic(I);
3198 break;
3199
3200 case Intrinsic::x86_mmx_packsswb:
3201 case Intrinsic::x86_mmx_packuswb:
3202 handleVectorPackIntrinsic(I, 16);
3203 break;
3204
3205 case Intrinsic::x86_mmx_packssdw:
3206 handleVectorPackIntrinsic(I, 32);
3207 break;
3208
3209 case Intrinsic::x86_mmx_psad_bw:
3210 case Intrinsic::x86_sse2_psad_bw:
3211 case Intrinsic::x86_avx2_psad_bw:
3212 handleVectorSadIntrinsic(I);
3213 break;
3214
3215 case Intrinsic::x86_sse2_pmadd_wd:
3216 case Intrinsic::x86_avx2_pmadd_wd:
3217 case Intrinsic::x86_ssse3_pmadd_ub_sw_128:
3218 case Intrinsic::x86_avx2_pmadd_ub_sw:
3219 handleVectorPmaddIntrinsic(I);
3220 break;
3221
3222 case Intrinsic::x86_ssse3_pmadd_ub_sw:
3223 handleVectorPmaddIntrinsic(I, 8);
3224 break;
3225
3226 case Intrinsic::x86_mmx_pmadd_wd:
3227 handleVectorPmaddIntrinsic(I, 16);
3228 break;
3229
3230 case Intrinsic::x86_sse_cmp_ss:
3231 case Intrinsic::x86_sse2_cmp_sd:
3232 case Intrinsic::x86_sse_comieq_ss:
3233 case Intrinsic::x86_sse_comilt_ss:
3234 case Intrinsic::x86_sse_comile_ss:
3235 case Intrinsic::x86_sse_comigt_ss:
3236 case Intrinsic::x86_sse_comige_ss:
3237 case Intrinsic::x86_sse_comineq_ss:
3238 case Intrinsic::x86_sse_ucomieq_ss:
3239 case Intrinsic::x86_sse_ucomilt_ss:
3240 case Intrinsic::x86_sse_ucomile_ss:
3241 case Intrinsic::x86_sse_ucomigt_ss:
3242 case Intrinsic::x86_sse_ucomige_ss:
3243 case Intrinsic::x86_sse_ucomineq_ss:
3244 case Intrinsic::x86_sse2_comieq_sd:
3245 case Intrinsic::x86_sse2_comilt_sd:
3246 case Intrinsic::x86_sse2_comile_sd:
3247 case Intrinsic::x86_sse2_comigt_sd:
3248 case Intrinsic::x86_sse2_comige_sd:
3249 case Intrinsic::x86_sse2_comineq_sd:
3250 case Intrinsic::x86_sse2_ucomieq_sd:
3251 case Intrinsic::x86_sse2_ucomilt_sd:
3252 case Intrinsic::x86_sse2_ucomile_sd:
3253 case Intrinsic::x86_sse2_ucomigt_sd:
3254 case Intrinsic::x86_sse2_ucomige_sd:
3255 case Intrinsic::x86_sse2_ucomineq_sd:
3256 handleVectorCompareScalarIntrinsic(I);
3257 break;
3258
3259 case Intrinsic::x86_sse_cmp_ps:
3260 case Intrinsic::x86_sse2_cmp_pd:
3261 // FIXME: For x86_avx_cmp_pd_256 and x86_avx_cmp_ps_256 this function
3262 // generates reasonable-looking IR that fails in the backend with "Do not
3263 // know how to split the result of this operator!".
3264 handleVectorComparePackedIntrinsic(I);
3265 break;
3266
3267 case Intrinsic::x86_bmi_bextr_32:
3268 case Intrinsic::x86_bmi_bextr_64:
3269 case Intrinsic::x86_bmi_bzhi_32:
3270 case Intrinsic::x86_bmi_bzhi_64:
3271 case Intrinsic::x86_bmi_pdep_32:
3272 case Intrinsic::x86_bmi_pdep_64:
3273 case Intrinsic::x86_bmi_pext_32:
3274 case Intrinsic::x86_bmi_pext_64:
3275 handleBmiIntrinsic(I);
3276 break;
3277
3278 case Intrinsic::x86_pclmulqdq:
3279 case Intrinsic::x86_pclmulqdq_256:
3280 case Intrinsic::x86_pclmulqdq_512:
3281 handlePclmulIntrinsic(I);
3282 break;
3283
3284 case Intrinsic::is_constant:
3285 // The result of llvm.is.constant() is always defined.
3286 setShadow(&I, getCleanShadow(&I));
3287 setOrigin(&I, getCleanOrigin());
3288 break;
3289
3290 default:
3291 if (!handleUnknownIntrinsic(I))
3292 visitInstruction(I);
3293 break;
3294 }
3295 }
3296
3297 void visitCallSite(CallSite CS) {
3298 Instruction &I = *CS.getInstruction();
3299 assert(!I.getMetadata("nosanitize"));
3300 assert((CS.isCall() || CS.isInvoke() || CS.isCallBr()) &&
3301 "Unknown type of CallSite");
3302 if (CS.isCallBr() || (CS.isCall() && cast<CallInst>(&I)->isInlineAsm())) {
3303 // For inline asm (either a call to asm function, or callbr instruction),
3304 // do the usual thing: check argument shadow and mark all outputs as
3305 // clean. Note that any side effects of the inline asm that are not
3306 // immediately visible in its constraints are not handled.
3307 if (ClHandleAsmConservative && MS.CompileKernel)
3308 visitAsmInstruction(I);
3309 else
3310 visitInstruction(I);
3311 return;
3312 }
3313 if (CS.isCall()) {
3314 CallInst *Call = cast<CallInst>(&I);
3315 assert(!isa<IntrinsicInst>(&I) && "intrinsics are handled elsewhere");
3316
3317 // We are going to insert code that relies on the fact that the callee
3318 // will become a non-readonly function after it is instrumented by us. To
3319 // prevent this code from being optimized out, mark that function
3320 // non-readonly in advance.
3321 if (Function *Func = Call->getCalledFunction()) {
3322 // Clear out readonly/readnone attributes.
3323 AttrBuilder B;
3324 B.addAttribute(Attribute::ReadOnly)
3325 .addAttribute(Attribute::ReadNone)
3326 .addAttribute(Attribute::WriteOnly)
3327 .addAttribute(Attribute::ArgMemOnly)
3328 .addAttribute(Attribute::Speculatable);
3329 Func->removeAttributes(AttributeList::FunctionIndex, B);
3330 }
3331
3332 maybeMarkSanitizerLibraryCallNoBuiltin(Call, TLI);
3333 }
3334 IRBuilder<> IRB(&I);
3335
3336 unsigned ArgOffset = 0;
3337 LLVM_DEBUG(dbgs() << " CallSite: " << I << "\n");
3338 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3339 ArgIt != End; ++ArgIt) {
3340 Value *A = *ArgIt;
3341 unsigned i = ArgIt - CS.arg_begin();
3342 if (!A->getType()->isSized()) {
3343 LLVM_DEBUG(dbgs() << "Arg " << i << " is not sized: " << I << "\n");
3344 continue;
3345 }
3346 unsigned Size = 0;
3347 Value *Store = nullptr;
3348 // Compute the Shadow for arg even if it is ByVal, because
3349 // in that case getShadow() will copy the actual arg shadow to
3350 // __msan_param_tls.
3351 Value *ArgShadow = getShadow(A);
3352 Value *ArgShadowBase = getShadowPtrForArgument(A, IRB, ArgOffset);
3353 LLVM_DEBUG(dbgs() << " Arg#" << i << ": " << *A
3354 << " Shadow: " << *ArgShadow << "\n");
3355 bool ArgIsInitialized = false;
3356 const DataLayout &DL = F.getParent()->getDataLayout();
3357 if (CS.paramHasAttr(i, Attribute::ByVal)) {
3358 assert(A->getType()->isPointerTy() &&
3359 "ByVal argument is not a pointer!");
3360 Size = DL.getTypeAllocSize(A->getType()->getPointerElementType());
3361 if (ArgOffset + Size > kParamTLSSize) break;
3362 const MaybeAlign ParamAlignment(CS.getParamAlignment(i));
3363 MaybeAlign Alignment = llvm::None;
3364 if (ParamAlignment)
3365 Alignment = std::min(*ParamAlignment, kShadowTLSAlignment);
3366 Value *AShadowPtr =
3367 getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), Alignment,
3368 /*isStore*/ false)
3369 .first;
3370
3371 Store = IRB.CreateMemCpy(ArgShadowBase, Alignment, AShadowPtr,
3372 Alignment, Size);
3373 // TODO(glider): need to copy origins.
3374 } else {
3375 Size = DL.getTypeAllocSize(A->getType());
3376 if (ArgOffset + Size > kParamTLSSize) break;
3377 Store = IRB.CreateAlignedStore(ArgShadow, ArgShadowBase,
3378 kShadowTLSAlignment.value());
3379 Constant *Cst = dyn_cast<Constant>(ArgShadow);
3380 if (Cst && Cst->isNullValue()) ArgIsInitialized = true;
3381 }
3382 if (MS.TrackOrigins && !ArgIsInitialized)
3383 IRB.CreateStore(getOrigin(A),
3384 getOriginPtrForArgument(A, IRB, ArgOffset));
3385 (void)Store;
3386 assert(Size != 0 && Store != nullptr);
3387 LLVM_DEBUG(dbgs() << " Param:" << *Store << "\n");
3388 ArgOffset += alignTo(Size, 8);
3389 }
3390 LLVM_DEBUG(dbgs() << " done with call args\n");
3391
3392 FunctionType *FT = CS.getFunctionType();
3393 if (FT->isVarArg()) {
3394 VAHelper->visitCallSite(CS, IRB);
3395 }
3396
3397 // Now, get the shadow for the RetVal.
3398 if (!I.getType()->isSized()) return;
3399 // Don't emit the epilogue for musttail call returns.
3400 if (CS.isCall() && cast<CallInst>(&I)->isMustTailCall()) return;
3401 IRBuilder<> IRBBefore(&I);
3402 // Until we have full dynamic coverage, make sure the retval shadow is 0.
3403 Value *Base = getShadowPtrForRetval(&I, IRBBefore);
3404 IRBBefore.CreateAlignedStore(getCleanShadow(&I), Base,
3405 kShadowTLSAlignment.value());
3406 BasicBlock::iterator NextInsn;
3407 if (CS.isCall()) {
3408 NextInsn = ++I.getIterator();
3409 assert(NextInsn != I.getParent()->end());
3410 } else {
3411 BasicBlock *NormalDest = cast<InvokeInst>(&I)->getNormalDest();
3412 if (!NormalDest->getSinglePredecessor()) {
3413 // FIXME: this case is tricky, so we are just conservative here.
3414 // Perhaps we need to split the edge between this BB and NormalDest,
3415 // but a naive attempt to use SplitEdge leads to a crash.
3416 setShadow(&I, getCleanShadow(&I));
3417 setOrigin(&I, getCleanOrigin());
3418 return;
3419 }
3420 // FIXME: NextInsn is likely in a basic block that has not been visited yet.
3421 // Anything inserted there will be instrumented by MSan later!
3422 NextInsn = NormalDest->getFirstInsertionPt();
3423 assert(NextInsn != NormalDest->end() &&
3424 "Could not find insertion point for retval shadow load");
3425 }
3426 IRBuilder<> IRBAfter(&*NextInsn);
3427 Value *RetvalShadow = IRBAfter.CreateAlignedLoad(
3428 getShadowTy(&I), getShadowPtrForRetval(&I, IRBAfter),
3429 kShadowTLSAlignment.value(), "_msret");
3430 setShadow(&I, RetvalShadow);
3431 if (MS.TrackOrigins)
3432 setOrigin(&I, IRBAfter.CreateLoad(MS.OriginTy,
3433 getOriginPtrForRetval(IRBAfter)));
3434 }
3435
3436 bool isAMustTailRetVal(Value *RetVal) {
3437 if (auto *I = dyn_cast<BitCastInst>(RetVal)) {
3438 RetVal = I->getOperand(0);
3439 }
3440 if (auto *I = dyn_cast<CallInst>(RetVal)) {
3441 return I->isMustTailCall();
3442 }
3443 return false;
3444 }
3445
3446 void visitReturnInst(ReturnInst &I) {
3447 IRBuilder<> IRB(&I);
3448 Value *RetVal = I.getReturnValue();
3449 if (!RetVal) return;
3450 // Don't emit the epilogue for musttail call returns.
3451 if (isAMustTailRetVal(RetVal)) return;
3452 Value *ShadowPtr = getShadowPtrForRetval(RetVal, IRB);
3453 if (CheckReturnValue) {
3454 insertShadowCheck(RetVal, &I);
3455 Value *Shadow = getCleanShadow(RetVal);
3456 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value());
3457 } else {
3458 Value *Shadow = getShadow(RetVal);
3459 IRB.CreateAlignedStore(Shadow, ShadowPtr, kShadowTLSAlignment.value());
3460 if (MS.TrackOrigins)
3461 IRB.CreateStore(getOrigin(RetVal), getOriginPtrForRetval(IRB));
3462 }
3463 }
3464
3465 void visitPHINode(PHINode &I) {
3466 IRBuilder<> IRB(&I);
3467 if (!PropagateShadow) {
3468 setShadow(&I, getCleanShadow(&I));
3469 setOrigin(&I, getCleanOrigin());
3470 return;
3471 }
3472
3473 ShadowPHINodes.push_back(&I);
3474 setShadow(&I, IRB.CreatePHI(getShadowTy(&I), I.getNumIncomingValues(),
3475 "_msphi_s"));
3476 if (MS.TrackOrigins)
3477 setOrigin(&I, IRB.CreatePHI(MS.OriginTy, I.getNumIncomingValues(),
3478 "_msphi_o"));
3479 }
3480
3481 Value *getLocalVarDescription(AllocaInst &I) {
3482 SmallString<2048> StackDescriptionStorage;
3483 raw_svector_ostream StackDescription(StackDescriptionStorage);
3484 // We create a string with a description of the stack allocation and
3485 // pass it into __msan_set_alloca_origin.
3486 // It will be printed by the run-time if stack-originated UMR is found.
3487 // The first 4 bytes of the string are set to '----' and will be replaced
3488 // by __msan_va_arg_overflow_size_tls at the first call.
3489 StackDescription << "----" << I.getName() << "@" << F.getName();
3490 return createPrivateNonConstGlobalForString(*F.getParent(),
3491 StackDescription.str());
3492 }
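// E.g. for an alloca named "x" in function "f" (hypothetical names), the
// resulting description string is "----x@f", with "----" acting as a
// 4-byte placeholder that the runtime patches on first use, as described
// above.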
3493
3494 void poisonAllocaUserspace(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3495 if (PoisonStack && ClPoisonStackWithCall) {
3496 IRB.CreateCall(MS.MsanPoisonStackFn,
3497 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3498 } else {
3499 Value *ShadowBase, *OriginBase;
3500 std::tie(ShadowBase, OriginBase) = getShadowOriginPtr(
3501 &I, IRB, IRB.getInt8Ty(), Align::None(), /*isStore*/ true);
3502
3503 Value *PoisonValue = IRB.getInt8(PoisonStack ? ClPoisonStackPattern : 0);
3504 IRB.CreateMemSet(ShadowBase, PoisonValue, Len,
3505 MaybeAlign(I.getAlignment()));
3506 }
3507
3508 if (PoisonStack && MS.TrackOrigins) {
3509 Value *Descr = getLocalVarDescription(I);
3510 IRB.CreateCall(MS.MsanSetAllocaOrigin4Fn,
3511 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3512 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy()),
3513 IRB.CreatePointerCast(&F, MS.IntptrTy)});
3514 }
3515 }
3516
3517 void poisonAllocaKmsan(AllocaInst &I, IRBuilder<> &IRB, Value *Len) {
3518 Value *Descr = getLocalVarDescription(I);
3519 if (PoisonStack) {
3520 IRB.CreateCall(MS.MsanPoisonAllocaFn,
3521 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len,
3522 IRB.CreatePointerCast(Descr, IRB.getInt8PtrTy())});
3523 } else {
3524 IRB.CreateCall(MS.MsanUnpoisonAllocaFn,
3525 {IRB.CreatePointerCast(&I, IRB.getInt8PtrTy()), Len});
3526 }
3527 }
3528
3529 void instrumentAlloca(AllocaInst &I, Instruction *InsPoint = nullptr) {
3530 if (!InsPoint)
3531 InsPoint = &I;
3532 IRBuilder<> IRB(InsPoint->getNextNode());
3533 const DataLayout &DL = F.getParent()->getDataLayout();
3534 uint64_t TypeSize = DL.getTypeAllocSize(I.getAllocatedType());
3535 Value *Len = ConstantInt::get(MS.IntptrTy, TypeSize);
3536 if (I.isArrayAllocation())
3537 Len = IRB.CreateMul(Len, I.getArraySize());
3538
3539 if (MS.CompileKernel)
3540 poisonAllocaKmsan(I, IRB, Len);
3541 else
3542 poisonAllocaUserspace(I, IRB, Len);
3543 }
3544
3545 void visitAllocaInst(AllocaInst &I) {
3546 setShadow(&I, getCleanShadow(&I));
3547 setOrigin(&I, getCleanOrigin());
3548 // We'll get to this alloca later unless it's poisoned at the corresponding
3549 // llvm.lifetime.start.
3550 AllocaSet.insert(&I);
3551 }
3552
3553 void visitSelectInst(SelectInst& I) {
3554 IRBuilder<> IRB(&I);
3555 // a = select b, c, d
3556 Value *B = I.getCondition();
3557 Value *C = I.getTrueValue();
3558 Value *D = I.getFalseValue();
3559 Value *Sb = getShadow(B);
3560 Value *Sc = getShadow(C);
3561 Value *Sd = getShadow(D);
3562
3563 // Result shadow if condition shadow is 0.
3564 Value *Sa0 = IRB.CreateSelect(B, Sc, Sd);
3565 Value *Sa1;
3566 if (I.getType()->isAggregateType()) {
3567 // To avoid "sign extending" i1 to an arbitrary aggregate type, we just do
3568 // an extra "select". This results in much more compact IR.
3569 // Sa = select Sb, poisoned, (select b, Sc, Sd)
3570 Sa1 = getPoisonedShadow(getShadowTy(I.getType()));
3571 } else {
3572 // Sa = select Sb, [ (c^d) | Sc | Sd ], [ b ? Sc : Sd ]
3573 // If Sb (condition is poisoned), look for bits in c and d that are equal
3574 // and both unpoisoned.
3575 // If !Sb (condition is unpoisoned), simply pick one of Sc and Sd.
3576
3577 // Cast arguments to shadow-compatible type.
3578 C = CreateAppToShadowCast(IRB, C);
3579 D = CreateAppToShadowCast(IRB, D);
3580
3581 // Result shadow if condition shadow is 1.
3582 Sa1 = IRB.CreateOr({IRB.CreateXor(C, D), Sc, Sd});
3583 }
3584 Value *Sa = IRB.CreateSelect(Sb, Sa1, Sa0, "_msprop_select");
3585 setShadow(&I, Sa);
3586 if (MS.TrackOrigins) {
3587 // Origins are always i32, so any vector conditions must be flattened.
3588 // FIXME: consider tracking vector origins for app vectors?
3589 if (B->getType()->isVectorTy()) {
3590 Type *FlatTy = getShadowTyNoVec(B->getType());
3591 B = IRB.CreateICmpNE(IRB.CreateBitCast(B, FlatTy),
3592 ConstantInt::getNullValue(FlatTy));
3593 Sb = IRB.CreateICmpNE(IRB.CreateBitCast(Sb, FlatTy),
3594 ConstantInt::getNullValue(FlatTy));
3595 }
3596 // a = select b, c, d
3597 // Oa = Sb ? Ob : (b ? Oc : Od)
3598 setOrigin(
3599 &I, IRB.CreateSelect(Sb, getOrigin(I.getCondition()),
3600 IRB.CreateSelect(B, getOrigin(I.getTrueValue()),
3601 getOrigin(I.getFalseValue()))));
3602 }
3603 }
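// Worked example (scalar, schematic): for a = select b, c, d with
// c = 0b1010, d = 0b1110 and Sc = Sd = 0 (both values fully initialized),
// a poisoned condition (Sb all-ones) gives
//   Sa1 = (c ^ d) | Sc | Sd = 0b0100
// i.e. only the one bit on which c and d disagree is poisoned; the bits
// that match are correct no matter which way the select goes.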
3604
3605 void visitLandingPadInst(LandingPadInst &I) {
3606 // Do nothing.
3607 // See https://github.com/google/sanitizers/issues/504
3608 setShadow(&I, getCleanShadow(&I));
3609 setOrigin(&I, getCleanOrigin());
3610 }
3611
3612 void visitCatchSwitchInst(CatchSwitchInst &I) {
3613 setShadow(&I, getCleanShadow(&I));
3614 setOrigin(&I, getCleanOrigin());
3615 }
3616
3617 void visitFuncletPadInst(FuncletPadInst &I) {
3618 setShadow(&I, getCleanShadow(&I));
3619 setOrigin(&I, getCleanOrigin());
3620 }
3621
3622 void visitGetElementPtrInst(GetElementPtrInst &I) {
3623 handleShadowOr(I);
3624 }
3625
3626 void visitExtractValueInst(ExtractValueInst &I) {
3627 IRBuilder<> IRB(&I);
3628 Value *Agg = I.getAggregateOperand();
3629 LLVM_DEBUG(dbgs() << "ExtractValue: " << I << "\n");
3630 Value *AggShadow = getShadow(Agg);
3631 LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
3632 Value *ResShadow = IRB.CreateExtractValue(AggShadow, I.getIndices());
3633 LLVM_DEBUG(dbgs() << " ResShadow: " << *ResShadow << "\n");
3634 setShadow(&I, ResShadow);
3635 setOriginForNaryOp(I);
3636 }
3637
3638 void visitInsertValueInst(InsertValueInst &I) {
3639 IRBuilder<> IRB(&I);
3640 LLVM_DEBUG(dbgs() << "InsertValue: " << I << "\n");
3641 Value *AggShadow = getShadow(I.getAggregateOperand());
3642 Value *InsShadow = getShadow(I.getInsertedValueOperand());
3643 LLVM_DEBUG(dbgs() << " AggShadow: " << *AggShadow << "\n");
3644 LLVM_DEBUG(dbgs() << " InsShadow: " << *InsShadow << "\n");
3645 Value *Res = IRB.CreateInsertValue(AggShadow, InsShadow, I.getIndices());
3646 LLVM_DEBUG(dbgs() << " Res: " << *Res << "\n");
3647 setShadow(&I, Res);
3648 setOriginForNaryOp(I);
3649 }
3650
3651 void dumpInst(Instruction &I) {
3652 if (CallInst *CI = dyn_cast<CallInst>(&I)) {
3653 errs() << "ZZZ call " << CI->getCalledFunction()->getName() << "\n";
3654 } else {
3655 errs() << "ZZZ " << I.getOpcodeName() << "\n";
3656 }
3657 errs() << "QQQ " << I << "\n";
3658 }
3659
3660 void visitResumeInst(ResumeInst &I) {
3661 LLVM_DEBUG(dbgs() << "Resume: " << I << "\n");
3662 // Nothing to do here.
3663 }
3664
3665 void visitCleanupReturnInst(CleanupReturnInst &CRI) {
3666 LLVM_DEBUG(dbgs() << "CleanupReturn: " << CRI << "\n");
3667 // Nothing to do here.
3668 }
3669
3670 void visitCatchReturnInst(CatchReturnInst &CRI) {
3671 LLVM_DEBUG(dbgs() << "CatchReturn: " << CRI << "\n");
3672 // Nothing to do here.
3673 }
3674
3675 void instrumentAsmArgument(Value *Operand, Instruction &I, IRBuilder<> &IRB,
3676 const DataLayout &DL, bool isOutput) {
3677 // For each assembly argument, we check its value for being initialized.
3678 // If the argument is a pointer, we assume it points to a single element
3679 // of the corresponding type (or to an 8-byte word, if the type is unsized).
3680 // Each such pointer is instrumented with a call to the runtime library.
3681 Type *OpType = Operand->getType();
3682 // Check the operand value itself.
3683 insertShadowCheck(Operand, &I);
3684 if (!OpType->isPointerTy() || !isOutput) {
3685 assert(!isOutput);
3686 return;
3687 }
3688 Type *ElType = OpType->getPointerElementType();
3689 if (!ElType->isSized())
3690 return;
3691 int Size = DL.getTypeStoreSize(ElType);
3692 Value *Ptr = IRB.CreatePointerCast(Operand, IRB.getInt8PtrTy());
3693 Value *SizeVal = ConstantInt::get(MS.IntptrTy, Size);
3694 IRB.CreateCall(MS.MsanInstrumentAsmStoreFn, {Ptr, SizeVal});
3695 }
3696
3697 /// Get the number of output arguments returned by pointers.
3698 int getNumOutputArgs(InlineAsm *IA, CallBase *CB) {
3699 int NumRetOutputs = 0;
3700 int NumOutputs = 0;
3701 Type *RetTy = cast<Value>(CB)->getType();
3702 if (!RetTy->isVoidTy()) {
3703 // Register outputs are returned via the CallInst return value.
3704 auto *ST = dyn_cast<StructType>(RetTy);
3705 if (ST)
3706 NumRetOutputs = ST->getNumElements();
3707 else
3708 NumRetOutputs = 1;
3709 }
3710 InlineAsm::ConstraintInfoVector Constraints = IA->ParseConstraints();
3711 for (size_t i = 0, n = Constraints.size(); i < n; i++) {
3712 InlineAsm::ConstraintInfo Info = Constraints[i];
3713 switch (Info.Type) {
3714 case InlineAsm::isOutput:
3715 NumOutputs++;
3716 break;
3717 default:
3718 break;
3719 }
3720 }
3721 return NumOutputs - NumRetOutputs;
3722 }
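// For example (schematic): for
//   asm("..." : "=r"(ret), "=m"(*p) : "r"(x));
// both "=r" and "=m" count as outputs (NumOutputs == 2), while the single
// register output is returned as the CallInst's value (NumRetOutputs == 1),
// so getNumOutputArgs() == 1: only *p is an output returned by pointer.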
3723
3724 void visitAsmInstruction(Instruction &I) {
3725 // Conservative inline assembly handling: check for poisoned shadow of
3726 // asm() arguments, then unpoison the result and all the memory locations
3727 // pointed to by those arguments.
3728 // An inline asm() statement in C++ contains lists of input and output
3729 // arguments used by the assembly code. These are mapped to operands of the
3730 // CallInst as follows:
3731 // - nR register outputs ("=r") are returned by value in a single structure
3732 // (SSA value of the CallInst);
3733 // - nO other outputs ("=m" and others) are returned by pointer as first
3734 // nO operands of the CallInst;
3735 // - nI inputs ("r", "m" and others) are passed to CallInst as the
3736 // remaining nI operands.
3737 // The total number of asm() arguments in the source is nR+nO+nI, and the
3738 // corresponding CallInst has nO+nI+1 operands (the last operand is the
3739 // function to be called).
3740 const DataLayout &DL = F.getParent()->getDataLayout();
3741 CallBase *CB = cast<CallBase>(&I);
3742 IRBuilder<> IRB(&I);
3743 InlineAsm *IA = cast<InlineAsm>(CB->getCalledValue());
3744 int OutputArgs = getNumOutputArgs(IA, CB);
3745 // The last operand of a CallInst is the function itself.
3746 int NumOperands = CB->getNumOperands() - 1;
3747
3748 // Check input arguments. Doing so before unpoisoning output arguments, so
3749 // that we won't overwrite uninit values before checking them.
3750 for (int i = OutputArgs; i < NumOperands; i++) {
3751 Value *Operand = CB->getOperand(i);
3752 instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ false);
3753 }
3754 // Unpoison output arguments. This must happen before the actual InlineAsm
3755 // call, so that the shadow for memory published in the asm() statement
3756 // remains valid.
3757 for (int i = 0; i < OutputArgs; i++) {
3758 Value *Operand = CB->getOperand(i);
3759 instrumentAsmArgument(Operand, I, IRB, DL, /*isOutput*/ true);
3760 }
3761
3762 setShadow(&I, getCleanShadow(&I));
3763 setOrigin(&I, getCleanOrigin());
3764 }
3765
3766 void visitInstruction(Instruction &I) {
3767 // Everything else: stop propagating and check for poisoned shadow.
3768 if (ClDumpStrictInstructions)
3769 dumpInst(I);
3770 LLVM_DEBUG(dbgs() << "DEFAULT: " << I << "\n");
3771 for (size_t i = 0, n = I.getNumOperands(); i < n; i++) {
3772 Value *Operand = I.getOperand(i);
3773 if (Operand->getType()->isSized())
3774 insertShadowCheck(Operand, &I);
3775 }
3776 setShadow(&I, getCleanShadow(&I));
3777 setOrigin(&I, getCleanOrigin());
3778 }
3779 };
3780
3781 /// AMD64-specific implementation of VarArgHelper.
3782 struct VarArgAMD64Helper : public VarArgHelper {
3783 // An unfortunate workaround for asymmetric lowering of va_arg stuff.
3784 // See a comment in visitCallSite for more details.
3785 static const unsigned AMD64GpEndOffset = 48; // AMD64 ABI Draft 0.99.6 p3.5.7
3786 static const unsigned AMD64FpEndOffsetSSE = 176;
3787 // If SSE is disabled, fp_offset in va_list is zero.
3788 static const unsigned AMD64FpEndOffsetNoSSE = AMD64GpEndOffset;
3789
3790 unsigned AMD64FpEndOffset;
3791 Function &F;
3792 MemorySanitizer &MS;
3793 MemorySanitizerVisitor &MSV;
3794 Value *VAArgTLSCopy = nullptr;
3795 Value *VAArgTLSOriginCopy = nullptr;
3796 Value *VAArgOverflowSize = nullptr;
3797
3798 SmallVector<CallInst*, 16> VAStartInstrumentationList;
3799
3800 enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };
3801
3802 VarArgAMD64Helper(Function &F, MemorySanitizer &MS,
3803 MemorySanitizerVisitor &MSV)
3804 : F(F), MS(MS), MSV(MSV) {
3805 AMD64FpEndOffset = AMD64FpEndOffsetSSE;
3806 for (const auto &Attr : F.getAttributes().getFnAttributes()) {
3807 if (Attr.isStringAttribute() &&
3808 (Attr.getKindAsString() == "target-features")) {
3809 if (Attr.getValueAsString().contains("-sse"))
3810 AMD64FpEndOffset = AMD64FpEndOffsetNoSSE;
3811 break;
3812 }
3813 }
3814 }
3815
3816 ArgKind classifyArgument(Value* arg) {
3817 // A very rough approximation of X86_64 argument classification rules.
3818 Type *T = arg->getType();
3819 if (T->isFPOrFPVectorTy() || T->isX86_MMXTy())
3820 return AK_FloatingPoint;
3821 if (T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64)
3822 return AK_GeneralPurpose;
3823 if (T->isPointerTy())
3824 return AK_GeneralPurpose;
3825 return AK_Memory;
3826 }
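// E.g. double and <4 x float> classify as AK_FloatingPoint, i32 and i8*
// as AK_GeneralPurpose, while i128 or any aggregate falls through to
// AK_Memory.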
3827
3828 // For VarArg functions, store the argument shadow in an ABI-specific format
3829 // that corresponds to va_list layout.
3830 // We do this because Clang lowers va_arg in the frontend, and this pass
3831 // only sees the low level code that deals with va_list internals.
3832 // A much easier alternative (provided that Clang emits va_arg instructions)
3833 // would have been to associate each live instance of va_list with a copy of
3834 // MSanParamTLS, and extract shadow on va_arg() call in the argument list
3835 // order.
3836 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
3837 unsigned GpOffset = 0;
3838 unsigned FpOffset = AMD64GpEndOffset;
3839 unsigned OverflowOffset = AMD64FpEndOffset;
3840 const DataLayout &DL = F.getParent()->getDataLayout();
3841 for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
3842 ArgIt != End; ++ArgIt) {
3843 Value *A = *ArgIt;
3844 unsigned ArgNo = CS.getArgumentNo(ArgIt);
3845 bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
3846 bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
3847 if (IsByVal) {
3848 // ByVal arguments always go to the overflow area.
3849 // Fixed arguments passed through the overflow area will be stepped
3850 // over by va_start, so don't count them towards the offset.
3851 if (IsFixed)
3852 continue;
3853 assert(A->getType()->isPointerTy());
3854 Type *RealTy = A->getType()->getPointerElementType();
3855 uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
3856 Value *ShadowBase = getShadowPtrForVAArgument(
3857 RealTy, IRB, OverflowOffset, alignTo(ArgSize, 8));
3858 Value *OriginBase = nullptr;
3859 if (MS.TrackOrigins)
3860 OriginBase = getOriginPtrForVAArgument(RealTy, IRB, OverflowOffset);
3861 OverflowOffset += alignTo(ArgSize, 8);
3862 if (!ShadowBase)
3863 continue;
3864 Value *ShadowPtr, *OriginPtr;
3865 std::tie(ShadowPtr, OriginPtr) =
3866 MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(), kShadowTLSAlignment,
3867 /*isStore*/ false);
3868
3869 IRB.CreateMemCpy(ShadowBase, kShadowTLSAlignment, ShadowPtr,
3870 kShadowTLSAlignment, ArgSize);
3871 if (MS.TrackOrigins)
3872 IRB.CreateMemCpy(OriginBase, kShadowTLSAlignment, OriginPtr,
3873 kShadowTLSAlignment, ArgSize);
3874 } else {
3875 ArgKind AK = classifyArgument(A);
3876 if (AK == AK_GeneralPurpose && GpOffset >= AMD64GpEndOffset)
3877 AK = AK_Memory;
3878 if (AK == AK_FloatingPoint && FpOffset >= AMD64FpEndOffset)
3879 AK = AK_Memory;
3880 Value *ShadowBase, *OriginBase = nullptr;
3881 switch (AK) {
3882 case AK_GeneralPurpose:
3883 ShadowBase =
3884 getShadowPtrForVAArgument(A->getType(), IRB, GpOffset, 8);
3885 if (MS.TrackOrigins)
3886 OriginBase =
3887 getOriginPtrForVAArgument(A->getType(), IRB, GpOffset);
3888 GpOffset += 8;
3889 break;
3890 case AK_FloatingPoint:
3891 ShadowBase =
3892 getShadowPtrForVAArgument(A->getType(), IRB, FpOffset, 16);
3893 if (MS.TrackOrigins)
3894 OriginBase =
3895 getOriginPtrForVAArgument(A->getType(), IRB, FpOffset);
3896 FpOffset += 16;
3897 break;
3898 case AK_Memory:
3899 if (IsFixed)
3900 continue;
3901 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
3902 ShadowBase =
3903 getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset, 8);
3904 if (MS.TrackOrigins)
3905 OriginBase =
3906 getOriginPtrForVAArgument(A->getType(), IRB, OverflowOffset);
3907 OverflowOffset += alignTo(ArgSize, 8);
3908 }
3909 // Take fixed arguments into account for GpOffset and FpOffset,
3910 // but don't actually store shadows for them.
3911 // TODO(glider): don't call get*PtrForVAArgument() for them.
3912 if (IsFixed)
3913 continue;
3914 if (!ShadowBase)
3915 continue;
3916 Value *Shadow = MSV.getShadow(A);
3917 IRB.CreateAlignedStore(Shadow, ShadowBase, kShadowTLSAlignment.value());
3918 if (MS.TrackOrigins) {
3919 Value *Origin = MSV.getOrigin(A);
3920 unsigned StoreSize = DL.getTypeStoreSize(Shadow->getType());
3921 MSV.paintOrigin(IRB, Origin, OriginBase, StoreSize,
3922 std::max(kShadowTLSAlignment, kMinOriginAlignment));
3923 }
3924 }
3925 }
3926 Constant *OverflowSize =
3927 ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AMD64FpEndOffset);
3928 IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
3929 }
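// For illustration (schematic): in printf("%d %f", i, d) the fixed format
// pointer occupies the first 8-byte GP slot, so the shadow of i is stored
// at __msan_va_arg_tls offset 8, the shadow of d at offset 48 (the first
// FP slot), and any memory-passed varargs would start at offset 176
// (AMD64FpEndOffset with SSE enabled).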
3930
3931 /// Compute the shadow address for a given va_arg.
3932 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
3933 unsigned ArgOffset, unsigned ArgSize) {
3934 // Make sure we don't overflow __msan_va_arg_tls.
3935 if (ArgOffset + ArgSize > kParamTLSSize)
3936 return nullptr;
3937 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
3938 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3939 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
3940 "_msarg_va_s");
3941 }
3942
3943 /// Compute the origin address for a given va_arg.
3944 Value *getOriginPtrForVAArgument(Type *Ty, IRBuilder<> &IRB, int ArgOffset) {
3945 Value *Base = IRB.CreatePointerCast(MS.VAArgOriginTLS, MS.IntptrTy);
3946 // getOriginPtrForVAArgument() is always called after
3947 // getShadowPtrForVAArgument(), so __msan_va_arg_origin_tls can never
3948 // overflow.
3949 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
3950 return IRB.CreateIntToPtr(Base, PointerType::get(MS.OriginTy, 0),
3951 "_msarg_va_o");
3952 }
3953
3954 void unpoisonVAListTagForInst(IntrinsicInst &I) {
3955 IRBuilder<> IRB(&I);
3956 Value *VAListTag = I.getArgOperand(0);
3957 Value *ShadowPtr, *OriginPtr;
3958 const Align Alignment = Align(8);
3959 std::tie(ShadowPtr, OriginPtr) =
3960 MSV.getShadowOriginPtr(VAListTag, IRB, IRB.getInt8Ty(), Alignment,
3961 /*isStore*/ true);
3962
3963 // Unpoison the whole __va_list_tag.
3964 // FIXME: magic ABI constants.
3965 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
3966 /* size */ 24, Alignment, false);
3967 // We shouldn't need to zero out the origins, as they're only checked for
3968 // nonzero shadow.
3969 }
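// For reference, the SysV x86_64 __va_list_tag is 24 bytes:
//   struct { unsigned gp_offset; unsigned fp_offset;
//            void *overflow_arg_area; void *reg_save_area; };
// which explains the magic 24 above and the offsets 8 (overflow area
// pointer) and 16 (register save area pointer) used in
// finalizeInstrumentation() below.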
3970
3971 void visitVAStartInst(VAStartInst &I) override {
3972 if (F.getCallingConv() == CallingConv::Win64)
3973 return;
3974 VAStartInstrumentationList.push_back(&I);
3975 unpoisonVAListTagForInst(I);
3976 }
3977
3978 void visitVACopyInst(VACopyInst &I) override {
3979 if (F.getCallingConv() == CallingConv::Win64) return;
3980 unpoisonVAListTagForInst(I);
3981 }
3982
3983 void finalizeInstrumentation() override {
3984 assert(!VAArgOverflowSize && !VAArgTLSCopy &&
3985 "finalizeInstrumentation called twice");
3986 if (!VAStartInstrumentationList.empty()) {
3987 // If there is a va_start in this function, make a backup copy of
3988 // va_arg_tls somewhere in the function entry block.
3989 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
3990 VAArgOverflowSize =
3991 IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
3992 Value *CopySize =
3993 IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AMD64FpEndOffset),
3994 VAArgOverflowSize);
3995 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3996 IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
3997 if (MS.TrackOrigins) {
3998 VAArgTLSOriginCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
3999 IRB.CreateMemCpy(VAArgTLSOriginCopy, Align(8), MS.VAArgOriginTLS,
4000 Align(8), CopySize);
4001 }
4002 }
4003
4004 // Instrument va_start.
4005 // Copy va_list shadow from the backup copy of the TLS contents.
4006 for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
4007 CallInst *OrigInst = VAStartInstrumentationList[i];
4008 IRBuilder<> IRB(OrigInst->getNextNode());
4009 Value *VAListTag = OrigInst->getArgOperand(0);
4010
4011 Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4012 Value *RegSaveAreaPtrPtr = IRB.CreateIntToPtr(
4013 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4014 ConstantInt::get(MS.IntptrTy, 16)),
4015 PointerType::get(RegSaveAreaPtrTy, 0));
4016 Value *RegSaveAreaPtr =
4017 IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
4018 Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
4019 const Align Alignment = Align(16);
4020 std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
4021 MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
4022 Alignment, /*isStore*/ true);
4023 IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy, Alignment,
4024 AMD64FpEndOffset);
4025 if (MS.TrackOrigins)
4026 IRB.CreateMemCpy(RegSaveAreaOriginPtr, Alignment, VAArgTLSOriginCopy,
4027 Alignment, AMD64FpEndOffset);
4028 Type *OverflowArgAreaPtrTy = Type::getInt64PtrTy(*MS.C);
4029 Value *OverflowArgAreaPtrPtr = IRB.CreateIntToPtr(
4030 IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
4031 ConstantInt::get(MS.IntptrTy, 8)),
4032 PointerType::get(OverflowArgAreaPtrTy, 0));
4033 Value *OverflowArgAreaPtr =
4034 IRB.CreateLoad(OverflowArgAreaPtrTy, OverflowArgAreaPtrPtr);
4035 Value *OverflowArgAreaShadowPtr, *OverflowArgAreaOriginPtr;
4036 std::tie(OverflowArgAreaShadowPtr, OverflowArgAreaOriginPtr) =
4037 MSV.getShadowOriginPtr(OverflowArgAreaPtr, IRB, IRB.getInt8Ty(),
4038 Alignment, /*isStore*/ true);
4039 Value *SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSCopy,
4040 AMD64FpEndOffset);
4041 IRB.CreateMemCpy(OverflowArgAreaShadowPtr, Alignment, SrcPtr, Alignment,
4042 VAArgOverflowSize);
4043 if (MS.TrackOrigins) {
4044 SrcPtr = IRB.CreateConstGEP1_32(IRB.getInt8Ty(), VAArgTLSOriginCopy,
4045 AMD64FpEndOffset);
4046 IRB.CreateMemCpy(OverflowArgAreaOriginPtr, Alignment, SrcPtr, Alignment,
4047 VAArgOverflowSize);
4048 }
4049 }
4050 }
4051 };
4052
4053 /// MIPS64-specific implementation of VarArgHelper.
4054 struct VarArgMIPS64Helper : public VarArgHelper {
4055 Function &F;
4056 MemorySanitizer &MS;
4057 MemorySanitizerVisitor &MSV;
4058 Value *VAArgTLSCopy = nullptr;
4059 Value *VAArgSize = nullptr;
4060
4061 SmallVector<CallInst*, 16> VAStartInstrumentationList;
4062
4063 VarArgMIPS64Helper(Function &F, MemorySanitizer &MS,
4064 MemorySanitizerVisitor &MSV) : F(F), MS(MS), MSV(MSV) {}
4065
4066 void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
4067 unsigned VAArgOffset = 0;
4068 const DataLayout &DL = F.getParent()->getDataLayout();
4069 for (CallSite::arg_iterator ArgIt = CS.arg_begin() +
4070 CS.getFunctionType()->getNumParams(), End = CS.arg_end();
4071 ArgIt != End; ++ArgIt) {
4072 Triple TargetTriple(F.getParent()->getTargetTriple());
4073 Value *A = *ArgIt;
4074 Value *Base;
4075 uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
4076 if (TargetTriple.getArch() == Triple::mips64) {
4077 // Adjust the shadow for arguments with size < 8 to match the placement
4078 // of bits in a big-endian system.
4079 if (ArgSize < 8)
4080 VAArgOffset += (8 - ArgSize);
4081 }
4082 Base = getShadowPtrForVAArgument(A->getType(), IRB, VAArgOffset, ArgSize);
4083 VAArgOffset += ArgSize;
4084 VAArgOffset = alignTo(VAArgOffset, 8);
4085 if (!Base)
4086 continue;
4087 IRB.CreateAlignedStore(MSV.getShadow(A), Base,
4088 kShadowTLSAlignment.value());
4089 }
4090
4091 Constant *TotalVAArgSize = ConstantInt::get(IRB.getInt64Ty(), VAArgOffset);
4092 // Here we use VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a
4093 // new class member; i.e. it holds the total size of all VarArgs.
4094 IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
4095 }
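// E.g. (a sketch) on big-endian mips64, an i32 vararg lives in the high
// half of its 8-byte slot, so its 4-byte shadow is stored at slot
// offset + 4 rather than at the slot base; on little-endian mips64el no
// adjustment is made.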
4096
4097 /// Compute the shadow address for a given va_arg.
4098 Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
4099 unsigned ArgOffset, unsigned ArgSize) {
4100 // Make sure we don't overflow __msan_va_arg_tls.
4101 if (ArgOffset + ArgSize > kParamTLSSize)
4102 return nullptr;
4103 Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
4104 Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
4105 return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
4106 "_msarg");
4107 }
4108
4109 void visitVAStartInst(VAStartInst &I) override {
4110 IRBuilder<> IRB(&I);
4111 VAStartInstrumentationList.push_back(&I);
4112 Value *VAListTag = I.getArgOperand(0);
4113 Value *ShadowPtr, *OriginPtr;
4114 const Align Alignment = Align(8);
4115 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4116 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4117 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4118 /* size */ 8, Alignment, false);
4119 }
4120
4121 void visitVACopyInst(VACopyInst &I) override {
4122 IRBuilder<> IRB(&I);
4123 VAStartInstrumentationList.push_back(&I);
4124 Value *VAListTag = I.getArgOperand(0);
4125 Value *ShadowPtr, *OriginPtr;
4126 const Align Alignment = Align(8);
4127 std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
4128 VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
4129 IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
4130 /* size */ 8, Alignment, false);
4131 }
4132
4133 void finalizeInstrumentation() override {
4134 assert(!VAArgSize && !VAArgTLSCopy &&
4135 "finalizeInstrumentation called twice");
4136 IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
4137 VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
4138 Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
4139 VAArgSize);
4140
4141 if (!VAStartInstrumentationList.empty()) {
4142 // If there is a va_start in this function, make a backup copy of
4143 // va_arg_tls somewhere in the function entry block.
4144 VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
4145 IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
4146 }
4147
    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(RegSaveAreaPtrTy, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(8);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};

/// AArch64-specific implementation of VarArgHelper.
struct VarArgAArch64Helper : public VarArgHelper {
  static const unsigned kAArch64GrArgSize = 64;
  static const unsigned kAArch64VrArgSize = 128;

  static const unsigned AArch64GrBegOffset = 0;
  static const unsigned AArch64GrEndOffset = kAArch64GrArgSize;
  // Make VR space aligned to 16 bytes.
  static const unsigned AArch64VrBegOffset = AArch64GrEndOffset;
  static const unsigned AArch64VrEndOffset =
      AArch64VrBegOffset + kAArch64VrArgSize;
  static const unsigned AArch64VAEndOffset = AArch64VrEndOffset;

  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy = nullptr;
  Value *VAArgOverflowSize = nullptr;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  enum ArgKind { AK_GeneralPurpose, AK_FloatingPoint, AK_Memory };

  VarArgAArch64Helper(Function &F, MemorySanitizer &MS,
                      MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV) {}

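  // Examples of the classification below (illustrative): an i32 or a pointer
  // is AK_GeneralPurpose, a double or a <4 x float> is AK_FloatingPoint, and
  // an i128 (an integer wider than 64 bits) falls through to AK_Memory.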
  ArgKind classifyArgument(Value *arg) {
    Type *T = arg->getType();
    if (T->isFPOrFPVectorTy())
      return AK_FloatingPoint;
    if ((T->isIntegerTy() && T->getPrimitiveSizeInBits() <= 64) ||
        (T->isPointerTy()))
      return AK_GeneralPurpose;
    return AK_Memory;
  }

  // The instrumentation stores the argument shadow in a non ABI-specific
  // format because it does not know which argument is named (since Clang,
  // as in the x86_64 case, lowers the va_args in the frontend and this pass
  // only sees the low-level code that deals with va_list internals).
  // The first eight GR registers are saved in the first 64 bytes of the
  // va_arg TLS array, followed by the first eight FP/SIMD registers, and
  // then the remaining arguments.
  // Using constant offsets within the va_arg TLS array allows a fast copy
  // in the finalize instrumentation.
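  // Resulting layout of the va_arg TLS array (a sketch derived from the
  // offsets above):
  //   [0,   64)  shadow for x0-x7 (GR save area, 8 bytes per register)
  //   [64, 192)  shadow for q0-q7 (VR save area, 16 bytes per register)
  //   [192, ..)  shadow for arguments passed in memory (overflow area)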
  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    unsigned GrOffset = AArch64GrBegOffset;
    unsigned VrOffset = AArch64VrBegOffset;
    unsigned OverflowOffset = AArch64VAEndOffset;

    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
      ArgKind AK = classifyArgument(A);
      if (AK == AK_GeneralPurpose && GrOffset >= AArch64GrEndOffset)
        AK = AK_Memory;
      if (AK == AK_FloatingPoint && VrOffset >= AArch64VrEndOffset)
        AK = AK_Memory;
      Value *Base;
      switch (AK) {
      case AK_GeneralPurpose:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, GrOffset, 8);
        GrOffset += 8;
        break;
      case AK_FloatingPoint:
        Base = getShadowPtrForVAArgument(A->getType(), IRB, VrOffset, 8);
        VrOffset += 16;
        break;
      case AK_Memory:
        // Don't count fixed arguments in the overflow area - va_start will
        // skip right over them.
        if (IsFixed)
          continue;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        Base = getShadowPtrForVAArgument(A->getType(), IRB, OverflowOffset,
                                         alignTo(ArgSize, 8));
        OverflowOffset += alignTo(ArgSize, 8);
        break;
      }
      // Count GR/VR fixed arguments toward their respective offsets, but
      // don't bother to actually store a shadow.
      if (IsFixed)
        continue;
      if (!Base)
        continue;
      IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                             kShadowTLSAlignment.value());
    }
    Constant *OverflowSize =
        ConstantInt::get(IRB.getInt64Ty(), OverflowOffset - AArch64VAEndOffset);
    IRB.CreateStore(OverflowSize, MS.VAArgOverflowSizeTLS);
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   unsigned ArgOffset, unsigned ArgSize) {
    // Make sure we don't overflow __msan_va_arg_tls.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    const Align Alignment = Align(8);
    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
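    // Unpoison the whole va_list; on AArch64 it is 32 bytes (three pointers
    // plus the two 32-bit __gr_offs/__vr_offs fields, see the layout sketch
    // after getVAField32 below).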
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, Alignment, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    const Align Alignment = Align(8);
    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 32, Alignment, false);
  }

  // Retrieve a va_list field of 'void*' size.
  Value *getVAField64(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtrPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt64PtrTy(*MS.C));
    return IRB.CreateLoad(Type::getInt64Ty(*MS.C), SaveAreaPtrPtr);
  }

  // Retrieve a va_list field of 'int' size.
  Value *getVAField32(IRBuilder<> &IRB, Value *VAListTag, int offset) {
    Value *SaveAreaPtr = IRB.CreateIntToPtr(
        IRB.CreateAdd(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                      ConstantInt::get(MS.IntptrTy, offset)),
        Type::getInt32PtrTy(*MS.C));
    Value *SaveArea32 = IRB.CreateLoad(IRB.getInt32Ty(), SaveAreaPtr);
    return IRB.CreateSExt(SaveArea32, MS.IntptrTy);
  }
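
  // The field offsets used with the two getters above correspond to the
  // AAPCS64 va_list layout:
  //   struct __va_list {
  //     void *__stack;   // offset  0
  //     void *__gr_top;  // offset  8
  //     void *__vr_top;  // offset 16
  //     int __gr_offs;   // offset 24
  //     int __vr_offs;   // offset 28
  //   };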

  void finalizeInstrumentation() override {
    assert(!VAArgOverflowSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
      VAArgOverflowSize =
          IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
      Value *CopySize =
          IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, AArch64VAEndOffset),
                        VAArgOverflowSize);
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
    }

    Value *GrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64GrArgSize);
    Value *VrArgSize = ConstantInt::get(MS.IntptrTy, kAArch64VrArgSize);

    // Instrument va_start, copy va_list shadow from the backup copy of
    // the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());

      Value *VAListTag = OrigInst->getArgOperand(0);

      // The variadic ABI for AArch64 creates two areas to save the incoming
      // argument registers (one for the 64-bit general registers x0-x7 and
      // another for the 128-bit FP/SIMD registers v0-v7).
      // We then need to propagate the shadow arguments to both regions
      // 'va::__gr_top + va::__gr_offs' and 'va::__vr_top + va::__vr_offs'.
      // The remaining arguments are saved in the shadow for 'va::stack'.
      // One caveat is that only the non-named arguments need to be
      // propagated, while the call-site instrumentation saved *all* of the
      // arguments. So when copying the shadow values from the va_arg TLS
      // array we need to adjust the offsets for both the GR and VR fields
      // by the __{gr,vr}_offs values (which are computed from the incoming
      // named arguments).

      // Read the stack pointer from the va_list.
      Value *StackSaveAreaPtr = getVAField64(IRB, VAListTag, 0);

      // Read both the __gr_top and __gr_off and add them up.
      Value *GrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 8);
      Value *GrOffSaveArea = getVAField32(IRB, VAListTag, 24);

      Value *GrRegSaveAreaPtr = IRB.CreateAdd(GrTopSaveAreaPtr, GrOffSaveArea);

      // Read both the __vr_top and __vr_off and add them up.
      Value *VrTopSaveAreaPtr = getVAField64(IRB, VAListTag, 16);
      Value *VrOffSaveArea = getVAField32(IRB, VAListTag, 28);

      Value *VrRegSaveAreaPtr = IRB.CreateAdd(VrTopSaveAreaPtr, VrOffSaveArea);

      // We do not know how many named arguments the callee actually uses,
      // and at the call site all of the arguments were saved. Since
      // __gr_offs is defined as '0 - ((8 - named_gr) * 8)', the idea is to
      // propagate only the variadic arguments by skipping the bytes of
      // shadow that correspond to the named arguments.
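      // Worked example (illustrative): with 3 named GR arguments,
      // __gr_offs = -(8 - 3) * 8 = -40, so the source offset computed below
      // becomes 64 + (-40) = 24, i.e. copying starts at the shadow of x3,
      // the first variadic GR register.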
      Value *GrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(GrArgSize, GrOffSaveArea);

      Value *GrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(GrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *GrSrcPtr = IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                              GrRegSaveAreaShadowPtrOff);
      Value *GrCopySize = IRB.CreateSub(GrArgSize, GrRegSaveAreaShadowPtrOff);
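      // Note that GrCopySize = 64 - (64 + __gr_offs) = -__gr_offs, which is
      // exactly the number of bytes occupied by the unnamed GR arguments.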

      IRB.CreateMemCpy(GrRegSaveAreaShadowPtr, Align(8), GrSrcPtr, Align(8),
                       GrCopySize);

      // Again, but for FP/SIMD values.
      Value *VrRegSaveAreaShadowPtrOff =
          IRB.CreateAdd(VrArgSize, VrOffSaveArea);

      Value *VrRegSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(VrRegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(8), /*isStore*/ true)
              .first;

      Value *VrSrcPtr = IRB.CreateInBoundsGEP(
          IRB.getInt8Ty(),
          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                IRB.getInt32(AArch64VrBegOffset)),
          VrRegSaveAreaShadowPtrOff);
      Value *VrCopySize = IRB.CreateSub(VrArgSize, VrRegSaveAreaShadowPtrOff);

      IRB.CreateMemCpy(VrRegSaveAreaShadowPtr, Align(8), VrSrcPtr, Align(8),
                       VrCopySize);

      // And finally for remaining arguments.
      Value *StackSaveAreaShadowPtr =
          MSV.getShadowOriginPtr(StackSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Align(16), /*isStore*/ true)
              .first;

      Value *StackSrcPtr =
          IRB.CreateInBoundsGEP(IRB.getInt8Ty(), VAArgTLSCopy,
                                IRB.getInt32(AArch64VAEndOffset));

      IRB.CreateMemCpy(StackSaveAreaShadowPtr, Align(16), StackSrcPtr,
                       Align(16), VAArgOverflowSize);
    }
  }
};

/// PowerPC64-specific implementation of VarArgHelper.
struct VarArgPowerPC64Helper : public VarArgHelper {
  Function &F;
  MemorySanitizer &MS;
  MemorySanitizerVisitor &MSV;
  Value *VAArgTLSCopy = nullptr;
  Value *VAArgSize = nullptr;

  SmallVector<CallInst *, 16> VAStartInstrumentationList;

  VarArgPowerPC64Helper(Function &F, MemorySanitizer &MS,
                        MemorySanitizerVisitor &MSV)
      : F(F), MS(MS), MSV(MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {
    // For PowerPC, we need to deal with the alignment of stack arguments -
    // they are mostly aligned to 8 bytes, but vectors and i128 arrays
    // are aligned to 16 bytes, byvals can be aligned to 8 or 16 bytes,
    // and QPX vectors are aligned to 32 bytes. For that reason, we
    // compute the current offset from the stack pointer (which is always
    // properly aligned) and the offset of the first vararg, then subtract
    // them.
    unsigned VAArgBase;
    Triple TargetTriple(F.getParent()->getTargetTriple());
    // The parameter save area starts at 48 bytes from the frame pointer for
    // ABIv1, and at 32 bytes for ABIv2. This is usually determined by the
    // target endianness, but in theory it could be overridden by a function
    // attribute. For simplicity, we ignore that here (it would only matter
    // for QPX vectors).
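    // Worked example (illustrative): for a ppc64 (ABIv1) call f(int x, ...)
    // the fixed argument occupies save-area bytes 48..55 and advances
    // VAArgBase to 56, so the first vararg's shadow is recorded at TLS
    // offset VAArgOffset - VAArgBase = 0.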
    if (TargetTriple.getArch() == Triple::ppc64)
      VAArgBase = 48;
    else
      VAArgBase = 32;
    unsigned VAArgOffset = VAArgBase;
    const DataLayout &DL = F.getParent()->getDataLayout();
    for (CallSite::arg_iterator ArgIt = CS.arg_begin(), End = CS.arg_end();
         ArgIt != End; ++ArgIt) {
      Value *A = *ArgIt;
      unsigned ArgNo = CS.getArgumentNo(ArgIt);
      bool IsFixed = ArgNo < CS.getFunctionType()->getNumParams();
      bool IsByVal = CS.paramHasAttr(ArgNo, Attribute::ByVal);
      if (IsByVal) {
        assert(A->getType()->isPointerTy());
        Type *RealTy = A->getType()->getPointerElementType();
        uint64_t ArgSize = DL.getTypeAllocSize(RealTy);
        uint64_t ArgAlign = CS.getParamAlignment(ArgNo);
        if (ArgAlign < 8)
          ArgAlign = 8;
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (!IsFixed) {
          Value *Base = getShadowPtrForVAArgument(
              RealTy, IRB, VAArgOffset - VAArgBase, ArgSize);
          if (Base) {
            Value *AShadowPtr, *AOriginPtr;
            std::tie(AShadowPtr, AOriginPtr) =
                MSV.getShadowOriginPtr(A, IRB, IRB.getInt8Ty(),
                                       kShadowTLSAlignment, /*isStore*/ false);

            IRB.CreateMemCpy(Base, kShadowTLSAlignment, AShadowPtr,
                             kShadowTLSAlignment, ArgSize);
          }
        }
        VAArgOffset += alignTo(ArgSize, 8);
      } else {
        Value *Base;
        uint64_t ArgSize = DL.getTypeAllocSize(A->getType());
        uint64_t ArgAlign = 8;
        if (A->getType()->isArrayTy()) {
          // Arrays are aligned to element size, except for long double
          // arrays, which are aligned to 8 bytes.
          Type *ElementTy = A->getType()->getArrayElementType();
          if (!ElementTy->isPPC_FP128Ty())
            ArgAlign = DL.getTypeAllocSize(ElementTy);
        } else if (A->getType()->isVectorTy()) {
          // Vectors are naturally aligned.
          ArgAlign = DL.getTypeAllocSize(A->getType());
        }
        if (ArgAlign < 8)
          ArgAlign = 8;
        VAArgOffset = alignTo(VAArgOffset, ArgAlign);
        if (DL.isBigEndian()) {
          // Adjust the shadow for arguments smaller than 8 bytes to match
          // the placement of the bits on a big-endian system.
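          // Worked example (illustrative): an i32 vararg on big-endian
          // ppc64 occupies the high-addressed half of its 8-byte slot, so
          // the shadow offset is bumped by 8 - 4 = 4 bytes to line up with
          // the value's actual bytes.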
          if (ArgSize < 8)
            VAArgOffset += (8 - ArgSize);
        }
        if (!IsFixed) {
          Base = getShadowPtrForVAArgument(A->getType(), IRB,
                                           VAArgOffset - VAArgBase, ArgSize);
          if (Base)
            IRB.CreateAlignedStore(MSV.getShadow(A), Base,
                                   kShadowTLSAlignment.value());
        }
        VAArgOffset += ArgSize;
        VAArgOffset = alignTo(VAArgOffset, 8);
      }
      if (IsFixed)
        VAArgBase = VAArgOffset;
    }

    Constant *TotalVAArgSize =
        ConstantInt::get(IRB.getInt64Ty(), VAArgOffset - VAArgBase);
    // We reuse VAArgOverflowSizeTLS as VAArgSizeTLS to avoid creating a new
    // class member, i.e. here it holds the total size of all varargs.
    IRB.CreateStore(TotalVAArgSize, MS.VAArgOverflowSizeTLS);
  }

  /// Compute the shadow address for a given va_arg.
  Value *getShadowPtrForVAArgument(Type *Ty, IRBuilder<> &IRB,
                                   unsigned ArgOffset, unsigned ArgSize) {
    // Make sure we don't overflow __msan_va_arg_tls.
    if (ArgOffset + ArgSize > kParamTLSSize)
      return nullptr;
    Value *Base = IRB.CreatePointerCast(MS.VAArgTLS, MS.IntptrTy);
    Base = IRB.CreateAdd(Base, ConstantInt::get(MS.IntptrTy, ArgOffset));
    return IRB.CreateIntToPtr(Base, PointerType::get(MSV.getShadowTy(Ty), 0),
                              "_msarg");
  }

  void visitVAStartInst(VAStartInst &I) override {
    IRBuilder<> IRB(&I);
    VAStartInstrumentationList.push_back(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    const Align Alignment = Align(8);
    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, Alignment, false);
  }

  void visitVACopyInst(VACopyInst &I) override {
    IRBuilder<> IRB(&I);
    Value *VAListTag = I.getArgOperand(0);
    Value *ShadowPtr, *OriginPtr;
    const Align Alignment = Align(8);
    std::tie(ShadowPtr, OriginPtr) = MSV.getShadowOriginPtr(
        VAListTag, IRB, IRB.getInt8Ty(), Alignment, /*isStore*/ true);
    // Unpoison the whole __va_list_tag.
    // FIXME: magic ABI constants.
    IRB.CreateMemSet(ShadowPtr, Constant::getNullValue(IRB.getInt8Ty()),
                     /* size */ 8, Alignment, false);
  }

  void finalizeInstrumentation() override {
    assert(!VAArgSize && !VAArgTLSCopy &&
           "finalizeInstrumentation called twice");
    IRBuilder<> IRB(MSV.ActualFnStart->getFirstNonPHI());
    VAArgSize = IRB.CreateLoad(IRB.getInt64Ty(), MS.VAArgOverflowSizeTLS);
    Value *CopySize = IRB.CreateAdd(ConstantInt::get(MS.IntptrTy, 0),
                                    VAArgSize);

    if (!VAStartInstrumentationList.empty()) {
      // If there is a va_start in this function, make a backup copy of
      // va_arg_tls somewhere in the function entry block.
      VAArgTLSCopy = IRB.CreateAlloca(Type::getInt8Ty(*MS.C), CopySize);
      IRB.CreateMemCpy(VAArgTLSCopy, Align(8), MS.VAArgTLS, Align(8), CopySize);
    }

    // Instrument va_start.
    // Copy va_list shadow from the backup copy of the TLS contents.
    for (size_t i = 0, n = VAStartInstrumentationList.size(); i < n; i++) {
      CallInst *OrigInst = VAStartInstrumentationList[i];
      IRBuilder<> IRB(OrigInst->getNextNode());
      Value *VAListTag = OrigInst->getArgOperand(0);
      Type *RegSaveAreaPtrTy = Type::getInt64PtrTy(*MS.C);
      Value *RegSaveAreaPtrPtr =
          IRB.CreateIntToPtr(IRB.CreatePtrToInt(VAListTag, MS.IntptrTy),
                             PointerType::get(RegSaveAreaPtrTy, 0));
      Value *RegSaveAreaPtr =
          IRB.CreateLoad(RegSaveAreaPtrTy, RegSaveAreaPtrPtr);
      Value *RegSaveAreaShadowPtr, *RegSaveAreaOriginPtr;
      const Align Alignment = Align(8);
      std::tie(RegSaveAreaShadowPtr, RegSaveAreaOriginPtr) =
          MSV.getShadowOriginPtr(RegSaveAreaPtr, IRB, IRB.getInt8Ty(),
                                 Alignment, /*isStore*/ true);
      IRB.CreateMemCpy(RegSaveAreaShadowPtr, Alignment, VAArgTLSCopy,
                       Alignment, CopySize);
    }
  }
};

/// A no-op implementation of VarArgHelper.
struct VarArgNoOpHelper : public VarArgHelper {
  VarArgNoOpHelper(Function &F, MemorySanitizer &MS,
                   MemorySanitizerVisitor &MSV) {}

  void visitCallSite(CallSite &CS, IRBuilder<> &IRB) override {}

  void visitVAStartInst(VAStartInst &I) override {}

  void visitVACopyInst(VACopyInst &I) override {}

  void finalizeInstrumentation() override {}
};

} // end anonymous namespace

static VarArgHelper *CreateVarArgHelper(Function &Func, MemorySanitizer &Msan,
                                        MemorySanitizerVisitor &Visitor) {
  // VarArg handling is only implemented on AMD64, MIPS64, AArch64 and
  // PowerPC64. False positives are possible on other platforms.
  Triple TargetTriple(Func.getParent()->getTargetTriple());
  if (TargetTriple.getArch() == Triple::x86_64)
    return new VarArgAMD64Helper(Func, Msan, Visitor);
  else if (TargetTriple.isMIPS64())
    return new VarArgMIPS64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == Triple::aarch64)
    return new VarArgAArch64Helper(Func, Msan, Visitor);
  else if (TargetTriple.getArch() == Triple::ppc64 ||
           TargetTriple.getArch() == Triple::ppc64le)
    return new VarArgPowerPC64Helper(Func, Msan, Visitor);
  else
    return new VarArgNoOpHelper(Func, Msan, Visitor);
}

bool MemorySanitizer::sanitizeFunction(Function &F, TargetLibraryInfo &TLI) {
  if (!CompileKernel && F.getName() == kMsanModuleCtorName)
    return false;

  MemorySanitizerVisitor Visitor(F, *this, TLI);

  // Clear out readonly/readnone attributes.
  AttrBuilder B;
  B.addAttribute(Attribute::ReadOnly)
      .addAttribute(Attribute::ReadNone)
      .addAttribute(Attribute::WriteOnly)
      .addAttribute(Attribute::ArgMemOnly)
      .addAttribute(Attribute::Speculatable);
  F.removeAttributes(AttributeList::FunctionIndex, B);

  return Visitor.runOnFunction();
}