//======- X86RetpolineThunks.cpp - Construct retpoline thunks for x86  --=====//
//
//                     The LLVM Compiler Infrastructure
//
// This file is distributed under the University of Illinois Open Source
// License. See LICENSE.TXT for details.
//
//===----------------------------------------------------------------------===//
/// \file
///
/// Pass that injects an MI thunk implementing a "retpoline". This is
/// a RET-implemented trampoline that is used to lower indirect calls in a way
/// that prevents speculation on some x86 processors and can be used to mitigate
/// security vulnerabilities due to targeted speculative execution and side
/// channels such as CVE-2017-5715.
///
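/// Illustratively (a rough sketch, not code this pass emits itself), a call
/// site that would otherwise be "callq *%r11" is instead lowered to a direct
/// call to the thunk:
///
///   callq __llvm_retpoline_r11
///
/// and the thunk arranges for its RET to transfer control to the address in
/// %r11, while any speculative execution of that RET is captured in a
/// harmless infinite loop.
///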
/// TODO(chandlerc): All of this code could use better comments and
/// documentation.
///
//===----------------------------------------------------------------------===//

#include "X86.h"
#include "X86InstrBuilder.h"
#include "X86Subtarget.h"
#include "llvm/CodeGen/MachineFunction.h"
#include "llvm/CodeGen/MachineInstrBuilder.h"
#include "llvm/CodeGen/MachineModuleInfo.h"
#include "llvm/CodeGen/Passes.h"
#include "llvm/CodeGen/TargetPassConfig.h"
#include "llvm/IR/IRBuilder.h"
#include "llvm/IR/Instructions.h"
#include "llvm/IR/Module.h"
#include "llvm/Support/CommandLine.h"
#include "llvm/Support/Debug.h"
#include "llvm/Support/raw_ostream.h"

using namespace llvm;

#define DEBUG_TYPE "x86-retpoline-thunks"

static const char ThunkNamePrefix[] = "__llvm_retpoline_";
static const char R11ThunkName[]    = "__llvm_retpoline_r11";
static const char EAXThunkName[]    = "__llvm_retpoline_eax";
static const char ECXThunkName[]    = "__llvm_retpoline_ecx";
static const char EDXThunkName[]    = "__llvm_retpoline_edx";
static const char EDIThunkName[]    = "__llvm_retpoline_edi";

namespace {
class X86RetpolineThunks : public MachineFunctionPass {
public:
  static char ID;

  X86RetpolineThunks() : MachineFunctionPass(ID) {}

  StringRef getPassName() const override { return "X86 Retpoline Thunks"; }

  bool doInitialization(Module &M) override;
  bool runOnMachineFunction(MachineFunction &F) override;

  void getAnalysisUsage(AnalysisUsage &AU) const override {
    MachineFunctionPass::getAnalysisUsage(AU);
    AU.addRequired<MachineModuleInfo>();
    AU.addPreserved<MachineModuleInfo>();
  }

private:
  MachineModuleInfo *MMI;
  const TargetMachine *TM;
  bool Is64Bit;
  const X86Subtarget *STI;
  const X86InstrInfo *TII;

  bool InsertedThunks;

  void createThunkFunction(Module &M, StringRef Name);
  void insertRegReturnAddrClobber(MachineBasicBlock &MBB, unsigned Reg);
  void populateThunk(MachineFunction &MF, Optional<unsigned> Reg = None);
};

} // end anonymous namespace

FunctionPass *llvm::createX86RetpolineThunksPass() {
  return new X86RetpolineThunks();
}

char X86RetpolineThunks::ID = 0;

bool X86RetpolineThunks::doInitialization(Module &M) {
  InsertedThunks = false;
  return false;
}

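// runOnMachineFunction is called for every function in the module. For an
// ordinary (non-thunk) function it only decides whether the thunk functions
// need to be created for this module; when the pass later reaches one of the
// thunk functions themselves, it fills in that thunk's machine-level body.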
bool X86RetpolineThunks::runOnMachineFunction(MachineFunction &MF) {
  LLVM_DEBUG(dbgs() << getPassName() << '\n');

  TM = &MF.getTarget();
  STI = &MF.getSubtarget<X86Subtarget>();
  TII = STI->getInstrInfo();
  Is64Bit = TM->getTargetTriple().getArch() == Triple::x86_64;

  MMI = &getAnalysis<MachineModuleInfo>();
  Module &M = const_cast<Module &>(*MMI->getModule());

  // If this function is not a thunk, check to see if we need to insert
  // a thunk.
  if (!MF.getName().startswith(ThunkNamePrefix)) {
    // If we've already inserted a thunk, nothing else to do.
    if (InsertedThunks)
      return false;

    // Only add a thunk if one of the functions has the retpoline feature
    // enabled in its subtarget, and doesn't enable external thunks.
    // FIXME: Conditionalize on indirect calls so we don't emit a thunk when
    // nothing will end up calling it.
    // FIXME: It's a little silly to look at every function just to enumerate
    // the subtargets, but eventually we'll want to look at them for indirect
    // calls, so maybe this is OK.
    if (!STI->useRetpoline() || STI->useRetpolineExternalThunk())
      return false;

    // Otherwise, we need to insert the thunk.
    // WARNING: This is not really a well-behaved thing to do in a function
    // pass. We extract the module and insert a new function (and machine
    // function) directly into the module.
    if (Is64Bit)
      createThunkFunction(M, R11ThunkName);
    else
      for (StringRef Name :
           {EAXThunkName, ECXThunkName, EDXThunkName, EDIThunkName})
        createThunkFunction(M, Name);
    InsertedThunks = true;
    return true;
  }

  // If this *is* a thunk function, we need to populate it with the correct MI.
  if (Is64Bit) {
    assert(MF.getName() == "__llvm_retpoline_r11" &&
           "Should only have an r11 thunk on 64-bit targets");

    // __llvm_retpoline_r11:
    //   callq .Lr11_call_target
    // .Lr11_capture_spec:
    //   pause
    //   lfence
    //   jmp .Lr11_capture_spec
    // .align 16
    // .Lr11_call_target:
    //   movq %r11, (%rsp)
    //   retq
    populateThunk(MF, X86::R11);
  } else {
    // For 32-bit targets we need to emit a collection of thunks for various
    // possible scratch registers as well as a fallback that uses EDI, which is
    // normally callee saved.
    //   __llvm_retpoline_eax:
    //         calll .Leax_call_target
    //   .Leax_capture_spec:
    //         pause
    //         lfence
    //         jmp .Leax_capture_spec
    //   .align 16
    //   .Leax_call_target:
    //         movl %eax, (%esp)  # Clobber return addr
    //         retl
    //
    //   __llvm_retpoline_ecx:
    //   ... # Same setup
    //         movl %ecx, (%esp)
    //         retl
    //
    //   __llvm_retpoline_edx:
    //   ... # Same setup
    //         movl %edx, (%esp)
    //         retl
    //
    //   __llvm_retpoline_edi:
    //   ... # Same setup
    //         movl %edi, (%esp)
    //         retl
    if (MF.getName() == EAXThunkName)
      populateThunk(MF, X86::EAX);
    else if (MF.getName() == ECXThunkName)
      populateThunk(MF, X86::ECX);
    else if (MF.getName() == EDXThunkName)
      populateThunk(MF, X86::EDX);
    else if (MF.getName() == EDIThunkName)
      populateThunk(MF, X86::EDI);
    else
      llvm_unreachable("Invalid thunk name on x86-32!");
  }

  return true;
}

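// Create the IR-level thunk function and a skeleton MachineFunction for it;
// the body is filled in later by populateThunk. The thunk is linkonce_odr and
// hidden, in its own comdat, so every object file can carry a copy and the
// linker keeps just one.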
void X86RetpolineThunks::createThunkFunction(Module &M, StringRef Name) {
  assert(Name.startswith(ThunkNamePrefix) &&
         "Created a thunk with an unexpected prefix!");

  LLVMContext &Ctx = M.getContext();
  auto Type = FunctionType::get(Type::getVoidTy(Ctx), false);
  Function *F =
      Function::Create(Type, GlobalValue::LinkOnceODRLinkage, Name, &M);
  F->setVisibility(GlobalValue::HiddenVisibility);
  F->setComdat(M.getOrInsertComdat(Name));

  // Add attributes so that we don't create a frame, emit unwind information,
  // or get inlined.
  AttrBuilder B;
  B.addAttribute(llvm::Attribute::NoUnwind);
  B.addAttribute(llvm::Attribute::Naked);
  F->addAttributes(llvm::AttributeList::FunctionIndex, B);

  // Populate our function a bit so that we can verify.
  BasicBlock *Entry = BasicBlock::Create(Ctx, "entry", F);
  IRBuilder<> Builder(Entry);

  Builder.CreateRetVoid();

  // MachineFunctions/MachineBasicBlocks aren't created automatically for the
  // IR-level constructs we already made. Create them and insert them into the
  // module.
  MachineFunction &MF = MMI->getOrCreateMachineFunction(*F);
  MachineBasicBlock *EntryMBB = MF.CreateMachineBasicBlock(Entry);

  // Insert EntryMBB into MF. It's not in the module until we do this.
  MF.insert(MF.end(), EntryMBB);
}

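// Overwrite the return address slot at the top of the stack with Reg. The
// thunk's CALL pushed that slot, so the RET that follows this store pops
// Reg's value and jumps to it, i.e. to the original indirect call target.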
void X86RetpolineThunks::insertRegReturnAddrClobber(MachineBasicBlock &MBB,
                                                    unsigned Reg) {
  const unsigned MovOpc = Is64Bit ? X86::MOV64mr : X86::MOV32mr;
  const unsigned SPReg = Is64Bit ? X86::RSP : X86::ESP;
  addRegOffset(BuildMI(&MBB, DebugLoc(), TII->get(MovOpc)), SPReg, false, 0)
      .addReg(Reg);
}

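// Fill in the machine-level body of a thunk created by createThunkFunction.
// The thunk is built from three blocks: an entry block that CALLs the
// call-target block, a capture block that pins any speculative execution of
// the RET in a pause/lfence loop, and the call-target block that clobbers the
// pushed return address with Reg and returns through it.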
void X86RetpolineThunks::populateThunk(MachineFunction &MF,
                                       Optional<unsigned> Reg) {
  // Set MF properties. We never use vregs...
  MF.getProperties().set(MachineFunctionProperties::Property::NoVRegs);

  MachineBasicBlock *Entry = &MF.front();
  Entry->clear();

  MachineBasicBlock *CaptureSpec =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MachineBasicBlock *CallTarget =
      MF.CreateMachineBasicBlock(Entry->getBasicBlock());
  MF.push_back(CaptureSpec);
  MF.push_back(CallTarget);

  const unsigned CallOpc = Is64Bit ? X86::CALL64pcrel32 : X86::CALLpcrel32;
  const unsigned RetOpc = Is64Bit ? X86::RETQ : X86::RETL;

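  // The entry block makes a direct call to the call-target block. The return
  // address pushed by this call points at CaptureSpec, but it is overwritten
  // before the RET executes, so only a mispredicted (speculative) return can
  // land in the capture block.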
  BuildMI(Entry, DebugLoc(), TII->get(CallOpc)).addMBB(CallTarget);
  Entry->addSuccessor(CallTarget);
  Entry->addSuccessor(CaptureSpec);
  CallTarget->setHasAddressTaken();

  // In the capture loop for speculation, we want to stop the processor from
  // speculating as fast as possible. On Intel processors, the PAUSE instruction
  // will block speculation without consuming any execution resources. On AMD
  // processors, the PAUSE instruction is (essentially) a nop, so we also use an
  // LFENCE instruction which they have advised will stop speculation as well
  // with minimal resource utilization. We still end the capture with a jump to
  // form an infinite loop to fully guarantee that, no matter what
  // implementation of the x86 ISA is used, speculation down this code path
  // never escapes.
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::PAUSE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::LFENCE));
  BuildMI(CaptureSpec, DebugLoc(), TII->get(X86::JMP_1)).addMBB(CaptureSpec);
  CaptureSpec->setHasAddressTaken();
  CaptureSpec->addSuccessor(CaptureSpec);

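  // The call-target block is the architectural destination of the call above:
  // it replaces the pushed return address with the target register and
  // returns, sending execution to the original indirect call target while the
  // RET's predicted destination remains the capture loop.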
  CallTarget->setAlignment(4);
  insertRegReturnAddrClobber(*CallTarget, *Reg);
  BuildMI(CallTarget, DebugLoc(), TII->get(RetOpc));
}