// Copyright (c) 2012 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "sandbox/linux/seccomp-bpf/syscall.h"

#include <asm/unistd.h>
#include <errno.h>

#include "base/basictypes.h"

namespace sandbox {

namespace {

asm(// We need to be able to tell the kernel exactly where we made a
    // system call. The C++ compiler sometimes likes to clone or
    // inline code, which would inadvertently end up duplicating
    // the entry point.
    // "gcc" can suppress code duplication with suitable function
    // attributes, but "clang" doesn't have this ability.
    // The "clang" developer mailing list suggested that the correct
    // and portable solution is a file-scope assembly block.
    // N.B. We do mark our code as a proper function so that backtraces
    // work correctly. But we make absolutely no attempt to use the
    // ABI's calling conventions for passing arguments. We will only
    // ever be called from assembly code and thus can pick more
    // suitable calling conventions.
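    // Concretely, the private convention implemented below is: the system
    // call number arrives in the architecture's system call register
    // (%eax, %rax, or r0), a pointer to an array of six argument words
    // arrives in a scratch register (%edi, %r12, or r6), and a negative
    // system call number skips the kernel entry and instead returns the
    // address of the instruction that follows it.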
#if defined(__i386__)
    ".text\n"
    ".align 16, 0x90\n"
    ".type SyscallAsm, @function\n"
    "SyscallAsm:.cfi_startproc\n"
    // Check if "%eax" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "int $0x80". This address can be
    // used as a marker that BPF code inspects.
    "test %eax, %eax\n"
    "jge  1f\n"
    // Always make sure that our code is position-independent, or
    // address space randomization might not work on i386. This means
    // we can't use "lea", but instead have to rely on "call/pop".
    "call 0f;   .cfi_adjust_cfa_offset  4\n"
    "0:pop  %eax; .cfi_adjust_cfa_offset -4\n"
    "addl $2f-0b, %eax\n"
    "ret\n"
    // Save registers that we don't want to clobber. On i386, we need to
    // save relatively aggressively, as there are a couple of registers
    // that are used internally (e.g. %ebx for position-independent
    // code, and %ebp for the frame pointer), and as we need to keep at
    // least a few registers available for the register allocator.
    "1:push %esi; .cfi_adjust_cfa_offset 4\n"
    "push %edi; .cfi_adjust_cfa_offset 4\n"
    "push %ebx; .cfi_adjust_cfa_offset 4\n"
    "push %ebp; .cfi_adjust_cfa_offset 4\n"
    // Copy entries from the array holding the arguments into the
    // correct CPU registers.
    "movl  0(%edi), %ebx\n"
    "movl  4(%edi), %ecx\n"
    "movl  8(%edi), %edx\n"
    "movl 12(%edi), %esi\n"
    "movl 20(%edi), %ebp\n"
    "movl 16(%edi), %edi\n"
    // Enter the kernel.
    "int  $0x80\n"
    // This is our "magic" return address that the BPF filter sees.
    "2:"
    // Restore any clobbered registers that we didn't declare to the
    // compiler.
    "pop  %ebp; .cfi_adjust_cfa_offset -4\n"
    "pop  %ebx; .cfi_adjust_cfa_offset -4\n"
    "pop  %edi; .cfi_adjust_cfa_offset -4\n"
    "pop  %esi; .cfi_adjust_cfa_offset -4\n"
    "ret\n"
    ".cfi_endproc\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__x86_64__)
    ".text\n"
    ".align 16, 0x90\n"
    ".type SyscallAsm, @function\n"
    "SyscallAsm:.cfi_startproc\n"
    // Check if "%rax" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "syscall". This address can be
    // used as a marker that BPF code inspects.
    "test %rax, %rax\n"
    "jge  1f\n"
    // Always make sure that our code is position-independent, or the
    // linker will throw a hissy fit on x86-64.
    "call 0f;   .cfi_adjust_cfa_offset  8\n"
    "0:pop  %rax; .cfi_adjust_cfa_offset -8\n"
    "addq $2f-0b, %rax\n"
    "ret\n"
    // We declared all clobbered registers to the compiler. On x86-64,
    // there really isn't much of a problem with register pressure. So,
    // we can go ahead and directly copy the entries from the arguments
    // array into the appropriate CPU registers.
    "1:movq  0(%r12), %rdi\n"
    "movq  8(%r12), %rsi\n"
    "movq 16(%r12), %rdx\n"
    "movq 24(%r12), %r10\n"
    "movq 32(%r12), %r8\n"
    "movq 40(%r12), %r9\n"
    // Enter the kernel.
    "syscall\n"
    // This is our "magic" return address that the BPF filter sees.
    "2:ret\n"
    ".cfi_endproc\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#elif defined(__arm__)
    // Throughout this file, we use the same mode (ARM vs. thumb) that
    // the C++ compiler uses. This means that, when transferring control
    // from C++ to assembly code, we do not need to switch modes (e.g.
    // by using the "bx" instruction). It also means that our assembly
    // code should not be invoked directly from code that lives in
    // other compilation units, as we don't bother implementing thumb
    // interworking. That's OK, as we don't make any of the assembly
    // symbols public. They are all local to this file.
    ".text\n"
    ".align 2\n"
    ".type SyscallAsm, %function\n"
#if defined(__thumb__)
    ".thumb_func\n"
#else
    ".arm\n"
#endif
    "SyscallAsm:.fnstart\n"
    "@ args = 0, pretend = 0, frame = 8\n"
    "@ frame_needed = 1, uses_anonymous_args = 0\n"
#if defined(__thumb__)
    ".cfi_startproc\n"
    "push {r7, lr}\n"
    ".cfi_offset 14, -4\n"
    ".cfi_offset  7, -8\n"
    "mov r7, sp\n"
    ".cfi_def_cfa_register 7\n"
    ".cfi_def_cfa_offset 8\n"
#else
    "stmfd sp!, {fp, lr}\n"
    "add fp, sp, #4\n"
#endif
    // Check if "r0" is negative. If so, do not attempt to make a
    // system call. Instead, compute the return address that is visible
    // to the kernel after we execute "swi 0". This address can be
    // used as a marker that BPF code inspects.
    "cmp r0, #0\n"
    "bge 1f\n"
    "adr r0, 2f\n"
    "b   2f\n"
    // We declared (almost) all clobbered registers to the compiler. On
    // ARM there is no particular register pressure. So, we can go
    // ahead and directly copy the entries from the arguments array
    // into the appropriate CPU registers.
    "1:ldr r5, [r6, #20]\n"
    "ldr r4, [r6, #16]\n"
    "ldr r3, [r6, #12]\n"
    "ldr r2, [r6, #8]\n"
    "ldr r1, [r6, #4]\n"
    "mov r7, r0\n"
    "ldr r0, [r6, #0]\n"
    // Enter the kernel.
    "swi 0\n"
// Restore the frame pointer. Also restore the program counter from
// the link register; this makes us return to the caller.
#if defined(__thumb__)
    "2:pop {r7, pc}\n"
    ".cfi_endproc\n"
#else
    "2:ldmfd sp!, {fp, pc}\n"
#endif
    ".fnend\n"
    "9:.size SyscallAsm, 9b-SyscallAsm\n"
#endif
    );  // asm

}  // namespace

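// Example (illustrative sketch, not exercised anywhere in this file): with
// this signature all six argument slots have to be spelled out, so issuing
// getpid(2) directly through the wrapper would look roughly like
//
//   intptr_t pid = Syscall::Call(__NR_getpid, 0, 0, 0, 0, 0, 0);
//
// Passing a negative "nr" does not enter the kernel at all; as the assembly
// above explains, it instead returns the address of the instruction that
// follows the kernel entry, which BPF policies can use as a marker.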
intptr_t Syscall::Call(int nr,
                       intptr_t p0,
                       intptr_t p1,
                       intptr_t p2,
                       intptr_t p3,
                       intptr_t p4,
                       intptr_t p5) {
  // We rely on "intptr_t" being exactly the same size as a "void *". This is
  // typically true, but just in case, we add a check. The language
  // specification allows platforms some leeway in cases where
  // "sizeof(void *)" is not the same as "sizeof(void (*)())". We expect
  // that this would only be an issue for IA64, which we are currently not
  // planning on supporting. It is even possible that this would work on
  // IA64, but for lack of actual hardware, we cannot test it.
  COMPILE_ASSERT(sizeof(void*) == sizeof(intptr_t),
                 pointer_types_and_intptr_must_be_exactly_the_same_size);

  const intptr_t args[6] = {p0, p1, p2, p3, p4, p5};

// Invoke our file-scope assembly code. The constraints have been picked
// carefully to match what the rest of the assembly code expects in input,
// output, and clobbered registers.
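// (For reference on the constraints below: "a" binds an operand to %eax/%rax,
// "D" binds it to %edi, and "0" reuses the register chosen for operand 0;
// explicit register variables pin the argument pointer to %r12 on x86-64 and
// to r6 on ARM.)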
#if defined(__i386__)
  intptr_t ret = nr;
  asm volatile(
      "call SyscallAsm\n"
      // N.B. These are not the calling conventions normally used by the ABI.
      : "=a"(ret)
      : "0"(ret), "D"(args)
      : "cc", "esp", "memory", "ecx", "edx");
#elif defined(__x86_64__)
  intptr_t ret = nr;
  {
    register const intptr_t* data __asm__("r12") = args;
    asm volatile(
        "lea  -128(%%rsp), %%rsp\n"  // Avoid red zone.
        "call SyscallAsm\n"
        "lea  128(%%rsp), %%rsp\n"
        // N.B. These are not the calling conventions normally used by the ABI.
        : "=a"(ret)
        : "0"(ret), "r"(data)
        : "cc",
          "rsp",
          "memory",
          "rcx",
          "rdi",
          "rsi",
          "rdx",
          "r8",
          "r9",
          "r10",
          "r11");
  }
#elif defined(__arm__)
  intptr_t ret;
  {
    register intptr_t inout __asm__("r0") = nr;
    register const intptr_t* data __asm__("r6") = args;
    asm volatile(
        "bl SyscallAsm\n"
        // N.B. These are not the calling conventions normally used by the ABI.
        : "=r"(inout)
        : "0"(inout), "r"(data)
        : "cc",
          "lr",
          "memory",
          "r1",
          "r2",
          "r3",
          "r4",
          "r5"
#if !defined(__thumb__)
          // In thumb mode, we cannot use "r7" as a general purpose register, as
          // it is our frame pointer. We have to manually manage and preserve
          // it.
          // In ARM mode, we have a dedicated frame pointer register and "r7" is
          // thus available as a general purpose register. We don't preserve it,
          // but instead mark it as clobbered.
          ,
          "r7"
#endif  // !defined(__thumb__)
        );
    ret = inout;
  }
#else
#error "Unimplemented architecture"
#endif
  return ret;
}

}  // namespace sandbox