// Copyright 2017 The Abseil Authors.
//
// Licensed under the Apache License, Version 2.0 (the "License");
// you may not use this file except in compliance with the License.
// You may obtain a copy of the License at
//
//      https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS,
// WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
// See the License for the specific language governing permissions and
// limitations under the License.

// Allow dynamic symbol lookup in the kernel VDSO page.
//
// VDSOSupport -- a class representing kernel VDSO (if present).
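//
// Example usage (a sketch; assumes the IsPresent()/LookupSymbol interface
// declared in vdso_support.h):
//
//   absl::debugging_internal::VDSOSupport vdso;
//   if (vdso.IsPresent()) {
//     absl::debugging_internal::VDSOSupport::SymbolInfo info;
//     if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
//       // info.address points at the function inside the VDSO image.
//     }
//   }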

#include "absl/debugging/internal/vdso_support.h"

#ifdef ABSL_HAVE_VDSO_SUPPORT     // defined in vdso_support.h

#include <errno.h>
#include <fcntl.h>
#include <sys/syscall.h>
#include <unistd.h>

#if __GLIBC_PREREQ(2, 16)  // GLIBC-2.16 implements getauxval.
#include <sys/auxv.h>
#endif

#include "absl/base/dynamic_annotations.h"
#include "absl/base/internal/raw_logging.h"
#include "absl/base/port.h"

#ifndef AT_SYSINFO_EHDR
#define AT_SYSINFO_EHDR 33  // for crosstoolv10
#endif

namespace absl {
ABSL_NAMESPACE_BEGIN
namespace debugging_internal {

ABSL_CONST_INIT
std::atomic<const void *> VDSOSupport::vdso_base_(
    debugging_internal::ElfMemImage::kInvalidBase);

std::atomic<VDSOSupport::GetCpuFn> VDSOSupport::getcpu_fn_(&InitAndGetCPU);
VDSOSupport::VDSOSupport()
    // If vdso_base_ is still set to kInvalidBase, we got here
    // before VDSOSupport::Init has been called. Call it now.
    : image_(vdso_base_.load(std::memory_order_relaxed) ==
                     debugging_internal::ElfMemImage::kInvalidBase
                 ? Init()
                 : vdso_base_.load(std::memory_order_relaxed)) {}

// NOTE: we can't use GoogleOnceInit() below, because we can be
// called by tcmalloc, and none of the *once* stuff may be functional yet.
//
// In addition, we hope that the VDSOSupportHelper constructor
// causes this code to run before there are any threads, and before
// InitGoogle() has executed any chroot or setuid calls.
//
// Finally, even if there is a race here, it is harmless, because
// the operation should be idempotent.
const void *VDSOSupport::Init() {
  const auto kInvalidBase = debugging_internal::ElfMemImage::kInvalidBase;
#if __GLIBC_PREREQ(2, 16)
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    errno = 0;
    const void *const sysinfo_ehdr =
        reinterpret_cast<const void *>(getauxval(AT_SYSINFO_EHDR));
    if (errno == 0) {
      vdso_base_.store(sysinfo_ehdr, std::memory_order_relaxed);
    }
  }
#endif  // __GLIBC_PREREQ(2, 16)
  if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
    // Valgrind zaps AT_SYSINFO_EHDR and friends from the auxv[]
    // on stack, and so glibc works as if VDSO was not present.
    // But going directly to kernel via /proc/self/auxv below bypasses
    // Valgrind zapping. So we check for Valgrind separately.
    if (RunningOnValgrind()) {
      vdso_base_.store(nullptr, std::memory_order_relaxed);
      getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
      return nullptr;
    }
    int fd = open("/proc/self/auxv", O_RDONLY);
    if (fd == -1) {
      // Kernel too old to have a VDSO.
      vdso_base_.store(nullptr, std::memory_order_relaxed);
      getcpu_fn_.store(&GetCPUViaSyscall, std::memory_order_relaxed);
      return nullptr;
    }
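    // For reference, each auxv record read below is, roughly (on LP64
    // targets),
    //   struct { uint64_t a_type; union { uint64_t a_val; } a_un; };
    // For AT_SYSINFO_EHDR, a_val holds the address of the VDSO's ELF header.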
    ElfW(auxv_t) aux;
    while (read(fd, &aux, sizeof(aux)) == sizeof(aux)) {
      if (aux.a_type == AT_SYSINFO_EHDR) {
        vdso_base_.store(reinterpret_cast<void *>(aux.a_un.a_val),
                         std::memory_order_relaxed);
        break;
      }
    }
    close(fd);
    if (vdso_base_.load(std::memory_order_relaxed) == kInvalidBase) {
      // Didn't find AT_SYSINFO_EHDR in auxv[].
      vdso_base_.store(nullptr, std::memory_order_relaxed);
    }
  }
  GetCpuFn fn = &GetCPUViaSyscall;  // default if VDSO not present.
  if (vdso_base_.load(std::memory_order_relaxed)) {
    VDSOSupport vdso;
    SymbolInfo info;
    if (vdso.LookupSymbol("__vdso_getcpu", "LINUX_2.6", STT_FUNC, &info)) {
      fn = reinterpret_cast<GetCpuFn>(const_cast<void *>(info.address));
    }
  }
  // Subtle: this code runs outside of any locks; prevent compiler
  // from assigning to getcpu_fn_ more than once.
  getcpu_fn_.store(fn, std::memory_order_relaxed);
  return vdso_base_.load(std::memory_order_relaxed);
}

const void *VDSOSupport::SetBase(const void *base) {
  ABSL_RAW_CHECK(base != debugging_internal::ElfMemImage::kInvalidBase,
                 "internal error");
  const void *old_base = vdso_base_.load(std::memory_order_relaxed);
  vdso_base_.store(base, std::memory_order_relaxed);
  image_.Init(base);
  // Also reset getcpu_fn_, so GetCPU could be tested with simulated VDSO.
  getcpu_fn_.store(&InitAndGetCPU, std::memory_order_relaxed);
  return old_base;
}

bool VDSOSupport::LookupSymbol(const char *name,
                               const char *version,
                               int type,
                               SymbolInfo *info) const {
  return image_.LookupSymbol(name, version, type, info);
}

bool VDSOSupport::LookupSymbolByAddress(const void *address,
                                        SymbolInfo *info_out) const {
  return image_.LookupSymbolByAddress(address, info_out);
}

// NOLINT on 'long' because this routine mimics kernel api.
long VDSOSupport::GetCPUViaSyscall(unsigned *cpu,  // NOLINT(runtime/int)
                                   void *, void *) {
#ifdef SYS_getcpu
  return syscall(SYS_getcpu, cpu, nullptr, nullptr);
#else
  // x86_64 never implemented sys_getcpu(), except as a VDSO call.
  static_cast<void>(cpu);  // Avoid an unused argument compiler warning.
  errno = ENOSYS;
  return -1;
#endif
}

// Use fast __vdso_getcpu if available.
long VDSOSupport::InitAndGetCPU(unsigned *cpu,  // NOLINT(runtime/int)
                                void *x, void *y) {
  Init();
  GetCpuFn fn = getcpu_fn_.load(std::memory_order_relaxed);
  ABSL_RAW_CHECK(fn != &InitAndGetCPU, "Init() did not set getcpu_fn_");
  return (*fn)(cpu, x, y);
}

// This function must be very fast, and may be called from very
// low level (e.g. tcmalloc). Hence I avoid things like
// GoogleOnceInit() and ::operator new.
ABSL_ATTRIBUTE_NO_SANITIZE_MEMORY
int GetCPU() {
  unsigned cpu;
  int ret_code = (*VDSOSupport::getcpu_fn_)(&cpu, nullptr, nullptr);
  return ret_code == 0 ? cpu : ret_code;
}
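
// Example (a sketch): callers that need a cheap current-CPU number can use
//   int cpu = absl::debugging_internal::GetCPU();
// A non-negative result is the CPU the calling thread was running on at the
// time of the call; a negative result means the underlying getcpu call failed.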

// We need to make sure VDSOSupport::Init() is called before
// InitGoogle() does any setuid or chroot calls.  If VDSOSupport
// is used in any global constructor, this will happen, since
// VDSOSupport's constructor calls Init.  But if not, we need to
// ensure it here, with a global constructor of our own.  This
// is an allowed exception to the normal rule against non-trivial
// global constructors.
static class VDSOInitHelper {
 public:
  VDSOInitHelper() { VDSOSupport::Init(); }
} vdso_init_helper;

}  // namespace debugging_internal
ABSL_NAMESPACE_END
}  // namespace absl

#endif  // ABSL_HAVE_VDSO_SUPPORT