/*
 * Copyright (C) 2017 The Android Open Source Project
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 *  * Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in
 *    the documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
 * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
 * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
 * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
 * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
 * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
 * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
 * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <gtest/gtest.h>

#include <dlfcn.h>
#include <link.h>
#include <string.h>
#if __has_include(<sys/auxv.h>)
#include <sys/auxv.h>
#endif

#include <string>
#include <unordered_map>

TEST(link, dl_iterate_phdr_early_exit) {
  static size_t call_count = 0;
  ASSERT_EQ(123, dl_iterate_phdr([](dl_phdr_info*, size_t, void*) { ++call_count; return 123; },
                                 nullptr));
  ASSERT_EQ(1u, call_count);
}
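
// Check what dl_iterate_phdr reports for each loaded module: the callback
// should be given the module's name and program header table, and the ELF
// header mapped by the first PT_LOAD segment should carry the ELF magic and
// an e_phnum that matches dlpi_phnum.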
TEST(link, dl_iterate_phdr) {
  struct Functor {
    static int Callback(dl_phdr_info* i, size_t s, void* data) {
      static_cast<Functor*>(data)->DoChecks(i, s);
      return 0;
    }
    void DoChecks(dl_phdr_info* info, size_t s) {
      ASSERT_EQ(sizeof(dl_phdr_info), s);

      ASSERT_TRUE(info->dlpi_name != nullptr);

      // An ELF file must have at least a PT_LOAD program header.
      ASSERT_NE(nullptr, info->dlpi_phdr);
      ASSERT_NE(0, info->dlpi_phnum);

      // Find the first PT_LOAD program header so we can find the ELF header.
      bool found_load = false;
      for (ElfW(Half) i = 0; i < info->dlpi_phnum; ++i) {
        const ElfW(Phdr)* phdr = reinterpret_cast<const ElfW(Phdr)*>(&info->dlpi_phdr[i]);
        if (phdr->p_type == PT_LOAD) {
          const ElfW(Ehdr)* ehdr = reinterpret_cast<const ElfW(Ehdr)*>(info->dlpi_addr +
                                                                       phdr->p_vaddr);
          // Does it look like an ELF file?
          ASSERT_EQ(0, memcmp(ehdr, ELFMAG, SELFMAG));
          // Does the e_phnum match what dl_iterate_phdr told us?
          ASSERT_EQ(info->dlpi_phnum, ehdr->e_phnum);
          found_load = true;
          break;
        }
      }
      ASSERT_EQ(true, found_load);
    }
    size_t count;
  } f = {};
  ASSERT_EQ(0, dl_iterate_phdr(Functor::Callback, &f));
}

// Verify that the module load/unload counters from dl_iterate_phdr are incremented.
TEST(link, dl_iterate_phdr_counters) {
  struct Counters {
    bool inited = false;
    uint64_t adds = 0;
    uint64_t subs = 0;
  };
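
  // Run dl_iterate_phdr once and return the dlpi_adds/dlpi_subs counters it
  // reports; the callback checks that every module sees the same values.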
  auto get_adds_subs = []() {
    auto callback = [](dl_phdr_info* info, size_t size, void* data) {
      Counters& counters = *static_cast<Counters*>(data);
      EXPECT_GE(size, sizeof(dl_phdr_info));
      if (!counters.inited) {
        counters.inited = true;
        counters.adds = info->dlpi_adds;
        counters.subs = info->dlpi_subs;
      } else {
        // The counters have the same value for each module.
        EXPECT_EQ(counters.adds, info->dlpi_adds);
        EXPECT_EQ(counters.subs, info->dlpi_subs);
      }
      return 0;
    };

    Counters counters {};
    EXPECT_EQ(0, dl_iterate_phdr(callback, &counters));
    EXPECT_TRUE(counters.inited);
    return counters;
  };

  // dlopen increments the 'adds' counter.
  const auto before_dlopen = get_adds_subs();
  void* const handle = dlopen("libtest_empty.so", RTLD_NOW);
  ASSERT_NE(nullptr, handle);
  const auto after_dlopen = get_adds_subs();
  ASSERT_LT(before_dlopen.adds, after_dlopen.adds);
  ASSERT_EQ(before_dlopen.subs, after_dlopen.subs);

  // dlclose increments the 'subs' counter.
  const auto before_dlclose = after_dlopen;
  dlclose(handle);
  const auto after_dlclose = get_adds_subs();
  ASSERT_EQ(before_dlclose.adds, after_dlclose.adds);
  ASSERT_LT(before_dlclose.subs, after_dlclose.subs);
}
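
// A program header table and its entry count, taken either from the ELF
// auxiliary vector or from a dl_phdr_info.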
struct ProgHdr {
  const ElfW(Phdr)* table;
  size_t size;
};
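
// The PT_PHDR entry holds the link-time address of the program header table,
// so subtracting it from the table's runtime address gives the executable's
// load bias.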
__attribute__((__unused__))
static ElfW(Addr) find_exe_load_bias(const ProgHdr& phdr) {
  for (size_t i = 0; i < phdr.size; ++i) {
    if (phdr.table[i].p_type == PT_PHDR) {
      return reinterpret_cast<ElfW(Addr)>(phdr.table) - phdr.table[i].p_vaddr;
    }
  }
  return 0;
}
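
// Adding the load bias to PT_DYNAMIC's p_vaddr gives the runtime address of
// the module's .dynamic array.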
__attribute__((__unused__))
static ElfW(Dyn)* find_dynamic(const ProgHdr& phdr, ElfW(Addr) load_bias) {
  for (size_t i = 0; i < phdr.size; ++i) {
    if (phdr.table[i].p_type == PT_DYNAMIC) {
      return reinterpret_cast<ElfW(Dyn)*>(phdr.table[i].p_vaddr + load_bias);
    }
  }
  return nullptr;
}
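
// The dynamic linker publishes the address of its _r_debug structure through
// the executable's DT_DEBUG dynamic entry.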
__attribute__((__unused__))
static r_debug* find_exe_r_debug(ElfW(Dyn)* dynamic) {
  for (ElfW(Dyn)* d = dynamic; d->d_tag != DT_NULL; ++d) {
    if (d->d_tag == DT_DEBUG) {
      return reinterpret_cast<r_debug*>(d->d_un.d_val);
    }
  }
  return nullptr;
}

// Walk the DT_DEBUG/_r_debug global module list and compare it with the same
// information from dl_iterate_phdr. Verify that the executable appears first
// in _r_debug.
TEST(link, r_debug) {
#if __has_include(<sys/auxv.h>)
  // Find the executable's PT_DYNAMIC segment and DT_DEBUG value. The linker
  // will write the address of its _r_debug global into the .dynamic section.
  ProgHdr exe_phdr = {
    .table = reinterpret_cast<ElfW(Phdr)*>(getauxval(AT_PHDR)),
    .size = getauxval(AT_PHNUM)
  };
  ASSERT_NE(nullptr, exe_phdr.table);
  ElfW(Addr) exe_load_bias = find_exe_load_bias(exe_phdr);
  ASSERT_NE(0u, exe_load_bias);
  ElfW(Dyn)* exe_dynamic = find_dynamic(exe_phdr, exe_load_bias);
  ASSERT_NE(nullptr, exe_dynamic);
  r_debug* dbg = find_exe_r_debug(exe_dynamic);
  ASSERT_NE(nullptr, dbg);

  // Use dl_iterate_phdr to build a table mapping from load bias values to
  // solib names and PT_DYNAMIC segments.
  struct DlIterateInfo {
    std::string name;
    ElfW(Dyn)* dynamic;
  };
  struct Functor {
    std::unordered_map<ElfW(Addr), DlIterateInfo> dl_iter_mods;
    static int Callback(dl_phdr_info* i, size_t s, void* data) {
      static_cast<Functor*>(data)->AddModule(i, s);
      return 0;
    }
    void AddModule(dl_phdr_info* info, size_t s) {
      ASSERT_EQ(sizeof(dl_phdr_info), s);
      ASSERT_TRUE(dl_iter_mods.find(info->dlpi_addr) == dl_iter_mods.end());
      ASSERT_TRUE(info->dlpi_name != nullptr);
      dl_iter_mods[info->dlpi_addr] = {
        .name = info->dlpi_name,
        .dynamic = find_dynamic({ info->dlpi_phdr, info->dlpi_phnum }, info->dlpi_addr)
      };
    }
  } f = {};
  ASSERT_EQ(0, dl_iterate_phdr(Functor::Callback, &f));

  size_t map_size = 0;
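
  // Every module in the _r_debug list must also have been seen by
  // dl_iterate_phdr, with a matching name and PT_DYNAMIC address.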
  for (link_map* map = dbg->r_map; map != nullptr; map = map->l_next) {
    ASSERT_NE(0u, map->l_addr);
    ASSERT_NE(nullptr, map->l_ld);
    ASSERT_NE(nullptr, map->l_name);

    auto it = f.dl_iter_mods.find(map->l_addr);
    ASSERT_TRUE(it != f.dl_iter_mods.end());
    const DlIterateInfo& info = it->second;
    ASSERT_EQ(info.name, map->l_name);
    ASSERT_EQ(info.dynamic, map->l_ld);

    ++map_size;
  }

  // _r_debug and dl_iterate_phdr should report the same set of modules. We
  // verified above that every _r_debug module was reported by dl_iterate_phdr,
  // so checking the sizes verifies the converse.
  ASSERT_EQ(f.dl_iter_mods.size(), map_size);

  // Make sure the first entry is the executable. gdbserver assumes this and
  // removes the first entry from its list of shared objects that it sends back
  // to gdb.
  ASSERT_EQ(exe_load_bias, dbg->r_map->l_addr);
  ASSERT_EQ(exe_dynamic, dbg->r_map->l_ld);
#endif
}

#if __arm__
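// Each .ARM.exidx entry begins with a prel31 field: a 31-bit offset, relative
// to the address of the field itself, to the function the entry describes.
// read_exidx_func decodes such a field back into an absolute address.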
static uintptr_t read_exidx_func(uintptr_t* entry) {
  int32_t offset = *entry;
  // Sign-extend from int31 to int32.
  if ((offset & 0x40000000) != 0) {
    offset += -0x7fffffff - 1;
  }
  return reinterpret_cast<uintptr_t>(entry) + offset;
}
__attribute__((__unused__)) static void another_function_in_same_ELF_file() {}
#endif
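
// dl_unwind_find_exidx returns the .ARM.exidx exception index table of the
// object that contains the given address and stores the number of entries in
// the int it is passed.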
TEST(link, dl_unwind_find_exidx) {
#if __arm__
  int count = 0;
  struct eit_entry_t {
    uintptr_t one;
    uintptr_t two;
  };
  eit_entry_t* entries = reinterpret_cast<eit_entry_t*>(dl_unwind_find_exidx(
      reinterpret_cast<_Unwind_Ptr>(read_exidx_func), &count));
  ASSERT_TRUE(entries != nullptr);
  ASSERT_GT(count, 0);

  // Validity checks.
  uintptr_t func = reinterpret_cast<uintptr_t>(read_exidx_func);
  bool found = false;
  for (int i = 0; i < count; ++i) {
    // Entries must have bit 31 clear.
    ASSERT_TRUE((entries[i].one & (1<<31)) == 0);

    uintptr_t exidx_func = read_exidx_func(&entries[i].one);

    // If our function is compiled for thumb, exception table contains our address - 1.
    if (func == exidx_func || func == exidx_func + 1) found = true;

    // Entries must be sorted. Some addresses may appear twice if function
    // is compiled for arm.
    if (i > 0) {
      EXPECT_GE(exidx_func, read_exidx_func(&entries[i - 1].one)) << i;
    }
  }
  ASSERT_TRUE(found);
#else
  GTEST_SKIP() << "dl_unwind_find_exidx is an ARM-only API";
#endif
}