// Copyright 2017 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#include "components/zucchini/address_translator.h"

#include <algorithm>
#include <tuple>
#include <utility>

#include "base/check_op.h"
#include "base/containers/cxx20_erase.h"

namespace zucchini {

/******** AddressTranslator::OffsetToRvaCache ********/

AddressTranslator::OffsetToRvaCache::OffsetToRvaCache(
    const AddressTranslator& translator)
    : translator_(translator) {}

rva_t AddressTranslator::OffsetToRvaCache::Convert(offset_t offset) const {
  if (offset >= translator_.fake_offset_begin_) {
    // Rely on |translator_| to handle this special case.
    return translator_.OffsetToRva(offset);
  }
  if (cached_unit_ && cached_unit_->CoversOffset(offset))
    return cached_unit_->OffsetToRvaUnsafe(offset);
  const AddressTranslator::Unit* unit = translator_.OffsetToUnit(offset);
  if (!unit)
    return kInvalidRva;
  cached_unit_ = unit;
  return unit->OffsetToRvaUnsafe(offset);
}

/******** AddressTranslator::RvaToOffsetCache ********/

AddressTranslator::RvaToOffsetCache::RvaToOffsetCache(
    const AddressTranslator& translator)
    : translator_(translator) {}

bool AddressTranslator::RvaToOffsetCache::IsValid(rva_t rva) const {
  if (rva == kInvalidRva)
    return false;
  if (!cached_unit_ || !cached_unit_->CoversRva(rva)) {
    const AddressTranslator::Unit* unit = translator_.RvaToUnit(rva);
    if (!unit)
      return false;
    cached_unit_ = unit;
  }
  return true;
}

offset_t AddressTranslator::RvaToOffsetCache::Convert(rva_t rva) const {
  if (!cached_unit_ || !cached_unit_->CoversRva(rva)) {
    const AddressTranslator::Unit* unit = translator_.RvaToUnit(rva);
    if (!unit)
      return kInvalidOffset;
    cached_unit_ = unit;
  }
  return cached_unit_->RvaToOffsetUnsafe(rva, translator_.fake_offset_begin_);
}

/******** AddressTranslator ********/

AddressTranslator::AddressTranslator() = default;

AddressTranslator::AddressTranslator(AddressTranslator&&) = default;

AddressTranslator::~AddressTranslator() = default;

AddressTranslator::Status AddressTranslator::Initialize(
    std::vector<Unit>&& units) {
  for (Unit& unit : units) {
    // Check for overflows and fail if found.
    if (!RangeIsBounded<offset_t>(unit.offset_begin, unit.offset_size,
                                  kOffsetBound) ||
        !RangeIsBounded<rva_t>(unit.rva_begin, unit.rva_size, kRvaBound)) {
      return kErrorOverflow;
    }
    // If |rva_size < offset_size|: Just shrink |offset_size| to accommodate.
    unit.offset_size = std::min(unit.offset_size, unit.rva_size);
    // Now |rva_size >= offset_size|. Note that |rva_size > offset_size| is
    // allowed; these lead to dangling RVA.
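    // Example (hypothetical values): a Unit with |offset_size| = 0x30 and
    // |rva_size| = 0x20 is shrunk to |offset_size| = 0x20, whereas a Unit with
    // |offset_size| = 0x20 and |rva_size| = 0x30 is kept as-is and ends up
    // with 0x10 dangling RVAs at the end of its RVA range.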
  }

  // Remove all empty units.
  base::EraseIf(units, [](const Unit& unit) { return unit.IsEmpty(); });

  // Sort |units| by RVA, then uniquefy.
  std::sort(units.begin(), units.end(), [](const Unit& a, const Unit& b) {
    return std::tie(a.rva_begin, a.rva_size) <
           std::tie(b.rva_begin, b.rva_size);
  });
  units.erase(std::unique(units.begin(), units.end()), units.end());

  // Scan for RVA range overlaps, validate, and merge wherever possible.
  if (units.size() > 1) {
    // Traverse with two iterators: |slow| stays behind and modifies Units that
    // absorb all overlapping (or tangent if suitable) Units; |fast| explores
    // new Units as candidates for consistency checks and potential merge into
    // |slow|.
    auto slow = units.begin();

    // All |it| with |slow| < |it| < |fast| contain garbage.
    for (auto fast = slow + 1; fast != units.end(); ++fast) {
      // Comment notation: S = slow offset, F = fast offset, O = overlap offset,
      // s = slow RVA, f = fast RVA, o = overlap RVA.
      DCHECK_GE(fast->rva_begin, slow->rva_begin);
      if (slow->rva_end() < fast->rva_begin) {
        // ..ssssss..ffffff..: Disjoint: Can advance |slow|.
        *(++slow) = *fast;
        continue;
      }

      // ..ssssffff..: Tangent: Merge is optional.
      // ..sssooofff.. / ..sssooosss..: Overlap: Merge is required.
      bool merge_is_optional = slow->rva_end() == fast->rva_begin;

      // Check whether |fast| and |slow| have identical RVA -> offset shift.
      // If not, then merge cannot be resolved. Examples:
      // ..ssssffff.. -> ..SSSSFFFF..: Good, can merge.
      // ..ssssffff.. -> ..SSSS..FFFF..: Non-fatal: don't merge.
      // ..ssssffff.. -> ..FFFF..SSSS..: Non-fatal: don't merge.
      // ..ssssffff.. -> ..SSOOFF..: Fatal: Ignore for now (handled later).
      // ..sssooofff.. -> ..SSSOOOFFF..: Good, can merge.
      // ..sssooofff.. -> ..SSSSSOFFFFF..: Fatal.
      // ..sssooofff.. -> ..FFOOOOSS..: Fatal.
      // ..sssooofff.. -> ..SSSOOOF..: Good, notice |fast| has dangling RVAs.
      // ..oooooo.. -> ..OOOOOO..: Good, can merge.
      if (fast->offset_begin < slow->offset_begin ||
          fast->offset_begin - slow->offset_begin !=
              fast->rva_begin - slow->rva_begin) {
        if (merge_is_optional) {
          *(++slow) = *fast;
          continue;
        }
        return kErrorBadOverlap;
      }

      // Check whether dangling RVAs (if they exist) are consistent. Examples:
      // ..sssooofff.. -> ..SSSOOOF..: Good, can merge.
      // ..sssooosss.. -> ..SSSOOOS..: Good, can merge.
      // ..sssooofff.. -> ..SSSOO..: Good, can merge.
      // ..sssooofff.. -> ..SSSOFFF..: Fatal.
      // ..sssooosss.. -> ..SSSOOFFFF..: Fatal.
      // ..oooooo.. -> ..OOO..: Good, can merge.
      // Idea of check: Suppose |fast| has dangling RVAs. Then
      // |[fast->rva_begin, fast->rva_begin + fast->offset_size)| maps to
      // |[fast->offset_begin, fast->offset_end())|, with the remaining RVAs
      // mapping to fake offsets. This means |fast->offset_end()| must be >=
      // |slow->offset_end()|, and failure to do so results in error. The
      // argument for |slow| having dangling RVAs is symmetric.
      if ((fast->HasDanglingRva() && fast->offset_end() < slow->offset_end()) ||
          (slow->HasDanglingRva() && slow->offset_end() < fast->offset_end())) {
        if (merge_is_optional) {
          *(++slow) = *fast;
          continue;
        }
        return kErrorBadOverlapDanglingRva;
      }

      // Merge |fast| into |slow|.
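      // Example (hypothetical values): merging |slow| = {offsets
      // [0x100, 0x130), RVAs [0x1000, 0x1030)} and |fast| = {offsets
      // [0x120, 0x150), RVAs [0x1020, 0x1050)}, which share the same
      // RVA -> offset shift, yields |slow| = {offsets [0x100, 0x150),
      // RVAs [0x1000, 0x1050)}.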
      slow->rva_size =
          std::max(slow->rva_size, fast->rva_end() - slow->rva_begin);
      slow->offset_size =
          std::max(slow->offset_size, fast->offset_end() - slow->offset_begin);
    }
    ++slow;
    units.erase(slow, units.end());
  }

  // After resolving RVA overlaps, any offset overlap would imply error.
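  // (In particular, this catches the ..SSOOFF.. case deferred from the merge
  // pass above, where tangent RVA ranges map to overlapping offsets.)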
  std::sort(units.begin(), units.end(), [](const Unit& a, const Unit& b) {
    return a.offset_begin < b.offset_begin;
  });

  if (units.size() > 1) {
    auto previous = units.begin();
    for (auto current = previous + 1; current != units.end(); ++current) {
      if (previous->offset_end() > current->offset_begin)
        return kErrorBadOverlap;
      previous = current;
    }
  }

  // For the fake offset heuristic: Compute exclusive upper bounds for offsets
  // and RVAs.
  offset_t offset_bound = 0;
  rva_t rva_bound = 0;
  for (const Unit& unit : units) {
    offset_bound = std::max(offset_bound, unit.offset_end());
    rva_bound = std::max(rva_bound, unit.rva_end());
  }

  // Compute the pessimistic range and see if it still fits within the space of
  // valid offsets. This limits image size to one half of |kOffsetBound|, and
  // is the main drawback of the current heuristic of converting dangling RVAs
  // to fake offsets.
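  // (A dangling RVA |rva| maps to fake offset |offset_bound + rva|, so the
  // largest fake offset that can arise is below |offset_bound + rva_bound|;
  // see OffsetToRva() below.)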
  if (!RangeIsBounded(offset_bound, rva_bound, kOffsetBound))
    return kErrorFakeOffsetBeginTooLarge;

  // Success. Store results. |units| is currently sorted by offset, so assign.
  units_sorted_by_offset_.assign(units.begin(), units.end());

  // Sort |units| by RVA, and just store it directly.
  std::sort(units.begin(), units.end(), [](const Unit& a, const Unit& b) {
    return a.rva_begin < b.rva_begin;
  });
  units_sorted_by_rva_ = std::move(units);

  fake_offset_begin_ = offset_bound;
  return kSuccess;
}

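// Illustrative example (hypothetical values): a translator holding a single
// Unit with offsets [0x100, 0x140) and RVAs [0x1000, 0x1050) has 0x10
// dangling RVAs and |fake_offset_begin_| == 0x140. Then:
//   OffsetToRva(0x110)  == 0x1010 (regular mapping).
//   RvaToOffset(0x1010) == 0x110  (regular mapping).
//   RvaToOffset(0x1045) == 0x140 + 0x1045 (dangling RVA -> fake offset).
//   OffsetToRva(0x140 + 0x1045) == 0x1045 (fake offset -> dangling RVA).
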
rva_t AddressTranslator::OffsetToRva(offset_t offset) const {
  if (offset >= fake_offset_begin_) {
    // Handle dangling RVA: First shift it to regular RVA space.
    rva_t rva = offset - fake_offset_begin_;
    // If result is indeed a dangling RVA, return it; else return
    // |kInvalidRva|.
    const Unit* unit = RvaToUnit(rva);
    return (unit && unit->HasDanglingRva() && unit->CoversDanglingRva(rva))
               ? rva
               : kInvalidRva;
  }
  const Unit* unit = OffsetToUnit(offset);
  return unit ? unit->OffsetToRvaUnsafe(offset) : kInvalidRva;
}

offset_t AddressTranslator::RvaToOffset(rva_t rva) const {
  const Unit* unit = RvaToUnit(rva);
  // This also handles dangling RVA.
  return unit ? unit->RvaToOffsetUnsafe(rva, fake_offset_begin_)
              : kInvalidOffset;
}

const AddressTranslator::Unit* AddressTranslator::OffsetToUnit(
    offset_t offset) const {
  // Find the first Unit with |offset_begin| > |offset|, then rewind by 1 to
  // find the last Unit with |offset_begin| <= |offset| (if it exists).
  auto it = std::upper_bound(
      units_sorted_by_offset_.begin(), units_sorted_by_offset_.end(), offset,
      [](offset_t a, const Unit& b) { return a < b.offset_begin; });
  if (it == units_sorted_by_offset_.begin())
    return nullptr;
  --it;
  return it->CoversOffset(offset) ? &(*it) : nullptr;
}

const AddressTranslator::Unit* AddressTranslator::RvaToUnit(rva_t rva) const {
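  // As in OffsetToUnit(): find the first Unit with |rva_begin| > |rva|, then
  // rewind by 1 to find the last Unit with |rva_begin| <= |rva| (if it
  // exists).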
  auto it = std::upper_bound(
      units_sorted_by_rva_.begin(), units_sorted_by_rva_.end(), rva,
      [](rva_t a, const Unit& b) { return a < b.rva_begin; });
  if (it == units_sorted_by_rva_.begin())
    return nullptr;
  --it;
  return it->CoversRva(rva) ? &(*it) : nullptr;
}

}  // namespace zucchini