/*
 * Copyright (C) 2015 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "ResourceTable.h"
#include "ResourceValues.h"
#include "ValueVisitor.h"

#include "flatten/ChunkWriter.h"
#include "flatten/ResourceTypeExtensions.h"
#include "flatten/TableFlattener.h"
#include "util/BigBuffer.h"

#include <android-base/macros.h>
#include <algorithm>
#include <cstring>
#include <limits>
#include <map>
#include <numeric>
#include <string>
#include <type_traits>
#include <vector>

using namespace android;

namespace aapt {

namespace {

template <typename T>
static bool cmpIds(const T* a, const T* b) {
    return a->id.value() < b->id.value();
}

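// Copies the UTF-16 string in 'src' into 'dst', converting each code unit to
// device (little-endian) byte order. At most 'len' - 1 code units are copied
// and the destination is always NUL-terminated.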
static void strcpy16_htod(uint16_t* dst, size_t len, const StringPiece16& src) {
    if (len == 0) {
        return;
    }

    size_t i;
    const char16_t* srcData = src.data();
    for (i = 0; i < len - 1 && i < src.size(); i++) {
        dst[i] = util::hostToDevice16((uint16_t) srcData[i]);
    }
    dst[i] = 0;
}

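// Orders style entries so that entries with assigned attribute IDs come first,
// sorted by ID, followed by entries that only have names, sorted by name.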
static bool cmpStyleEntries(const Style::Entry& a, const Style::Entry& b) {
    if (a.key.id) {
        if (b.key.id) {
            return a.key.id.value() < b.key.id.value();
        }
        return true;
    } else if (!b.key.id) {
        return a.key.name.value() < b.key.name.value();
    }
    return false;
}

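// A single (entry, value) pair for one configuration, along with the index of
// the entry's name in the key string pool.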
struct FlatEntry {
    ResourceEntry* entry;
    Value* value;

    // The index of the entry's name in the key string pool.
    uint32_t entryKey;
};

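// Visitor that flattens a complex (non-Item) value -- Attribute, Style,
// Styleable, Array, or Plural -- into a series of ResTable_map structures
// appended to the output buffer, updating the enclosing ResTable_entry_ext
// (parent reference and map count) as it goes.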
class MapFlattenVisitor : public RawValueVisitor {
public:
    using RawValueVisitor::visit;

    MapFlattenVisitor(ResTable_entry_ext* outEntry, BigBuffer* buffer) :
            mOutEntry(outEntry), mBuffer(buffer) {
    }

    void visit(Attribute* attr) override {
        {
            Reference key = Reference(ResTable_map::ATTR_TYPE);
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, attr->typeMask);
            flattenEntry(&key, &val);
        }

        if (attr->minInt != std::numeric_limits<int32_t>::min()) {
            Reference key = Reference(ResTable_map::ATTR_MIN);
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, static_cast<uint32_t>(attr->minInt));
            flattenEntry(&key, &val);
        }

        if (attr->maxInt != std::numeric_limits<int32_t>::max()) {
            Reference key = Reference(ResTable_map::ATTR_MAX);
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, static_cast<uint32_t>(attr->maxInt));
            flattenEntry(&key, &val);
        }

        for (Attribute::Symbol& s : attr->symbols) {
            BinaryPrimitive val(Res_value::TYPE_INT_DEC, s.value);
            flattenEntry(&s.symbol, &val);
        }
    }

    void visit(Style* style) override {
        if (style->parent) {
            const Reference& parentRef = style->parent.value();
            assert(parentRef.id && "parent has no ID");
            mOutEntry->parent.ident = util::hostToDevice32(parentRef.id.value().id);
        }

        // Sort the style's entries by attribute ID.
        std::sort(style->entries.begin(), style->entries.end(), cmpStyleEntries);

        for (Style::Entry& entry : style->entries) {
            flattenEntry(&entry.key, entry.value.get());
        }
    }

    void visit(Styleable* styleable) override {
        for (auto& attrRef : styleable->entries) {
            BinaryPrimitive val(Res_value{});
            flattenEntry(&attrRef, &val);
        }
    }

    void visit(Array* array) override {
        for (auto& item : array->items) {
            ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
            flattenValue(item.get(), outEntry);
            outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
            mEntryCount++;
        }
    }

    void visit(Plural* plural) override {
        const size_t count = plural->values.size();
        for (size_t i = 0; i < count; i++) {
            if (!plural->values[i]) {
                continue;
            }

            ResourceId q;
            switch (i) {
            case Plural::Zero:
                q.id = android::ResTable_map::ATTR_ZERO;
                break;

            case Plural::One:
                q.id = android::ResTable_map::ATTR_ONE;
                break;

            case Plural::Two:
                q.id = android::ResTable_map::ATTR_TWO;
                break;

            case Plural::Few:
                q.id = android::ResTable_map::ATTR_FEW;
                break;

            case Plural::Many:
                q.id = android::ResTable_map::ATTR_MANY;
                break;

            case Plural::Other:
                q.id = android::ResTable_map::ATTR_OTHER;
                break;

            default:
                assert(false);
                break;
            }

            Reference key(q);
            flattenEntry(&key, plural->values[i].get());
        }
    }

    /**
     * Call this after visiting a Value. This will finish any work that
     * needs to be done to prepare the entry.
     */
    void finish() {
        mOutEntry->count = util::hostToDevice32(mEntryCount);
    }

private:
    void flattenKey(Reference* key, ResTable_map* outEntry) {
        assert(key->id && "key has no ID");
        outEntry->name.ident = util::hostToDevice32(key->id.value().id);
    }

    void flattenValue(Item* value, ResTable_map* outEntry) {
        bool result = value->flatten(&outEntry->value);
        assert(result && "flatten failed");
    }

    void flattenEntry(Reference* key, Item* value) {
        ResTable_map* outEntry = mBuffer->nextBlock<ResTable_map>();
        flattenKey(key, outEntry);
        flattenValue(value, outEntry);
        outEntry->value.size = util::hostToDevice16(sizeof(outEntry->value));
        mEntryCount++;
    }

    ResTable_entry_ext* mOutEntry;
    BigBuffer* mBuffer;
    size_t mEntryCount = 0;
};

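// Flattens a single ResourceTablePackage into a ResTable_package chunk:
// the package header, the type and key string pools, and then all of the
// ResTable_typeSpec and ResTable_type chunks for its types.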
class PackageFlattener {
public:
    PackageFlattener(IDiagnostics* diag, ResourceTablePackage* package) :
            mDiag(diag), mPackage(package) {
    }

    bool flattenPackage(BigBuffer* buffer) {
        ChunkWriter pkgWriter(buffer);
        ResTable_package* pkgHeader = pkgWriter.startChunk<ResTable_package>(
                RES_TABLE_PACKAGE_TYPE);
        pkgHeader->id = util::hostToDevice32(mPackage->id.value());

        if (mPackage->name.size() >= arraysize(pkgHeader->name)) {
            mDiag->error(DiagMessage() <<
                    "package name '" << mPackage->name << "' is too long");
            return false;
        }

        // Copy the package name in device endianness.
        strcpy16_htod(pkgHeader->name, arraysize(pkgHeader->name), mPackage->name);

        // Serialize the types into a separate buffer first, so that the type and
        // key string pools are fully populated before we write them out here.
        BigBuffer typeBuffer(1024);
        flattenTypes(&typeBuffer);

        pkgHeader->typeStrings = util::hostToDevice32(pkgWriter.size());
        StringPool::flattenUtf16(pkgWriter.getBuffer(), mTypePool);

        pkgHeader->keyStrings = util::hostToDevice32(pkgWriter.size());
        StringPool::flattenUtf16(pkgWriter.getBuffer(), mKeyPool);

        // Append the flattened types after the string pools.
        buffer->appendBuffer(std::move(typeBuffer));

        pkgWriter.finish();
        return true;
    }

private:
    IDiagnostics* mDiag;
    ResourceTablePackage* mPackage;
    StringPool mTypePool;
    StringPool mKeyPool;

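    // Writes the entry header for a flat entry: ResTable_entry for simple Items,
    // ResTable_entry_ext for complex values. Sets the PUBLIC, WEAK, and COMPLEX
    // flags as appropriate and converts all fields to device byte order.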
    template <typename T, bool IsItem>
    T* writeEntry(FlatEntry* entry, BigBuffer* buffer) {
        static_assert(std::is_same<ResTable_entry, T>::value ||
                      std::is_same<ResTable_entry_ext, T>::value,
                      "T must be ResTable_entry or ResTable_entry_ext");

        T* result = buffer->nextBlock<T>();
        ResTable_entry* outEntry = (ResTable_entry*)(result);
        if (entry->entry->symbolStatus.state == SymbolState::kPublic) {
            outEntry->flags |= ResTable_entry::FLAG_PUBLIC;
        }

        if (entry->value->isWeak()) {
            outEntry->flags |= ResTable_entry::FLAG_WEAK;
        }

        if (!IsItem) {
            outEntry->flags |= ResTable_entry::FLAG_COMPLEX;
        }

        outEntry->flags = util::hostToDevice16(outEntry->flags);
        outEntry->key.index = util::hostToDevice32(entry->entryKey);
        outEntry->size = util::hostToDevice16(sizeof(T));
        return result;
    }

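    // Flattens a single value. Simple Items become a ResTable_entry followed by
    // one Res_value; complex values become a ResTable_entry_ext followed by their
    // ResTable_map entries, written by MapFlattenVisitor.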
    bool flattenValue(FlatEntry* entry, BigBuffer* buffer) {
        if (Item* item = valueCast<Item>(entry->value)) {
            writeEntry<ResTable_entry, true>(entry, buffer);
            Res_value* outValue = buffer->nextBlock<Res_value>();
            bool result = item->flatten(outValue);
            assert(result && "flatten failed");
            outValue->size = util::hostToDevice16(sizeof(*outValue));
        } else {
            ResTable_entry_ext* outEntry = writeEntry<ResTable_entry_ext, false>(entry, buffer);
            MapFlattenVisitor visitor(outEntry, buffer);
            entry->value->accept(&visitor);
            visitor.finish();
        }
        return true;
    }

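    // Writes one ResTable_type chunk for the given configuration: an offset table
    // indexed by entry ID (0xffffffff marks entries missing in this configuration)
    // followed by the flattened entries themselves.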
    bool flattenConfig(const ResourceTableType* type, const ConfigDescription& config,
                       std::vector<FlatEntry>* entries, BigBuffer* buffer) {
        ChunkWriter typeWriter(buffer);
        ResTable_type* typeHeader = typeWriter.startChunk<ResTable_type>(RES_TABLE_TYPE_TYPE);
        typeHeader->id = type->id.value();
        typeHeader->config = config;
        typeHeader->config.swapHtoD();

        auto maxAccum = [](uint32_t max, const std::unique_ptr<ResourceEntry>& a) -> uint32_t {
            return std::max(max, (uint32_t) a->id.value());
        };

        // Find the largest entry ID. That (plus one) is how many index slots we need.
        const uint32_t entryCount =
                std::accumulate(type->entries.begin(), type->entries.end(), 0, maxAccum) + 1;

        typeHeader->entryCount = util::hostToDevice32(entryCount);
        uint32_t* indices = typeWriter.nextBlock<uint32_t>(entryCount);

        assert((size_t) entryCount <= std::numeric_limits<uint16_t>::max() + 1);
        memset(indices, 0xff, entryCount * sizeof(uint32_t));

        typeHeader->entriesStart = util::hostToDevice32(typeWriter.size());

        const size_t entryStart = typeWriter.getBuffer()->size();
        for (FlatEntry& flatEntry : *entries) {
            assert(flatEntry.entry->id.value() < entryCount);
            indices[flatEntry.entry->id.value()] = util::hostToDevice32(
                    typeWriter.getBuffer()->size() - entryStart);
            if (!flattenValue(&flatEntry, typeWriter.getBuffer())) {
                mDiag->error(DiagMessage()
                        << "failed to flatten resource '"
                        << ResourceNameRef(mPackage->name, type->type, flatEntry.entry->name)
                        << "' for configuration '" << config << "'");
                return false;
            }
        }
        typeWriter.finish();
        return true;
    }

    std::vector<ResourceTableType*> collectAndSortTypes() {
        std::vector<ResourceTableType*> sortedTypes;
        for (auto& type : mPackage->types) {
            if (type->type == ResourceType::kStyleable) {
                // Styleables aren't real resource types; they exist only as
                // generated constants in the R.java file.
                continue;
            }

            assert(type->id && "type must have an ID set");

            sortedTypes.push_back(type.get());
        }
        std::sort(sortedTypes.begin(), sortedTypes.end(), cmpIds<ResourceTableType>);
        return sortedTypes;
    }

    std::vector<ResourceEntry*> collectAndSortEntries(ResourceTableType* type) {
        // Sort the entries by entry ID.
        std::vector<ResourceEntry*> sortedEntries;
        for (auto& entry : type->entries) {
            assert(entry->id && "entry must have an ID set");
            sortedEntries.push_back(entry.get());
        }
        std::sort(sortedEntries.begin(), sortedEntries.end(), cmpIds<ResourceEntry>);
        return sortedEntries;
    }

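    // Writes the ResTable_typeSpec chunk for a type: one configuration-change mask
    // per entry ID. Each mask gets the SPEC_PUBLIC bit if the entry is public, plus
    // the diff of every pair of configurations in which the entry is defined.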
    bool flattenTypeSpec(ResourceTableType* type, std::vector<ResourceEntry*>* sortedEntries,
                         BigBuffer* buffer) {
        ChunkWriter typeSpecWriter(buffer);
        ResTable_typeSpec* specHeader = typeSpecWriter.startChunk<ResTable_typeSpec>(
                RES_TABLE_TYPE_SPEC_TYPE);
        specHeader->id = type->id.value();

        if (sortedEntries->empty()) {
            typeSpecWriter.finish();
            return true;
        }

        // We can't just take the size of the vector; there may be holes in the entry ID space.
        // Since the entries are sorted by ID, the last one has the largest ID.
        const size_t numEntries = sortedEntries->back()->id.value() + 1;

        specHeader->entryCount = util::hostToDevice32(numEntries);

        // Reserve space for the masks of each resource in this type. These masks
        // record which configuration axes cause the resource to change.
        uint32_t* configMasks = typeSpecWriter.nextBlock<uint32_t>(numEntries);

        const size_t actualNumEntries = sortedEntries->size();
        for (size_t entryIndex = 0; entryIndex < actualNumEntries; entryIndex++) {
            ResourceEntry* entry = sortedEntries->at(entryIndex);

            // Populate the config masks for this entry.

            if (entry->symbolStatus.state == SymbolState::kPublic) {
                configMasks[entry->id.value()] |=
                        util::hostToDevice32(ResTable_typeSpec::SPEC_PUBLIC);
            }

            const size_t configCount = entry->values.size();
            for (size_t i = 0; i < configCount; i++) {
                const ConfigDescription& config = entry->values[i]->config;
                for (size_t j = i + 1; j < configCount; j++) {
                    configMasks[entry->id.value()] |= util::hostToDevice32(
                            config.diff(entry->values[j]->config));
                }
            }
        }
        typeSpecWriter.finish();
        return true;
    }

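    // Flattens all types in the package. For each type, in ID order, this adds the
    // type name to the type string pool, writes the ResTable_typeSpec chunk, then
    // groups the type's entries by configuration and writes one ResTable_type
    // chunk per configuration.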
    bool flattenTypes(BigBuffer* buffer) {
        // Sort the types by their IDs. They will be inserted into the StringPool in this order.
        std::vector<ResourceTableType*> sortedTypes = collectAndSortTypes();

        size_t expectedTypeId = 1;
        for (ResourceTableType* type : sortedTypes) {
            // If there is a gap in the type IDs, fill in the StringPool
            // with placeholder names until we reach the ID we expect.
            while (type->id.value() > expectedTypeId) {
                std::u16string typeName(u"?");
                typeName += expectedTypeId;
                mTypePool.makeRef(typeName);
                expectedTypeId++;
            }
            expectedTypeId++;
            mTypePool.makeRef(toString(type->type));

            std::vector<ResourceEntry*> sortedEntries = collectAndSortEntries(type);

            if (!flattenTypeSpec(type, &sortedEntries, buffer)) {
                return false;
            }

            // The binary resource table lists resource entries for each configuration.
            // We store them inverted, where a resource entry lists the values for each
            // configuration available. Here we reverse this to match the binary table.
            std::map<ConfigDescription, std::vector<FlatEntry>> configToEntryListMap;
            for (ResourceEntry* entry : sortedEntries) {
                const uint32_t keyIndex = (uint32_t) mKeyPool.makeRef(entry->name).getIndex();

                // Group values by configuration.
                for (auto& configValue : entry->values) {
                    configToEntryListMap[configValue->config].push_back(FlatEntry{
                            entry, configValue->value.get(), keyIndex });
                }
            }

            // Flatten the entries of each configuration into its own ResTable_type chunk.
            for (auto& entry : configToEntryListMap) {
                if (!flattenConfig(type, entry.first, &entry.second, buffer)) {
                    return false;
                }
            }
        }
        return true;
    }
};

} // namespace

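// Flattens the whole ResourceTable into the binary resource table layout:
//
//   ResTable_header
//     global value string pool (UTF-8)
//     ResTable_package (one per package)
//       type string pool (UTF-16)
//       key string pool (UTF-16)
//       ResTable_typeSpec / ResTable_type chunks
//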
bool TableFlattener::consume(IAaptContext* context, ResourceTable* table) {
    // We must sort and prune the string pool before writing the resources, since the
    // string pool indices may change as a result.
    table->stringPool.sort([](const StringPool::Entry& a, const StringPool::Entry& b) -> bool {
        int diff = a.context.priority - b.context.priority;
        if (diff < 0) return true;
        if (diff > 0) return false;
        diff = a.context.config.compare(b.context.config);
        if (diff < 0) return true;
        if (diff > 0) return false;
        return a.value < b.value;
    });
    table->stringPool.prune();

    // Write the ResTable header.
    ChunkWriter tableWriter(mBuffer);
    ResTable_header* tableHeader = tableWriter.startChunk<ResTable_header>(RES_TABLE_TYPE);
    tableHeader->packageCount = util::hostToDevice32(table->packages.size());

    // Flatten the values string pool.
    StringPool::flattenUtf8(tableWriter.getBuffer(), table->stringPool);

    BigBuffer packageBuffer(1024);

    // Flatten each package.
    for (auto& package : table->packages) {
        PackageFlattener flattener(context->getDiagnostics(), package.get());
        if (!flattener.flattenPackage(&packageBuffer)) {
            return false;
        }
    }

    // Finally, merge all the packages into the main buffer.
    tableWriter.getBuffer()->appendBuffer(std::move(packageBuffer));
    tableWriter.finish();
    return true;
}

} // namespace aapt