/*
 * Copyright (C) 2022 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "code_info_table_deduper.h"

#include "stack_map.h"

namespace art {
namespace linker {

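// The `CodeInfoTableDeduper` shares identical bit tables between the
// `CodeInfo` blobs written to the output: a repeated table is replaced by a
// compact back-reference to its first occurrence, which the `CodeInfo`
// reader follows transparently (see the debug check at the end of `Dedupe()`).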
void CodeInfoTableDeduper::ReserveDedupeBuffer(size_t num_code_infos) {
  DCHECK(dedupe_set_.empty());
  const size_t max_size = num_code_infos * CodeInfo::kNumBitTables;
  // Reserve space for 1/2 of the maximum dedupe set size to avoid rehashing.
  // Usually only 30%-40% of bit tables are unique.
  dedupe_set_.reserve(max_size / 2u);
}
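
// A rough usage sketch (hypothetical caller code, not from this file):
// reserve the hash set once up front, then feed each `CodeInfo` blob through
// `Dedupe()`, which returns the byte offset of its (possibly deduped) copy in
// the output:
//
//   deduper.ReserveDedupeBuffer(code_infos.size());
//   for (const uint8_t* data : code_infos) {
//     size_t offset = deduper.Dedupe(data);
//     // Record `offset` for whatever owns `data`.
//   }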

size_t CodeInfoTableDeduper::Dedupe(const uint8_t* code_info_data) {
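  // Overall approach: optimistically copy the incoming `CodeInfo` verbatim
  // while inserting each sufficiently large bit table into `dedupe_set_`.
  // Only if some table turns out to be a duplicate do we rewind the writer
  // and re-encode the blob, replacing each duplicate with a back-reference.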
  static constexpr size_t kNumHeaders = CodeInfo::kNumHeaders;
  static constexpr size_t kNumBitTables = CodeInfo::kNumBitTables;

  // The back-reference offset takes space, so dedupe is not worth it for tiny tables.
  constexpr size_t kMinDedupSize = 33;  // Assume a 32-bit offset on average.

  size_t start_bit_offset = writer_.NumberOfWrittenBits();
  DCHECK_ALIGNED(start_bit_offset, kBitsPerByte);

  // Reserve enough space in the `dedupe_set_` to avoid rehashing later in this
  // function and to allow using direct pointers to the `HashSet<>` entries.
  size_t elements_until_expand = dedupe_set_.ElementsUntilExpand();
  if (UNLIKELY(elements_until_expand - dedupe_set_.size() < kNumBitTables)) {
    // When resizing, try to make the load factor close to the minimum load factor.
    size_t required_capacity = dedupe_set_.size() + kNumBitTables;
    double factor = dedupe_set_.GetMaxLoadFactor() / dedupe_set_.GetMinLoadFactor();
    size_t reservation = required_capacity * factor;
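    // If `reserve()` provisions for `reservation` elements at the maximum
    // load factor (an assumption about `HashSet<>` internals), then after
    // inserting only `required_capacity` elements the load factor lands near
    // `max * required_capacity / reservation = min`, i.e. the minimum.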
    DCHECK_GE(reservation, required_capacity);
    dedupe_set_.reserve(reservation);
    elements_until_expand = dedupe_set_.ElementsUntilExpand();
    DCHECK_GE(elements_until_expand - dedupe_set_.size(), kNumBitTables);
  }

  // Read the existing code info and record the bit table starts and end.
  BitMemoryReader reader(code_info_data);
  std::array<uint32_t, kNumHeaders> header = reader.ReadInterleavedVarints<kNumHeaders>();
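  // Mirror the decoded header words into a scratch `CodeInfo` so that its
  // accessors (`HasBitTable()`, `IsBitTableDeduped()`, ...) can be used while
  // walking the tables below.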
  CodeInfo code_info;
  CodeInfo::ForEachHeaderField([&code_info, &header](size_t i, auto member_pointer) {
    code_info.*member_pointer = header[i];
  });
  DCHECK(!code_info.HasDedupedBitTables());  // Input `CodeInfo` has no deduped tables.
  std::array<uint32_t, kNumBitTables + 1u> bit_table_bit_starts;
  CodeInfo::ForEachBitTableField([&](size_t i, auto member_pointer) {
    bit_table_bit_starts[i] = dchecked_integral_cast<uint32_t>(reader.NumberOfReadBits());
    DCHECK(!code_info.IsBitTableDeduped(i));
    if (LIKELY(code_info.HasBitTable(i))) {
      auto& table = code_info.*member_pointer;
      table.Decode(reader);
    }
  });
  bit_table_bit_starts[kNumBitTables] = dchecked_integral_cast<uint32_t>(reader.NumberOfReadBits());

  // Copy the source data.
  BitMemoryRegion read_region = reader.GetReadRegion();
  writer_.WriteBytesAligned(code_info_data, BitsToBytesRoundUp(read_region.size_in_bits()));
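  // If no table below turns out to be a duplicate, this copy is already final
  // and the more expensive re-encoding pass is skipped entirely.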

  // Insert entries for large tables into the `dedupe_set_` and check for duplicates.
  std::array<DedupeSetEntry*, kNumBitTables> dedupe_entries;
  std::fill(dedupe_entries.begin(), dedupe_entries.end(), nullptr);
  CodeInfo::ForEachBitTableField([&](size_t i, auto member_pointer ATTRIBUTE_UNUSED) {
    if (LIKELY(code_info.HasBitTable(i))) {
      uint32_t table_bit_size = bit_table_bit_starts[i + 1u] - bit_table_bit_starts[i];
      if (table_bit_size >= kMinDedupSize) {
        uint32_t table_bit_start = start_bit_offset + bit_table_bit_starts[i];
        BitMemoryRegion region(
            const_cast<uint8_t*>(writer_.data()), table_bit_start, table_bit_size);
        DedupeSetEntry entry{table_bit_start, table_bit_size};
        auto [it, inserted] = dedupe_set_.insert(entry);
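        // If `inserted` is false, a bit-identical table was written earlier
        // and `it` points at its entry, so this table can be replaced by a
        // back-reference below.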
        dedupe_entries[i] = &*it;
        if (!inserted) {
          code_info.SetBitTableDeduped(i);  // Mark as deduped before we write the header.
        }
      }
    }
  });
  DCHECK_EQ(elements_until_expand, dedupe_set_.ElementsUntilExpand()) << "Unexpected resizing!";

  if (code_info.HasDedupedBitTables()) {
    // Reset the writer to the original position. This makes new entries in the
    // `dedupe_set_` effectively point to non-existent data. We shall write the
    // new data again at the correct position and update these entries.
    writer_.Truncate(start_bit_offset);
    // Update the bit table flags in the `header` and write the `header`.
    header[kNumHeaders - 1u] = code_info.bit_table_flags_;
    CodeInfo::ForEachHeaderField([&code_info, &header](size_t i, auto member_pointer) {
      DCHECK_EQ(code_info.*member_pointer, header[i]);
    });
    writer_.WriteInterleavedVarints(header);
    // Write the bit tables and update the offsets in `dedupe_set_` after encoding the `header`.
    CodeInfo::ForEachBitTableField([&](size_t i, auto member_pointer ATTRIBUTE_UNUSED) {
      if (code_info.HasBitTable(i)) {
        size_t current_bit_offset = writer_.NumberOfWrittenBits();
        if (code_info.IsBitTableDeduped(i)) {
          DCHECK_GE(bit_table_bit_starts[i + 1u] - bit_table_bit_starts[i], kMinDedupSize);
          DCHECK(dedupe_entries[i] != nullptr);
          size_t deduped_offset = dedupe_entries[i]->bit_start;
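          // Replace the whole table with a varint holding the backward bit
          // distance to the identical copy written earlier; the dedupe flag
          // in the header tells the reader to treat it as a back-reference.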
          writer_.WriteVarint(current_bit_offset - deduped_offset);
        } else {
          uint32_t table_bit_size = bit_table_bit_starts[i + 1u] - bit_table_bit_starts[i];
          writer_.WriteRegion(read_region.Subregion(bit_table_bit_starts[i], table_bit_size));
          if (table_bit_size >= kMinDedupSize) {
            // Update the offset in the `dedupe_set_` entry.
            DCHECK(dedupe_entries[i] != nullptr);
            dedupe_entries[i]->bit_start = current_bit_offset;
          }
        }
      }
    });
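    // Pad to a byte boundary so that the next `CodeInfo` starts byte-aligned
    // (see the DCHECK_ALIGNED on `start_bit_offset` above).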
    writer_.ByteAlign();
  }  // else nothing to do - we already copied the data.

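  // In debug builds, decode both blobs and check that the deduped `CodeInfo`
  // yields exactly the same header fields and bit tables as the original.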
  if (kIsDebugBuild) {
    CodeInfo old_code_info(code_info_data);
    CodeInfo new_code_info(writer_.data() + start_bit_offset / kBitsPerByte);
    CodeInfo::ForEachHeaderField([&old_code_info, &new_code_info](size_t, auto member_pointer) {
      if (member_pointer != &CodeInfo::bit_table_flags_) {  // Expected to differ.
        DCHECK_EQ(old_code_info.*member_pointer, new_code_info.*member_pointer);
      }
    });
    CodeInfo::ForEachBitTableField([&old_code_info, &new_code_info](size_t i, auto member_pointer) {
      DCHECK_EQ(old_code_info.HasBitTable(i), new_code_info.HasBitTable(i));
      DCHECK((old_code_info.*member_pointer).Equals(new_code_info.*member_pointer));
    });
  }

  return start_bit_offset / kBitsPerByte;
}

}  // namespace linker
}  // namespace art