1 // Copyright (C) 2019 Google LLC
2 //
3 // Licensed under the Apache License, Version 2.0 (the "License");
4 // you may not use this file except in compliance with the License.
5 // You may obtain a copy of the License at
6 //
7 // http://www.apache.org/licenses/LICENSE-2.0
8 //
9 // Unless required by applicable law or agreed to in writing, software
10 // distributed under the License is distributed on an "AS IS" BASIS,
11 // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 // See the License for the specific language governing permissions and
13 // limitations under the License.
14
15 #include "icing/index/main/main-index-merger.h"
16
17 #include <algorithm>
18 #include <cstdint>
19 #include <cstring>
20 #include <unordered_map>
21 #include <utility>
22 #include <vector>
23
24 #include "icing/text_classifier/lib3/utils/base/statusor.h"
25 #include "icing/absl_ports/canonical_errors.h"
26 #include "icing/file/posting_list/posting-list-common.h"
27 #include "icing/index/hit/hit.h"
28 #include "icing/index/lite/lite-index.h"
29 #include "icing/index/lite/term-id-hit-pair.h"
30 #include "icing/index/main/main-index.h"
31 #include "icing/index/term-id-codec.h"
32 #include "icing/legacy/core/icing-string-util.h"
33 #include "icing/util/logging.h"
34 #include "icing/util/status-macros.h"
35
36 namespace icing {
37 namespace lib {
38
39 namespace {
40
41 class HitSelector {
42 public:
43 // Returns whether or not term_id_hit_pair has the same term_id, document_id
44 // and section_id as the previously selected hits.
IsEquivalentHit(const TermIdHitPair & term_id_hit_pair)45 bool IsEquivalentHit(const TermIdHitPair& term_id_hit_pair) {
46 return prev_.term_id() == term_id_hit_pair.term_id() &&
47 prev_.hit().document_id() == term_id_hit_pair.hit().document_id() &&
48 prev_.hit().section_id() == term_id_hit_pair.hit().section_id();
49 }
50
51 // Merges term_id_hit_pair with previously added hits.
SelectIfBetter(const TermIdHitPair & term_id_hit_pair)52 void SelectIfBetter(const TermIdHitPair& term_id_hit_pair) {
53 if (term_id_hit_pair.hit().is_prefix_hit()) {
54 SelectPrefixHitIfBetter(term_id_hit_pair);
55 } else {
56 SelectExactHitIfBetter(term_id_hit_pair);
57 }
58 prev_ = term_id_hit_pair;
59 }
60
61 // Adds all valid, selected hits to hits starting at position pos in hits.
62 // Returns the offset in hits after the position of the last added hit.
63 // This function may add between 0-2 hits depending on whether the HitSelector
64 // holds both a valid exact hit and a valid prefix hit, one of those or none.
InsertSelectedHits(size_t pos,std::vector<TermIdHitPair> * hits)65 size_t InsertSelectedHits(size_t pos, std::vector<TermIdHitPair>* hits) {
66 // Given the prefix/exact hits for a given term+docid+sectionid, push needed
67 // hits into hits array at offset pos. Return new pos.
68 if (best_prefix_hit_.hit().is_valid() && best_exact_hit_.hit().is_valid()) {
69 (*hits)[pos++] = best_exact_hit_;
70 const Hit& prefix_hit = best_prefix_hit_.hit();
71 // The prefix hit has score equal to the sum of the scores, capped at
72 // kMaxTermFrequency.
73 Hit::TermFrequency final_term_frequency = std::min(
74 static_cast<int>(Hit::kMaxTermFrequency),
75 prefix_hit.term_frequency() + best_exact_hit_.hit().term_frequency());
76 best_prefix_hit_ = TermIdHitPair(
77 best_prefix_hit_.term_id(),
78 Hit(prefix_hit.section_id(), prefix_hit.document_id(),
79 final_term_frequency, prefix_hit.is_in_prefix_section(),
80 prefix_hit.is_prefix_hit()));
81 (*hits)[pos++] = best_prefix_hit_;
82 // Ensure sorted.
83 if (best_prefix_hit_.hit() < best_exact_hit_.hit()) {
84 std::swap((*hits)[pos - 1], (*hits)[pos - 2]);
85 }
86 } else if (best_prefix_hit_.hit().is_valid()) {
87 (*hits)[pos++] = best_prefix_hit_;
88 } else if (best_exact_hit_.hit().is_valid()) {
89 (*hits)[pos++] = best_exact_hit_;
90 }
91
92 return pos;
93 }
94
Reset()95 void Reset() {
96 best_prefix_hit_ = TermIdHitPair();
97 best_exact_hit_ = TermIdHitPair();
98 prev_ = TermIdHitPair();
99 }
100
101 private:
SelectPrefixHitIfBetter(const TermIdHitPair & term_id_hit_pair)102 void SelectPrefixHitIfBetter(const TermIdHitPair& term_id_hit_pair) {
103 if (!best_prefix_hit_.hit().is_valid()) {
104 best_prefix_hit_ = term_id_hit_pair;
105 } else {
106 const Hit& hit = term_id_hit_pair.hit();
107 // Create a new prefix hit with term_frequency as the sum of the term
108 // frequencies. The term frequency is capped at kMaxTermFrequency.
109 Hit::TermFrequency final_term_frequency = std::min(
110 static_cast<int>(Hit::kMaxTermFrequency),
111 hit.term_frequency() + best_prefix_hit_.hit().term_frequency());
112 best_prefix_hit_ = TermIdHitPair(
113 term_id_hit_pair.term_id(),
114 Hit(hit.section_id(), hit.document_id(), final_term_frequency,
115 best_prefix_hit_.hit().is_in_prefix_section(),
116 best_prefix_hit_.hit().is_prefix_hit()));
117 }
118 }
119
SelectExactHitIfBetter(const TermIdHitPair & term_id_hit_pair)120 void SelectExactHitIfBetter(const TermIdHitPair& term_id_hit_pair) {
121 if (!best_exact_hit_.hit().is_valid()) {
122 best_exact_hit_ = term_id_hit_pair;
123 } else {
124 const Hit& hit = term_id_hit_pair.hit();
125 // Create a new exact hit with term_frequency as the sum of the term
126 // frequencies. The term frequency is capped at kMaxHitScore.
127 Hit::TermFrequency final_term_frequency = std::min(
128 static_cast<int>(Hit::kMaxTermFrequency),
129 hit.term_frequency() + best_exact_hit_.hit().term_frequency());
130 best_exact_hit_ = TermIdHitPair(
131 term_id_hit_pair.term_id(),
132 Hit(hit.section_id(), hit.document_id(), final_term_frequency,
133 best_exact_hit_.hit().is_in_prefix_section(),
134 best_exact_hit_.hit().is_prefix_hit()));
135 }
136 }
137
138 TermIdHitPair best_prefix_hit_;
139 TermIdHitPair best_exact_hit_;
140 TermIdHitPair prev_;
141 };
142
143 class HitComparator {
144 public:
HitComparator(const TermIdCodec & term_id_codec,const std::unordered_map<uint32_t,int> & main_tvi_to_block_index)145 explicit HitComparator(
146 const TermIdCodec& term_id_codec,
147 const std::unordered_map<uint32_t, int>& main_tvi_to_block_index)
148 : term_id_codec_(&term_id_codec),
149 main_tvi_to_block_index_(&main_tvi_to_block_index) {}
150
operator ()(const TermIdHitPair & lhs,const TermIdHitPair & rhs) const151 bool operator()(const TermIdHitPair& lhs, const TermIdHitPair& rhs) const {
152 // Primary sort by index block. This acheives two things:
153 // 1. It reduces the number of flash writes by grouping together new hits
154 // for terms whose posting lists might share the same index block.
155 // 2. More importantly, this ensures that newly added backfill branch points
156 // will be populated first (because all newly added terms have an invalid
157 // block index of 0) before any new hits are added to the postings lists
158 // that they backfill from.
159 int lhs_index_block = GetIndexBlock(lhs.term_id());
160 int rhs_index_block = GetIndexBlock(rhs.term_id());
161 if (lhs_index_block == rhs_index_block) {
162 // Secondary sort by term_id and hit.
163 return lhs.value() < rhs.value();
164 }
165 return lhs_index_block < rhs_index_block;
166 }
167
168 private:
GetIndexBlock(uint32_t term_id) const169 int GetIndexBlock(uint32_t term_id) const {
170 auto term_info_or = term_id_codec_->DecodeTermInfo(term_id);
171 if (!term_info_or.ok()) {
172 ICING_LOG(WARNING)
173 << "Unable to decode term-info during merge. This shouldn't happen.";
174 return kInvalidBlockIndex;
175 }
176 TermIdCodec::DecodedTermInfo term_info =
177 std::move(term_info_or).ValueOrDie();
178 auto itr = main_tvi_to_block_index_->find(term_info.tvi);
179 if (itr == main_tvi_to_block_index_->end()) {
180 return kInvalidBlockIndex;
181 }
182 return itr->second;
183 }
184
185 const TermIdCodec* term_id_codec_;
186 const std::unordered_map<uint32_t, int>* main_tvi_to_block_index_;
187 };
188
189 // A helper function to dedupe hits stored in hits. Suppose that the lite index
190 // contained a single document with two hits in a single prefix section: "foot"
191 // and "fool". When expanded, there would be four hits:
192 // {"fo", docid0, sectionid0}
193 // {"fo", docid0, sectionid0}
194 // {"foot", docid0, sectionid0}
195 // {"fool", docid0, sectionid0}
196 //
197 // The first two are duplicates of each other. So, this function will dedupe
198 // and shrink hits to be:
199 // {"fo", docid0, sectionid0}
200 // {"foot", docid0, sectionid0}
201 // {"fool", docid0, sectionid0}
202 //
203 // When two or more prefix hits are duplicates, merge into one hit with term
204 // frequency as the sum of the term frequencies. If there is both an exact and
205 // prefix hit for the same term, keep the exact hit as it is, update the prefix
206 // hit so that its term frequency is the sum of the term frequencies.
DedupeHits(std::vector<TermIdHitPair> * hits,const TermIdCodec & term_id_codec,const std::unordered_map<uint32_t,int> & main_tvi_to_block_index)207 void DedupeHits(
208 std::vector<TermIdHitPair>* hits, const TermIdCodec& term_id_codec,
209 const std::unordered_map<uint32_t, int>& main_tvi_to_block_index) {
210 // Now all terms are grouped together and all hits for a term are sorted.
211 // Merge equivalent hits into one.
212 std::sort(hits->begin(), hits->end(),
213 HitComparator(term_id_codec, main_tvi_to_block_index));
214 size_t current_offset = 0;
215 HitSelector hit_selector;
216 for (const TermIdHitPair& term_id_hit_pair : *hits) {
217 if (!hit_selector.IsEquivalentHit(term_id_hit_pair)) {
218 // We've reached a new hit. Insert the previously selected hits that we
219 // had accumulated and reset to add this new hit.
220 current_offset = hit_selector.InsertSelectedHits(current_offset, hits);
221 hit_selector.Reset();
222 }
223 // Update best exact and prefix hit.
224 hit_selector.SelectIfBetter(term_id_hit_pair);
225 }
226
227 // Push last.
228 current_offset = hit_selector.InsertSelectedHits(current_offset, hits);
229
230 hits->resize(current_offset);
231 }
232
// Average number of prefix hits generated per original hit, used to size the
// reserve() in TranslateAndExpandLiteHits. Based on experiments with full
// prefix expansion, the multiplier is ~4x.
constexpr int kAvgPrefixesPerTerm = 4;
236
237 } // namespace
238
239 libtextclassifier3::StatusOr<std::vector<TermIdHitPair>>
TranslateAndExpandLiteHits(const LiteIndex & lite_index,const TermIdCodec & term_id_codec,const MainIndex::LexiconMergeOutputs & lexicon_merge_outputs)240 MainIndexMerger::TranslateAndExpandLiteHits(
241 const LiteIndex& lite_index, const TermIdCodec& term_id_codec,
242 const MainIndex::LexiconMergeOutputs& lexicon_merge_outputs) {
243 std::vector<TermIdHitPair> hits;
244 if (lite_index.empty()) {
245 return hits;
246 }
247 // Reserve enough space for the average number of prefixes per term and the
248 // terms themselves.
249 hits.reserve(lite_index.size() * (kAvgPrefixesPerTerm + 1));
250
251 // Translate lite tvis to main tvis.
252 for (const TermIdHitPair& term_id_hit_pair : lite_index) {
253 uint32_t cur_term_id = term_id_hit_pair.term_id();
254 ICING_ASSIGN_OR_RETURN(TermIdCodec::DecodedTermInfo cur_decoded_term,
255 term_id_codec.DecodeTermInfo(cur_term_id));
256 Hit hit(term_id_hit_pair.hit());
257
258 // 1. Translate and push original.
259 auto itr =
260 lexicon_merge_outputs.other_tvi_to_main_tvi.find(cur_decoded_term.tvi);
261 if (itr == lexicon_merge_outputs.other_tvi_to_main_tvi.cend()) {
262 // b/37273773
263 return absl_ports::InternalError(IcingStringUtil::StringPrintf(
264 "Trying to translate lite tvi %u that was never added to the lexicon",
265 cur_decoded_term.tvi));
266 }
267 ICING_ASSIGN_OR_RETURN(uint32_t term_id,
268 term_id_codec.EncodeTvi(itr->second, TviType::MAIN));
269 hits.emplace_back(term_id, hit);
270
271 // 2. Expand hits in prefix sections.
272 if (hit.is_in_prefix_section()) {
273 // Hit was in a prefix section. Push prefixes. Turn on prefix bit.
274 auto itr_prefixes =
275 lexicon_merge_outputs.other_tvi_to_prefix_main_tvis.find(
276 cur_decoded_term.tvi);
277 if (itr_prefixes ==
278 lexicon_merge_outputs.other_tvi_to_prefix_main_tvis.end()) {
279 ICING_VLOG(1) << "No necessary prefix expansion for "
280 << cur_decoded_term.tvi;
281 continue;
282 }
283 // The tvis of all prefixes of this hit's term that appear in the main
284 // lexicon are between [prefix_tvis_buf[offset],
285 // prefix_tvis_buf[offset+len]).
286 size_t offset = itr_prefixes->second.first;
287 size_t len = itr_prefixes->second.second;
288 size_t offset_end_exclusive = offset + len;
289 Hit prefix_hit(hit.section_id(), hit.document_id(), hit.term_frequency(),
290 /*is_in_prefix_section=*/true, /*is_prefix_hit=*/true);
291 for (; offset < offset_end_exclusive; ++offset) {
292 // Take the tvi (in the main lexicon) of each prefix term.
293 uint32_t prefix_main_tvi =
294 lexicon_merge_outputs.prefix_tvis_buf[offset];
295 // Convert it to a term_id.
296 ICING_ASSIGN_OR_RETURN(
297 uint32_t prefix_term_id,
298 term_id_codec.EncodeTvi(prefix_main_tvi, TviType::MAIN));
299 // Create add an element for this prefix TermId and prefix Hit to hits.
300 hits.emplace_back(prefix_term_id, prefix_hit);
301 }
302 }
303 }
304 // 3. Remove any duplicate hits.
305 DedupeHits(&hits, term_id_codec,
306 lexicon_merge_outputs.main_tvi_to_block_index);
307 return hits;
308 }
309
310 } // namespace lib
311 } // namespace icing
312