1 /* 2 * Copyright (C) 2019 The Android Open Source Project 3 * 4 * Licensed under the Apache License, Version 2.0 (the "License"); 5 * you may not use this file except in compliance with the License. 6 * You may obtain a copy of the License at 7 * 8 * http://www.apache.org/licenses/LICENSE-2.0 9 * 10 * Unless required by applicable law or agreed to in writing, software 11 * distributed under the License is distributed on an "AS IS" BASIS, 12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 13 * See the License for the specific language governing permissions and 14 * limitations under the License. 15 */ 16 17 #ifndef SRC_TRACE_PROCESSOR_CONTAINERS_ROW_MAP_H_ 18 #define SRC_TRACE_PROCESSOR_CONTAINERS_ROW_MAP_H_ 19 20 #include <stdint.h> 21 22 #include <memory> 23 #include <vector> 24 25 #include "perfetto/base/logging.h" 26 #include "perfetto/ext/base/optional.h" 27 #include "src/trace_processor/containers/bit_vector.h" 28 #include "src/trace_processor/containers/bit_vector_iterators.h" 29 30 namespace perfetto { 31 namespace trace_processor { 32 33 // Stores a list of row indicies in a space efficient manner. One or more 34 // columns can refer to the same RowMap. The RowMap defines the access pattern 35 // to iterate on rows. 36 // 37 // Naming convention: 38 // 39 // As both the input and output of RowMap is a uint32_t, it can be quite 40 // confusing to reason about what parameters/return values of the functions 41 // of RowMap actually means. To help with this, we define a strict convention 42 // of naming. 43 // 44 // row: input - that is, rows are what are passed into operator[]; named as 45 // such because a "row" number in a table is converted to an index to 46 // lookup in the backing vectors. 47 // index: output - that is, indices are what are returned from operator[]; 48 // named as such because an "index" is what's used to lookup data 49 // from the backing vectors. 50 // 51 // Implementation details: 52 // 53 // Behind the scenes, this class is impelemented using one of three backing 54 // data-structures: 55 // 1. A start and end index (internally named 'range') 56 // 1. BitVector 57 // 2. std::vector<uint32_t> (internally named IndexVector). 58 // 59 // Generally the preference for data structures is range > BitVector > 60 // std::vector<uint32>; this ordering is based mainly on memory efficiency as we 61 // expect RowMaps to be large. 62 // 63 // However, BitVector and std::vector<uint32_t> allow things which are not 64 // possible with the data-structures preferred to them: 65 // * a range (as the name suggests) can only store a compact set of indices 66 // with no holes. A BitVector works around this limitation by storing a 1 at an 67 // index where that row is part of the RowMap and 0 otherwise. 68 // * as soon as ordering or duplicate rows come into play, we cannot use a 69 // BitVector anymore as ordering/duplicate row information cannot be captured 70 // by a BitVector. 71 // 72 // For small, sparse RowMaps, it is possible that a std::vector<uint32_t> is 73 // more efficient than a BitVector; in this case, we will make a best effort 74 // switch to it but the cases where this happens is not precisely defined. 75 class RowMap { 76 private: 77 // We need to declare these iterator classes before RowMap::Iterator as it 78 // depends on them. However, we don't want to make these public so keep them 79 // under a special private section. 80 81 // Iterator for ranged mode of RowMap. 82 // This class should act as a drop-in replacement for 83 // BitVector::SetBitsIterator. 84 class RangeIterator { 85 public: RangeIterator(const RowMap * rm)86 RangeIterator(const RowMap* rm) : rm_(rm), index_(rm->start_index_) {} 87 Next()88 void Next() { ++index_; } 89 90 operator bool() const { return index_ < rm_->end_index_; } 91 index()92 uint32_t index() const { return index_; } 93 ordinal()94 uint32_t ordinal() const { return index_ - rm_->start_index_; } 95 96 private: 97 const RowMap* rm_ = nullptr; 98 uint32_t index_ = 0; 99 }; 100 101 // Iterator for index vector mode of RowMap. 102 // This class should act as a drop-in replacement for 103 // BitVector::SetBitsIterator. 104 class IndexVectorIterator { 105 public: IndexVectorIterator(const RowMap * rm)106 IndexVectorIterator(const RowMap* rm) : rm_(rm) {} 107 Next()108 void Next() { ++ordinal_; } 109 110 operator bool() const { return ordinal_ < rm_->index_vector_.size(); } 111 index()112 uint32_t index() const { return rm_->index_vector_[ordinal_]; } 113 ordinal()114 uint32_t ordinal() const { return ordinal_; } 115 116 private: 117 const RowMap* rm_ = nullptr; 118 uint32_t ordinal_ = 0; 119 }; 120 121 public: 122 // Input type. 123 using InputRow = uint32_t; 124 using OutputIndex = uint32_t; 125 126 // Allows efficient iteration over the rows of a RowMap. 127 // 128 // Note: you should usually prefer to use the methods on RowMap directly (if 129 // they exist for the task being attempted) to avoid the lookup for the mode 130 // of the RowMap on every method call. 131 class Iterator { 132 public: Iterator(const RowMap * rm)133 Iterator(const RowMap* rm) : rm_(rm) { 134 switch (rm->mode_) { 135 case Mode::kRange: 136 range_it_.reset(new RangeIterator(rm)); 137 break; 138 case Mode::kBitVector: 139 set_bits_it_.reset( 140 new BitVector::SetBitsIterator(rm->bit_vector_.IterateSetBits())); 141 break; 142 case Mode::kIndexVector: 143 iv_it_.reset(new IndexVectorIterator(rm)); 144 break; 145 } 146 } 147 148 Iterator(Iterator&&) noexcept = default; 149 Iterator& operator=(Iterator&&) = default; 150 151 // Forwards the iterator to the next row of the RowMap. Next()152 void Next() { 153 switch (rm_->mode_) { 154 case Mode::kRange: 155 range_it_->Next(); 156 break; 157 case Mode::kBitVector: 158 set_bits_it_->Next(); 159 break; 160 case Mode::kIndexVector: 161 iv_it_->Next(); 162 break; 163 } 164 } 165 166 // Returns if the iterator is still valid. 167 operator bool() const { 168 switch (rm_->mode_) { 169 case Mode::kRange: 170 return *range_it_; 171 case Mode::kBitVector: 172 return *set_bits_it_; 173 case Mode::kIndexVector: 174 return *iv_it_; 175 } 176 PERFETTO_FATAL("For GCC"); 177 } 178 179 // Returns the index pointed to by this iterator. index()180 OutputIndex index() const { 181 switch (rm_->mode_) { 182 case Mode::kRange: 183 return range_it_->index(); 184 case Mode::kBitVector: 185 return set_bits_it_->index(); 186 case Mode::kIndexVector: 187 return iv_it_->index(); 188 } 189 PERFETTO_FATAL("For GCC"); 190 } 191 192 // Returns the row of the index the iterator points to. row()193 InputRow row() const { 194 switch (rm_->mode_) { 195 case Mode::kRange: 196 return range_it_->ordinal(); 197 case Mode::kBitVector: 198 return set_bits_it_->ordinal(); 199 case Mode::kIndexVector: 200 return iv_it_->ordinal(); 201 } 202 PERFETTO_FATAL("For GCC"); 203 } 204 205 private: 206 Iterator(const Iterator&) = delete; 207 Iterator& operator=(const Iterator&) = delete; 208 209 // Only one of the below will be non-null depending on the mode of the 210 // RowMap. 211 std::unique_ptr<RangeIterator> range_it_; 212 std::unique_ptr<BitVector::SetBitsIterator> set_bits_it_; 213 std::unique_ptr<IndexVectorIterator> iv_it_; 214 215 const RowMap* rm_ = nullptr; 216 }; 217 218 // Enum to allow users of RowMap to decide whether they want to optimize for 219 // memory usage or for speed of lookups. 220 enum class OptimizeFor { 221 kMemory, 222 kLookupSpeed, 223 }; 224 225 // Creates an empty RowMap. 226 // By default this will be implemented using a range. 227 RowMap(); 228 229 // Creates a RowMap containing the range of indices between |start| and |end| 230 // i.e. all indices between |start| (inclusive) and |end| (exclusive). 231 explicit RowMap(OutputIndex start, 232 OutputIndex end, 233 OptimizeFor optimize_for = OptimizeFor::kMemory); 234 235 // Creates a RowMap backed by a BitVector. 236 explicit RowMap(BitVector bit_vector); 237 238 // Creates a RowMap backed by an std::vector<uint32_t>. 239 explicit RowMap(std::vector<OutputIndex> vec); 240 241 // Creates a RowMap containing just |index|. 242 // By default this will be implemented using a range. SingleRow(OutputIndex index)243 static RowMap SingleRow(OutputIndex index) { 244 return RowMap(index, index + 1); 245 } 246 247 // Creates a copy of the RowMap. 248 // We have an explicit copy function because RowMap can hold onto large chunks 249 // of memory and we want to be very explicit when making a copy to avoid 250 // accidental leaks and copies. 251 RowMap Copy() const; 252 253 // Returns the size of the RowMap; that is the number of indices in the 254 // RowMap. size()255 uint32_t size() const { 256 switch (mode_) { 257 case Mode::kRange: 258 return end_index_ - start_index_; 259 case Mode::kBitVector: 260 return bit_vector_.GetNumBitsSet(); 261 case Mode::kIndexVector: 262 return static_cast<uint32_t>(index_vector_.size()); 263 } 264 PERFETTO_FATAL("For GCC"); 265 } 266 267 // Returns whether this rowmap is empty. empty()268 bool empty() const { return size() == 0; } 269 270 // Returns the index at the given |row|. Get(InputRow row)271 OutputIndex Get(InputRow row) const { 272 PERFETTO_DCHECK(row < size()); 273 switch (mode_) { 274 case Mode::kRange: 275 return GetRange(row); 276 case Mode::kBitVector: 277 return GetBitVector(row); 278 case Mode::kIndexVector: 279 return GetIndexVector(row); 280 } 281 PERFETTO_FATAL("For GCC"); 282 } 283 284 // Returns whether the RowMap contains the given index. Contains(OutputIndex index)285 bool Contains(OutputIndex index) const { 286 switch (mode_) { 287 case Mode::kRange: { 288 return index >= start_index_ && index < end_index_; 289 } 290 case Mode::kBitVector: { 291 return index < bit_vector_.size() && bit_vector_.IsSet(index); 292 } 293 case Mode::kIndexVector: { 294 auto it = std::find(index_vector_.begin(), index_vector_.end(), index); 295 return it != index_vector_.end(); 296 } 297 } 298 PERFETTO_FATAL("For GCC"); 299 } 300 301 // Returns the first row of the given |index| in the RowMap. RowOf(OutputIndex index)302 base::Optional<InputRow> RowOf(OutputIndex index) const { 303 switch (mode_) { 304 case Mode::kRange: { 305 if (index < start_index_ || index >= end_index_) 306 return base::nullopt; 307 return index - start_index_; 308 } 309 case Mode::kBitVector: { 310 return index < bit_vector_.size() && bit_vector_.IsSet(index) 311 ? base::make_optional(bit_vector_.GetNumBitsSet(index)) 312 : base::nullopt; 313 } 314 case Mode::kIndexVector: { 315 auto it = std::find(index_vector_.begin(), index_vector_.end(), index); 316 return it != index_vector_.end() 317 ? base::make_optional(static_cast<InputRow>( 318 std::distance(index_vector_.begin(), it))) 319 : base::nullopt; 320 } 321 } 322 PERFETTO_FATAL("For GCC"); 323 } 324 325 // Performs an ordered insert of the index into the current RowMap 326 // (precondition: this RowMap is ordered based on the indices it contains). 327 // 328 // Example: 329 // this = [1, 5, 10, 11, 20] 330 // Insert(10) // this = [1, 5, 10, 11, 20] 331 // Insert(12) // this = [1, 5, 10, 11, 12, 20] 332 // Insert(21) // this = [1, 5, 10, 11, 12, 20, 21] 333 // Insert(2) // this = [1, 2, 5, 10, 11, 12, 20, 21] 334 // 335 // Speecifically, this means that it is only valid to call Insert on a RowMap 336 // which is sorted by the indices it contains; this is automatically true when 337 // the RowMap is in range or BitVector mode but is a required condition for 338 // IndexVector mode. Insert(OutputIndex index)339 void Insert(OutputIndex index) { 340 switch (mode_) { 341 case Mode::kRange: 342 if (index == end_index_) { 343 // Fast path: if we're just appending to the end of the range, we can 344 // stay in range mode and just bump the end index. 345 end_index_++; 346 } else { 347 // Slow path: the insert is somewhere else other than the end. This 348 // means we need to switch to using a BitVector instead. 349 bit_vector_.Resize(start_index_, false); 350 bit_vector_.Resize(end_index_, true); 351 *this = RowMap(std::move(bit_vector_)); 352 353 InsertIntoBitVector(index); 354 } 355 break; 356 case Mode::kBitVector: 357 InsertIntoBitVector(index); 358 break; 359 case Mode::kIndexVector: { 360 PERFETTO_DCHECK( 361 std::is_sorted(index_vector_.begin(), index_vector_.end())); 362 auto it = 363 std::upper_bound(index_vector_.begin(), index_vector_.end(), index); 364 index_vector_.insert(it, index); 365 break; 366 } 367 } 368 } 369 370 // Updates this RowMap by 'picking' the indices given by |picker|. 371 // This is easiest to explain with an example; suppose we have the following 372 // RowMaps: 373 // this : [0, 1, 4, 10, 11] 374 // picker: [0, 3, 4, 4, 2] 375 // 376 // After calling Apply(picker), we now have the following: 377 // this : [0, 10, 11, 11, 4] 378 // 379 // Conceptually, we are performing the following algorithm: 380 // RowMap rm = Copy() 381 // for (p : picker) 382 // rm[i++] = this[p] 383 // return rm; SelectRows(const RowMap & selector)384 RowMap SelectRows(const RowMap& selector) const { 385 uint32_t size = selector.size(); 386 387 // If the selector is empty, just return an empty RowMap. 388 if (size == 0u) 389 return RowMap(); 390 391 // If the selector is just picking a single row, just return that row 392 // without any additional overhead. 393 if (size == 1u) 394 return RowMap::SingleRow(Get(selector.Get(0))); 395 396 // For all other cases, go into the slow-path. 397 return SelectRowsSlow(selector); 398 } 399 400 // Intersects |other| with |this| writing the result into |this|. 401 // By "intersect", we mean to keep only the indices present in both RowMaps. 402 // The order of the preserved indices will be the same as |this|. 403 // 404 // Conceptually, we are performing the following algorithm: 405 // for (idx : this) 406 // if (!other.Contains(idx)) 407 // Remove(idx) Intersect(const RowMap & other)408 void Intersect(const RowMap& other) { 409 if (mode_ == Mode::kRange && other.mode_ == Mode::kRange) { 410 // If both RowMaps have ranges, we can just take the smallest intersection 411 // of them as the new RowMap. 412 // We have this as an explicit fast path as this is very common for 413 // constraints on id and sorted columns to satisfy this condition. 414 start_index_ = std::max(start_index_, other.start_index_); 415 end_index_ = 416 std::max(start_index_, std::min(end_index_, other.end_index_)); 417 return; 418 } 419 420 // TODO(lalitm): improve efficiency of this if we end up needing it. 421 Filter([&other](uint32_t row) { return other.Contains(row); }); 422 } 423 424 // Filters the current RowMap into the RowMap given by |out| based on the 425 // return value of |p(idx)|. 426 // 427 // Precondition: |out| should be sorted by the indices inside it (this is 428 // required to keep this method efficient). This is automatically true if the 429 // mode is out is Range or BitVector but needs to be enforced if the mode is 430 // IndexVector. 431 // 432 // Specifically, the setup for each of the variables is as follows: 433 // this: contains the RowMap indices which will be looked up and passed to 434 // p to filter. 435 // out : contains indicies into |this| and will be filtered down to only 436 // contain indicies where p returns true. 437 // p : takes an index given by |this| and returns whether the index should 438 // be retained in |out|. 439 // 440 // Concretely, the algorithm being invoked looks like (but more efficient 441 // based on the mode of |this| and |out|): 442 // for (idx : out) 443 // this_idx = (*this)[idx] 444 // if (!p(this_idx)) 445 // out->Remove(idx) 446 template <typename Predicate> FilterInto(RowMap * out,Predicate p)447 void FilterInto(RowMap* out, Predicate p) const { 448 PERFETTO_DCHECK(size() >= out->size()); 449 450 if (out->empty()) { 451 // If the output RowMap is empty, we don't need to do anything. 452 return; 453 } 454 455 if (out->size() == 1) { 456 // If the output RowMap has a single entry, just lookup that entry and see 457 // if we should keep it. 458 if (!p(Get(out->Get(0)))) 459 *out = RowMap(); 460 return; 461 } 462 463 // TODO(lalitm): investigate whether we should have another fast path for 464 // cases where |out| has only a few entries so we can scan |out| instead of 465 // scanning |this|. 466 467 // Ideally, we'd always just scan |out| and keep the indices in |this| which 468 // meet |p|. However, if |this| is a BitVector, we end up needing expensive 469 // |IndexOfNthSet| calls (as we need to convert the row to an index before 470 // passing it to |p|). 471 switch (mode_) { 472 case Mode::kRange: { 473 auto ip = [this, p](uint32_t row) { return p(GetRange(row)); }; 474 out->Filter(ip); 475 break; 476 } 477 case Mode::kBitVector: { 478 FilterIntoScanSelfBv(out, p); 479 break; 480 } 481 case Mode::kIndexVector: { 482 auto ip = [this, p](uint32_t row) { return p(GetIndexVector(row)); }; 483 out->Filter(ip); 484 break; 485 } 486 } 487 } 488 489 template <typename Comparator = bool(uint32_t, uint32_t)> StableSort(std::vector<uint32_t> * out,Comparator c)490 void StableSort(std::vector<uint32_t>* out, Comparator c) const { 491 switch (mode_) { 492 case Mode::kRange: 493 std::stable_sort(out->begin(), out->end(), 494 [this, c](uint32_t a, uint32_t b) { 495 return c(GetRange(a), GetRange(b)); 496 }); 497 break; 498 case Mode::kBitVector: 499 std::stable_sort(out->begin(), out->end(), 500 [this, c](uint32_t a, uint32_t b) { 501 return c(GetBitVector(a), GetBitVector(b)); 502 }); 503 break; 504 case Mode::kIndexVector: 505 std::stable_sort(out->begin(), out->end(), 506 [this, c](uint32_t a, uint32_t b) { 507 return c(GetIndexVector(a), GetIndexVector(b)); 508 }); 509 break; 510 } 511 } 512 513 // Returns the iterator over the rows in this RowMap. IterateRows()514 Iterator IterateRows() const { return Iterator(this); } 515 516 // Returns if the RowMap is internally represented using a range. IsRange()517 bool IsRange() const { return mode_ == Mode::kRange; } 518 519 private: 520 enum class Mode { 521 kRange, 522 kBitVector, 523 kIndexVector, 524 }; 525 526 // Filters the indices in |out| by keeping those which meet |p|. 527 template <typename Predicate> Filter(Predicate p)528 void Filter(Predicate p) { 529 switch (mode_) { 530 case Mode::kRange: 531 FilterRange(p); 532 break; 533 case Mode::kBitVector: { 534 for (auto it = bit_vector_.IterateSetBits(); it; it.Next()) { 535 if (!p(it.index())) 536 it.Clear(); 537 } 538 break; 539 } 540 case Mode::kIndexVector: { 541 auto ret = std::remove_if(index_vector_.begin(), index_vector_.end(), 542 [p](uint32_t i) { return !p(i); }); 543 index_vector_.erase(ret, index_vector_.end()); 544 break; 545 } 546 } 547 } 548 549 // Filters the current RowMap into |out| by performing a full scan on |this| 550 // where |this| is a BitVector. 551 // See |FilterInto| for a full breakdown of the semantics of this function. 552 template <typename Predicate> FilterIntoScanSelfBv(RowMap * out,Predicate p)553 void FilterIntoScanSelfBv(RowMap* out, Predicate p) const { 554 auto it = bit_vector_.IterateSetBits(); 555 switch (out->mode_) { 556 case Mode::kRange: { 557 // TODO(lalitm): investigate whether we can reuse the data inside 558 // out->bit_vector_ at some point. 559 BitVector bv(out->end_index_, false); 560 for (auto out_it = bv.IterateAllBits(); it; it.Next(), out_it.Next()) { 561 uint32_t ordinal = it.ordinal(); 562 if (ordinal < out->start_index_) 563 continue; 564 if (ordinal >= out->end_index_) 565 break; 566 567 if (p(it.index())) { 568 out_it.Set(); 569 } 570 } 571 *out = RowMap(std::move(bv)); 572 break; 573 } 574 case Mode::kBitVector: { 575 auto out_it = out->bit_vector_.IterateAllBits(); 576 for (; out_it; it.Next(), out_it.Next()) { 577 PERFETTO_DCHECK(it); 578 if (out_it.IsSet() && !p(it.index())) 579 out_it.Clear(); 580 } 581 break; 582 } 583 case Mode::kIndexVector: { 584 PERFETTO_DCHECK(std::is_sorted(out->index_vector_.begin(), 585 out->index_vector_.end())); 586 auto fn = [&p, &it](uint32_t i) { 587 while (it.ordinal() < i) { 588 it.Next(); 589 PERFETTO_DCHECK(it); 590 } 591 PERFETTO_DCHECK(it.ordinal() == i); 592 return !p(it.index()); 593 }; 594 auto iv_it = std::remove_if(out->index_vector_.begin(), 595 out->index_vector_.end(), fn); 596 out->index_vector_.erase(iv_it, out->index_vector_.end()); 597 break; 598 } 599 } 600 } 601 602 template <typename Predicate> FilterRange(Predicate p)603 void FilterRange(Predicate p) { 604 uint32_t count = end_index_ - start_index_; 605 606 // Optimization: if we are only going to scan a few indices, it's not 607 // worth the haslle of working with a BitVector. 608 constexpr uint32_t kSmallRangeLimit = 2048; 609 bool is_small_range = count < kSmallRangeLimit; 610 611 // Optimization: weif the cost of a BitVector is more than the highest 612 // possible cost an index vector could have, use the index vector. 613 uint32_t bit_vector_cost = BitVector::ApproxBytesCost(end_index_); 614 uint32_t index_vector_cost_ub = sizeof(uint32_t) * count; 615 616 // If either of the conditions hold which make it better to use an 617 // index vector, use it instead. Alternatively, if we are optimizing for 618 // lookup speed, we also want to use an index vector. 619 if (is_small_range || index_vector_cost_ub <= bit_vector_cost || 620 optimize_for_ == OptimizeFor::kLookupSpeed) { 621 // Try and strike a good balance between not making the vector too 622 // big and good performance. 623 std::vector<uint32_t> iv(std::min(kSmallRangeLimit, count)); 624 625 uint32_t out_i = 0; 626 for (uint32_t i = 0; i < count; ++i) { 627 // If we reach the capacity add another small set of indices. 628 if (PERFETTO_UNLIKELY(out_i == iv.size())) 629 iv.resize(iv.size() + kSmallRangeLimit); 630 631 // We keep this branch free by always writing the index but only 632 // incrementing the out index if the return value is true. 633 bool value = p(i + start_index_); 634 iv[out_i] = i + start_index_; 635 out_i += value; 636 } 637 638 // Make the vector the correct size and as small as possible. 639 iv.resize(out_i); 640 iv.shrink_to_fit(); 641 642 *this = RowMap(std::move(iv)); 643 return; 644 } 645 646 // Otherwise, create a bitvector which spans the full range using 647 // |p| as the filler for the bits between start and end. 648 *this = RowMap(BitVector::Range(start_index_, end_index_, p)); 649 } 650 InsertIntoBitVector(uint32_t row)651 void InsertIntoBitVector(uint32_t row) { 652 PERFETTO_DCHECK(mode_ == Mode::kBitVector); 653 654 if (row >= bit_vector_.size()) 655 bit_vector_.Resize(row + 1, false); 656 bit_vector_.Set(row); 657 } 658 GetRange(InputRow row)659 PERFETTO_ALWAYS_INLINE OutputIndex GetRange(InputRow row) const { 660 PERFETTO_DCHECK(mode_ == Mode::kRange); 661 return start_index_ + row; 662 } GetBitVector(uint32_t row)663 PERFETTO_ALWAYS_INLINE OutputIndex GetBitVector(uint32_t row) const { 664 PERFETTO_DCHECK(mode_ == Mode::kBitVector); 665 return bit_vector_.IndexOfNthSet(row); 666 } GetIndexVector(uint32_t row)667 PERFETTO_ALWAYS_INLINE OutputIndex GetIndexVector(uint32_t row) const { 668 PERFETTO_DCHECK(mode_ == Mode::kIndexVector); 669 return index_vector_[row]; 670 } 671 672 RowMap SelectRowsSlow(const RowMap& selector) const; 673 674 Mode mode_ = Mode::kRange; 675 676 // Only valid when |mode_| == Mode::kRange. 677 OutputIndex start_index_ = 0; // This is an inclusive index. 678 OutputIndex end_index_ = 0; // This is an exclusive index. 679 680 // Only valid when |mode_| == Mode::kBitVector. 681 BitVector bit_vector_; 682 683 // Only valid when |mode_| == Mode::kIndexVector. 684 std::vector<OutputIndex> index_vector_; 685 686 OptimizeFor optimize_for_ = OptimizeFor::kMemory; 687 }; 688 689 } // namespace trace_processor 690 } // namespace perfetto 691 692 #endif // SRC_TRACE_PROCESSOR_CONTAINERS_ROW_MAP_H_ 693