// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

// This file defines the map container and its helpers to support protobuf maps.
//
// The Map and MapIterator types are provided by this header file.
// Please avoid using other types defined here, unless they are public
// types within Map or MapIterator, such as Map::value_type.

#ifndef GOOGLE_PROTOBUF_MAP_H__
#define GOOGLE_PROTOBUF_MAP_H__

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <functional>
#include <initializer_list>
#include <iterator>
#include <limits>  // To support Visual Studio 2008
#include <string>
#include <type_traits>
#include <utility>

#if !defined(GOOGLE_PROTOBUF_NO_RDTSC) && defined(__APPLE__)
#include <time.h>
#endif

#include "google/protobuf/stubs/common.h"
#include "absl/base/attributes.h"
#include "absl/container/btree_map.h"
#include "absl/hash/hash.h"
#include "absl/log/absl_check.h"
#include "absl/meta/type_traits.h"
#include "absl/strings/string_view.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/generated_enum_util.h"
#include "google/protobuf/internal_visibility.h"
#include "google/protobuf/map_type_handler.h"
#include "google/protobuf/port.h"
#include "google/protobuf/wire_format_lite.h"


#ifdef SWIG
#error "You cannot SWIG proto headers"
#endif

// Must be included last.
#include "google/protobuf/port_def.inc"

namespace google {
namespace protobuf {

template <typename Key, typename T>
class Map;

class MapIterator;

template <typename Enum>
struct is_proto_enum;

namespace rust {
struct PtrAndLen;
}  // namespace rust

namespace internal {
template <typename Key, typename T>
class MapFieldLite;

template <typename Derived, typename Key, typename T,
          WireFormatLite::FieldType key_wire_type,
          WireFormatLite::FieldType value_wire_type>
class MapField;

struct MapTestPeer;
struct MapBenchmarkPeer;

template <typename Key, typename T>
class TypeDefinedMapFieldBase;

class DynamicMapField;

class GeneratedMessageReflection;

// The largest valid serialization for a message is INT_MAX, so we can't have
// more than 32-bits worth of elements.
using map_index_t = uint32_t;

// Internal type traits that can be used to define custom key/value types.
// These are only specialized by protobuf internals, and never by users.
template <typename T, typename VoidT = void>
struct is_internal_map_key_type : std::false_type {};

template <typename T, typename VoidT = void>
struct is_internal_map_value_type : std::false_type {};

// Re-implements std::allocator to use the arena allocator for memory
// allocation. Used for the Map implementation. Users should not use this
// class directly.
template <typename U>
class MapAllocator {
 public:
  using value_type = U;
  using pointer = value_type*;
  using const_pointer = const value_type*;
  using reference = value_type&;
  using const_reference = const value_type&;
  using size_type = size_t;
  using difference_type = ptrdiff_t;

  constexpr MapAllocator() : arena_(nullptr) {}
  explicit constexpr MapAllocator(Arena* arena) : arena_(arena) {}
  template <typename X>
  MapAllocator(const MapAllocator<X>& allocator)  // NOLINT(runtime/explicit)
      : arena_(allocator.arena()) {}

  // MapAllocator does not support alignments beyond 8. Technically we should
  // support up to std::max_align_t, but this fails with ubsan and tcmalloc
  // debug allocation logic, which assumes 8 as the default alignment.
  static_assert(alignof(value_type) <= 8, "");

  pointer allocate(size_type n, const void* /* hint */ = nullptr) {
    // If no arena is given, allocate with ::operator new, which does not
    // construct the element objects.
    if (arena_ == nullptr) {
      return static_cast<pointer>(::operator new(n * sizeof(value_type)));
    } else {
      return reinterpret_cast<pointer>(
          Arena::CreateArray<uint8_t>(arena_, n * sizeof(value_type)));
    }
  }

  void deallocate(pointer p, size_type n) {
    if (arena_ == nullptr) {
      internal::SizedDelete(p, n * sizeof(value_type));
    }
  }

#if !defined(GOOGLE_PROTOBUF_OS_APPLE) && !defined(GOOGLE_PROTOBUF_OS_NACL) && \
    !defined(GOOGLE_PROTOBUF_OS_EMSCRIPTEN)
  template <class NodeType, class... Args>
  void construct(NodeType* p, Args&&... args) {
    // Clang 3.6 doesn't compile static casting to void* directly. (Issue
    // #1266) According to the C++ standard 5.2.9/1: "The static_cast operator
    // shall not cast away constness". So the maybe-const pointer is first cast
    // to const void*, and then the const void* is const_cast.
    new (const_cast<void*>(static_cast<const void*>(p)))
        NodeType(std::forward<Args>(args)...);
  }

  template <class NodeType>
  void destroy(NodeType* p) {
    p->~NodeType();
  }
#else
  void construct(pointer p, const_reference t) { new (p) value_type(t); }

  void destroy(pointer p) { p->~value_type(); }
#endif

  template <typename X>
  struct rebind {
    using other = MapAllocator<X>;
  };

  template <typename X>
  bool operator==(const MapAllocator<X>& other) const {
    return arena_ == other.arena_;
  }

  template <typename X>
  bool operator!=(const MapAllocator<X>& other) const {
    return arena_ != other.arena_;
  }

  // To support Visual Studio 2008
  size_type max_size() const {
    // parentheses around (std::...:max) prevents macro warning of max()
    return (std::numeric_limits<size_type>::max)();
  }

  // To support gcc-4.4, which does not properly
  // support templated friend classes
  Arena* arena() const { return arena_; }

 private:
  using DestructorSkippable_ = void;
  Arena* arena_;
};

// To save on binary size and simplify generic uses of the map types we collapse
// signed/unsigned versions of the same sized integer to the unsigned version.
template <typename T, typename = void>
struct KeyForBaseImpl {
  using type = T;
};
template <typename T>
struct KeyForBaseImpl<T, std::enable_if_t<std::is_integral<T>::value &&
                                          std::is_signed<T>::value>> {
  using type = std::make_unsigned_t<T>;
};
template <typename T>
using KeyForBase = typename KeyForBaseImpl<T>::type;
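
// An illustrative sketch (not part of the library): KeyForBase collapses
// signed integral key types into their unsigned counterparts and leaves all
// other types untouched, e.g.:
//
//   static_assert(std::is_same<KeyForBase<int32_t>, uint32_t>::value, "");
//   static_assert(std::is_same<KeyForBase<uint64_t>, uint64_t>::value, "");
//   static_assert(
//       std::is_same<KeyForBase<std::string>, std::string>::value, "");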

// Default case: Not transparent.
// We use std::hash<key_type>/std::less<key_type> and all the lookup functions
// only accept `key_type`.
template <typename key_type>
struct TransparentSupport {
  // We hash all the scalars as uint64_t so that we can implement the same hash
  // function for VariantKey. This way we can have MapKey provide the same hash
  // as the underlying value would have.
  using hash = absl::Hash<
      std::conditional_t<std::is_scalar<key_type>::value, uint64_t, key_type>>;

  static bool Equals(const key_type& a, const key_type& b) { return a == b; }

  template <typename K>
  using key_arg = key_type;

  using ViewType = std::conditional_t<std::is_scalar<key_type>::value, key_type,
                                      const key_type&>;
  static ViewType ToView(const key_type& v) { return v; }
};

// We add transparent support for std::string keys. We use
// absl::Hash<absl::string_view> as it supports the input types we care about.
// The lookup functions accept arbitrary `K`. This will include any key type
// that is convertible to absl::string_view.
template <>
struct TransparentSupport<std::string> {
  // Use go/ranked-overloads for dispatching.
  struct Rank0 {};
  struct Rank1 : Rank0 {};
  struct Rank2 : Rank1 {};
  template <typename T, typename = std::enable_if_t<
                            std::is_convertible<T, absl::string_view>::value>>
  static absl::string_view ImplicitConvertImpl(T&& str, Rank2) {
    absl::string_view ref = str;
    return ref;
  }
  template <typename T, typename = std::enable_if_t<
                            std::is_convertible<T, const std::string&>::value>>
  static absl::string_view ImplicitConvertImpl(T&& str, Rank1) {
    const std::string& ref = str;
    return ref;
  }
  template <typename T>
  static absl::string_view ImplicitConvertImpl(T&& str, Rank0) {
    return {str.data(), str.size()};
  }

  template <typename T>
  static absl::string_view ImplicitConvert(T&& str) {
    return ImplicitConvertImpl(std::forward<T>(str), Rank2{});
  }

  struct hash : public absl::Hash<absl::string_view> {
    using is_transparent = void;

    template <typename T>
    size_t operator()(T&& str) const {
      return absl::Hash<absl::string_view>::operator()(
          ImplicitConvert(std::forward<T>(str)));
    }
  };

  template <typename T, typename U>
  static bool Equals(T&& t, U&& u) {
    return ImplicitConvert(std::forward<T>(t)) ==
           ImplicitConvert(std::forward<U>(u));
  }

  template <typename K>
  using key_arg = K;

  using ViewType = absl::string_view;
  template <typename T>
  static ViewType ToView(const T& v) {
    return ImplicitConvert(v);
  }
};
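
// An illustrative sketch (not part of the library): the transparent hash and
// Equals above accept any string-like argument without materializing a
// std::string, so all of the following hold:
//
//   using TS = TransparentSupport<std::string>;
//   std::string owned = "key";
//   assert(TS::Equals(owned, absl::string_view("key")));
//   assert(TS::hash{}(owned) == TS::hash{}("key"));
//
// This is what lets the string-keyed Map lookup functions take arbitrary
// string-like keys without an extra allocation.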

enum class MapNodeSizeInfoT : uint32_t;
inline uint16_t SizeFromInfo(MapNodeSizeInfoT node_size_info) {
  return static_cast<uint16_t>(static_cast<uint32_t>(node_size_info) >> 16);
}
inline uint16_t ValueOffsetFromInfo(MapNodeSizeInfoT node_size_info) {
  return static_cast<uint16_t>(static_cast<uint32_t>(node_size_info) >> 0);
}
constexpr MapNodeSizeInfoT MakeNodeInfo(uint16_t size, uint16_t value_offset) {
  return static_cast<MapNodeSizeInfoT>((static_cast<uint32_t>(size) << 16) |
                                       value_offset);
}
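
// An illustrative round trip (not part of the library): MakeNodeInfo packs
// the node size into the high 16 bits and the value offset into the low 16
// bits, and the accessors above undo the packing. For a hypothetical node of
// 24 bytes whose value lives at offset 16:
//
//   const MapNodeSizeInfoT kInfo = MakeNodeInfo(24, 16);
//   assert(SizeFromInfo(kInfo) == 24);         // high 16 bits
//   assert(ValueOffsetFromInfo(kInfo) == 16);  // low 16 bits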

struct NodeBase {
  // Align the node to allow KeyNode to predict the location of the key.
  // This way sizeof(NodeBase) contains any possible padding it was going to
  // have between NodeBase and the key.
  alignas(kMaxMessageAlignment) NodeBase* next;

  void* GetVoidKey() { return this + 1; }
  const void* GetVoidKey() const { return this + 1; }

  void* GetVoidValue(MapNodeSizeInfoT size_info) {
    return reinterpret_cast<char*>(this) + ValueOffsetFromInfo(size_info);
  }
};

inline NodeBase* EraseFromLinkedList(NodeBase* item, NodeBase* head) {
  if (head == item) {
    return head->next;
  } else {
    head->next = EraseFromLinkedList(item, head->next);
    return head;
  }
}

constexpr size_t MapTreeLengthThreshold() { return 8; }
inline bool TableEntryIsTooLong(NodeBase* node) {
  const size_t kMaxLength = MapTreeLengthThreshold();
  size_t count = 0;
  do {
    ++count;
    node = node->next;
  } while (node != nullptr);
  // Invariant: no linked list ever is more than kMaxLength in length.
  ABSL_DCHECK_LE(count, kMaxLength);
  return count >= kMaxLength;
}

// Similar to the public MapKey, but specialized for the internal
// implementation.
struct VariantKey {
  // We make this value 16 bytes to make it cheaper to pass in the ABI.
  // Can't overload string_view this way, so we unpack the fields.
  // data==nullptr means this is a number and `integral` is the value.
  // data!=nullptr means this is a string and `integral` is the size.
  const char* data;
  uint64_t integral;

  explicit VariantKey(uint64_t v) : data(nullptr), integral(v) {}
  explicit VariantKey(absl::string_view v)
      : data(v.data()), integral(v.size()) {
    // We use `data` to discriminate between the types, so make sure it is never
    // null here.
    if (data == nullptr) data = "";
  }

  friend bool operator<(const VariantKey& left, const VariantKey& right) {
    ABSL_DCHECK_EQ(left.data == nullptr, right.data == nullptr);
    if (left.integral != right.integral) {
      // If they are numbers with different value, or strings with different
      // size, check the number only.
      return left.integral < right.integral;
    }
    if (left.data == nullptr) {
      // If they are numbers, they have the same value, so return false.
      return false;
    }
    // They are strings of the same size, so check the bytes.
    return memcmp(left.data, right.data, left.integral) < 0;
  }
};
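
// An illustrative sketch (not part of the library): operator< is only ever
// used on keys of the same kind (the DCHECK above enforces this). Numbers
// compare by value; strings compare by size first and by contents only on a
// tie, which is cheaper than lexicographic order but still a strict ordering:
//
//   assert(VariantKey(uint64_t{3}) < VariantKey(uint64_t{7}));
//   assert(VariantKey(absl::string_view("zz")) <
//          VariantKey(absl::string_view("aaa")));  // shorter sorts first
//   assert(VariantKey(absl::string_view("ab")) <
//          VariantKey(absl::string_view("ba")));   // same size: memcmp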

// This is to be specialized by MapKey.
template <typename T>
struct RealKeyToVariantKey {
  VariantKey operator()(T value) const { return VariantKey(value); }
};

template <typename T, typename = void>
struct RealKeyToVariantKeyAlternative;

template <typename T>
struct RealKeyToVariantKeyAlternative<
    T, typename std::enable_if<std::is_integral<T>::value>::type> {
  uint64_t operator()(uint64_t value) const { return value; }
};

template <>
struct RealKeyToVariantKey<std::string> {
  template <typename T>
  VariantKey operator()(const T& value) const {
    return VariantKey(TransparentSupport<std::string>::ImplicitConvert(value));
  }
};

template <>
struct RealKeyToVariantKeyAlternative<std::string> {
  absl::string_view operator()(absl::string_view value) const { return value; }
};

// We use a single kind of tree for all maps. This reduces code duplication.
using TreeForMap =
    absl::btree_map<VariantKey, NodeBase*, std::less<VariantKey>,
                    MapAllocator<std::pair<const VariantKey, NodeBase*>>>;

// Type safe tagged pointer.
// We convert to/from nodes and trees using the operations below.
// They ensure that the tags are used correctly.
// There are three states:
//  - x == 0: the entry is empty
//  - x != 0 && (x&1) == 0: the entry is a node list
//  - x != 0 && (x&1) == 1: the entry is a tree
enum class TableEntryPtr : uintptr_t;

inline bool TableEntryIsEmpty(TableEntryPtr entry) {
  return entry == TableEntryPtr{};
}
inline bool TableEntryIsTree(TableEntryPtr entry) {
  return (static_cast<uintptr_t>(entry) & 1) == 1;
}
inline bool TableEntryIsList(TableEntryPtr entry) {
  return !TableEntryIsTree(entry);
}
inline bool TableEntryIsNonEmptyList(TableEntryPtr entry) {
  return !TableEntryIsEmpty(entry) && TableEntryIsList(entry);
}
inline NodeBase* TableEntryToNode(TableEntryPtr entry) {
  ABSL_DCHECK(TableEntryIsList(entry));
  return reinterpret_cast<NodeBase*>(static_cast<uintptr_t>(entry));
}
inline TableEntryPtr NodeToTableEntry(NodeBase* node) {
  ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
  return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node));
}
inline TreeForMap* TableEntryToTree(TableEntryPtr entry) {
  ABSL_DCHECK(TableEntryIsTree(entry));
  return reinterpret_cast<TreeForMap*>(static_cast<uintptr_t>(entry) - 1);
}
inline TableEntryPtr TreeToTableEntry(TreeForMap* node) {
  ABSL_DCHECK((reinterpret_cast<uintptr_t>(node) & 1) == 0);
  return static_cast<TableEntryPtr>(reinterpret_cast<uintptr_t>(node) | 1);
}
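
// An illustrative round trip through the tagging helpers above (not part of
// the library; `node` and `tree` stand for any live 8-byte-aligned pointers):
//
//   TableEntryPtr e = NodeToTableEntry(node);  // tag bit stays 0
//   assert(TableEntryIsNonEmptyList(e) && TableEntryToNode(e) == node);
//
//   e = TreeToTableEntry(tree);                // tag bit set to 1
//   assert(TableEntryIsTree(e) && TableEntryToTree(e) == tree);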

// This captures all numeric types.
inline size_t MapValueSpaceUsedExcludingSelfLong(bool) { return 0; }
inline size_t MapValueSpaceUsedExcludingSelfLong(const std::string& str) {
  return StringSpaceUsedExcludingSelfLong(str);
}
template <typename T,
          typename = decltype(std::declval<const T&>().SpaceUsedLong())>
size_t MapValueSpaceUsedExcludingSelfLong(const T& message) {
  return message.SpaceUsedLong() - sizeof(T);
}

constexpr size_t kGlobalEmptyTableSize = 1;
PROTOBUF_EXPORT extern const TableEntryPtr
    kGlobalEmptyTable[kGlobalEmptyTableSize];

template <typename Map,
          typename = typename std::enable_if<
              !std::is_scalar<typename Map::key_type>::value ||
              !std::is_scalar<typename Map::mapped_type>::value>::type>
size_t SpaceUsedInValues(const Map* map) {
  size_t size = 0;
  for (const auto& v : *map) {
    size += internal::MapValueSpaceUsedExcludingSelfLong(v.first) +
            internal::MapValueSpaceUsedExcludingSelfLong(v.second);
  }
  return size;
}

inline size_t SpaceUsedInValues(const void*) { return 0; }

class UntypedMapBase;

class UntypedMapIterator {
 public:
  // Invariants:
  // node_ is always correct. This is handy because the most common
  // operations are operator* and operator-> and they only use node_.
  // When node_ is set to a non-null value, all the other non-const fields
  // are updated to be correct also, but those fields can become stale
  // if the underlying map is modified.  When those fields are needed they
  // are rechecked, and updated if necessary.

  // We do not provide any constructors for this type. We need it to be a
  // trivial type to ensure that we can safely share it with Rust.

  // Advance through buckets, looking for the first that isn't empty.
  // If nothing non-empty is found then leave node_ == nullptr.
  void SearchFrom(map_index_t start_bucket);

  // The definition of operator== is handled by the derived type. If we were
  // to do it in this class it would allow comparing iterators of different
  // map types.
  bool Equals(const UntypedMapIterator& other) const {
    return node_ == other.node_;
  }

  // The definition of operator++ is handled in the derived type. We would not
  // be able to return the right type from here.
  void PlusPlus() {
    if (node_->next == nullptr) {
      SearchFrom(bucket_index_ + 1);
    } else {
      node_ = node_->next;
    }
  }

  // Conversion to and from a typed iterator child class is used by FFI.
  template <class Iter>
  static UntypedMapIterator FromTyped(Iter it) {
    static_assert(
#if defined(__cpp_lib_is_layout_compatible) && \
    __cpp_lib_is_layout_compatible >= 201907L
        std::is_layout_compatible_v<Iter, UntypedMapIterator>,
#else
        sizeof(it) == sizeof(UntypedMapIterator),
#endif
516         "Map iterator must not have extra state that the base class"
517         "does not define.");
    return static_cast<UntypedMapIterator>(it);
  }

  template <class Iter>
  Iter ToTyped() const {
    return Iter(*this);
  }
  NodeBase* node_;
  const UntypedMapBase* m_;
  map_index_t bucket_index_;
};

// These properties are depended upon by Rust FFI.
static_assert(std::is_trivial<UntypedMapIterator>::value,
              "UntypedMapIterator must be a trivial type.");
static_assert(std::is_trivially_copyable<UntypedMapIterator>::value,
              "UntypedMapIterator must be trivially copyable.");
static_assert(std::is_trivially_destructible<UntypedMapIterator>::value,
              "UntypedMapIterator must be trivially destructible.");
static_assert(std::is_standard_layout<UntypedMapIterator>::value,
              "UntypedMapIterator must be standard layout.");
static_assert(offsetof(UntypedMapIterator, node_) == 0,
              "node_ must be the first field of UntypedMapIterator.");
static_assert(sizeof(UntypedMapIterator) ==
                  sizeof(void*) * 2 +
                      std::max(sizeof(uint32_t), alignof(void*)),
              "UntypedMapIterator does not have the expected size for FFI");
static_assert(
    alignof(UntypedMapIterator) == std::max(alignof(void*), alignof(uint32_t)),
    "UntypedMapIterator does not have the expected alignment for FFI");

// Base class for all Map instantiations.
// This class holds all the data and provides the basic functionality shared
// among all instantiations.
// Having an untyped base class helps generic consumers (like the table-driven
// parser) by having non-template code that can handle all instantiations.
class PROTOBUF_EXPORT UntypedMapBase {
  using Allocator = internal::MapAllocator<void*>;
  using Tree = internal::TreeForMap;

 public:
  using size_type = size_t;

  explicit constexpr UntypedMapBase(Arena* arena)
      : num_elements_(0),
        num_buckets_(internal::kGlobalEmptyTableSize),
        seed_(0),
        index_of_first_non_null_(internal::kGlobalEmptyTableSize),
        table_(const_cast<TableEntryPtr*>(internal::kGlobalEmptyTable)),
        alloc_(arena) {}

  UntypedMapBase(const UntypedMapBase&) = delete;
  UntypedMapBase& operator=(const UntypedMapBase&) = delete;

 protected:
  // 16 bytes is the minimum useful size for the array cache in the arena.
  enum : map_index_t { kMinTableSize = 16 / sizeof(void*) };

 public:
  Arena* arena() const { return this->alloc_.arena(); }

  void InternalSwap(UntypedMapBase* other) {
    std::swap(num_elements_, other->num_elements_);
    std::swap(num_buckets_, other->num_buckets_);
    std::swap(seed_, other->seed_);
    std::swap(index_of_first_non_null_, other->index_of_first_non_null_);
    std::swap(table_, other->table_);
    std::swap(alloc_, other->alloc_);
  }

  static size_type max_size() {
    return std::numeric_limits<map_index_t>::max();
  }
  size_type size() const { return num_elements_; }
  bool empty() const { return size() == 0; }
  UntypedMapIterator begin() const;

  // We make this a static function to reduce the cost in MapField.
  // All the end iterators are singletons anyway.
  static UntypedMapIterator EndIterator() { return {nullptr, nullptr, 0}; }

 protected:
  friend class TcParser;
  friend struct MapTestPeer;
  friend struct MapBenchmarkPeer;
  friend class UntypedMapIterator;
  friend class RustMapHelper;

  struct NodeAndBucket {
    NodeBase* node;
    map_index_t bucket;
  };

  // Returns whether we should insert after the head of the list. For
  // non-optimized builds, we randomly decide whether to insert right at the
  // head of the list or just after the head. This helps add a little bit of
  // non-determinism to the map ordering.
  bool ShouldInsertAfterHead(void* node) {
#ifdef NDEBUG
    (void)node;
    return false;
#else
    // Doing modulo with a prime mixes the bits more.
    return (reinterpret_cast<uintptr_t>(node) ^ seed_) % 13 > 6;
#endif
  }

  // Helper for InsertUnique.  Handles the case where bucket b is a
  // not-too-long linked list.
  void InsertUniqueInList(map_index_t b, NodeBase* node) {
    if (!TableEntryIsEmpty(b) && ShouldInsertAfterHead(node)) {
      auto* first = TableEntryToNode(table_[b]);
      node->next = first->next;
      first->next = node;
    } else {
      node->next = TableEntryToNode(table_[b]);
      table_[b] = NodeToTableEntry(node);
    }
  }

  bool TableEntryIsEmpty(map_index_t b) const {
    return internal::TableEntryIsEmpty(table_[b]);
  }
  bool TableEntryIsNonEmptyList(map_index_t b) const {
    return internal::TableEntryIsNonEmptyList(table_[b]);
  }
  bool TableEntryIsTree(map_index_t b) const {
    return internal::TableEntryIsTree(table_[b]);
  }
  bool TableEntryIsList(map_index_t b) const {
    return internal::TableEntryIsList(table_[b]);
  }

  // Return whether table_[b] is a linked list that seems awfully long.
  // Requires table_[b] to point to a non-empty linked list.
  bool TableEntryIsTooLong(map_index_t b) {
    return internal::TableEntryIsTooLong(TableEntryToNode(table_[b]));
  }

  // Return a power of two no less than max(kMinTableSize, n).
  // Assumes either n < kMinTableSize or n is a power of two.
  map_index_t TableSize(map_index_t n) {
    return n < kMinTableSize ? kMinTableSize : n;
  }

  template <typename T>
  using AllocFor = absl::allocator_traits<Allocator>::template rebind_alloc<T>;

  // Alignment of the nodes is the same as alignment of NodeBase.
  NodeBase* AllocNode(MapNodeSizeInfoT size_info) {
    return AllocNode(SizeFromInfo(size_info));
  }

  NodeBase* AllocNode(size_t node_size) {
    PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
    return AllocFor<NodeBase>(alloc_).allocate(node_size / sizeof(NodeBase));
  }

  void DeallocNode(NodeBase* node, MapNodeSizeInfoT size_info) {
    DeallocNode(node, SizeFromInfo(size_info));
  }

  void DeallocNode(NodeBase* node, size_t node_size) {
    PROTOBUF_ASSUME(node_size % sizeof(NodeBase) == 0);
    AllocFor<NodeBase>(alloc_).deallocate(node, node_size / sizeof(NodeBase));
  }

  void DeleteTable(TableEntryPtr* table, map_index_t n) {
    if (auto* a = arena()) {
      a->ReturnArrayMemory(table, n * sizeof(TableEntryPtr));
    } else {
      internal::SizedDelete(table, n * sizeof(TableEntryPtr));
    }
  }

  NodeBase* DestroyTree(Tree* tree);
  using GetKey = VariantKey (*)(NodeBase*);
  void InsertUniqueInTree(map_index_t b, GetKey get_key, NodeBase* node);
  void TransferTree(Tree* tree, GetKey get_key);
  TableEntryPtr ConvertToTree(NodeBase* node, GetKey get_key);
  void EraseFromTree(map_index_t b, typename Tree::iterator tree_it);

  map_index_t VariantBucketNumber(VariantKey key) const {
    return key.data == nullptr
               ? VariantBucketNumber(key.integral)
               : VariantBucketNumber(absl::string_view(
                     key.data, static_cast<size_t>(key.integral)));
  }

  map_index_t VariantBucketNumber(absl::string_view key) const {
    return static_cast<map_index_t>(absl::HashOf(seed_, key) &
                                    (num_buckets_ - 1));
  }

  map_index_t VariantBucketNumber(uint64_t key) const {
    return static_cast<map_index_t>(absl::HashOf(key ^ seed_) &
                                    (num_buckets_ - 1));
  }

  TableEntryPtr* CreateEmptyTable(map_index_t n) {
    ABSL_DCHECK_GE(n, kMinTableSize);
    ABSL_DCHECK_EQ(n & (n - 1), 0u);
    TableEntryPtr* result = AllocFor<TableEntryPtr>(alloc_).allocate(n);
    memset(result, 0, n * sizeof(result[0]));
    return result;
  }

  // Return a randomish value.
  map_index_t Seed() const {
    uint64_t s = 0;
#if !defined(GOOGLE_PROTOBUF_NO_RDTSC)
#if defined(__APPLE__)
    // Use a commpage-based fast time function on Apple environments (MacOS,
    // iOS, tvOS, watchOS, etc).
    s = clock_gettime_nsec_np(CLOCK_UPTIME_RAW);
#elif defined(__x86_64__) && defined(__GNUC__)
    uint32_t hi, lo;
    asm volatile("rdtsc" : "=a"(lo), "=d"(hi));
    s = ((static_cast<uint64_t>(hi) << 32) | lo);
#elif defined(__aarch64__) && defined(__GNUC__)
    // There is no rdtsc on ARMv8. CNTVCT_EL0 is the virtual counter of the
    // system timer. It runs at a different frequency than the CPU's, but is
    // the best source of time-based entropy we get.
    uint64_t virtual_timer_value;
    asm volatile("mrs %0, cntvct_el0" : "=r"(virtual_timer_value));
    s = virtual_timer_value;
#endif
#endif  // !defined(GOOGLE_PROTOBUF_NO_RDTSC)
    // Add entropy from the address of the map and the address of the table
    // array.
    return static_cast<map_index_t>(
        absl::HashOf(s, table_, static_cast<const void*>(this)));
  }

  enum {
    kKeyIsString = 1 << 0,
    kValueIsString = 1 << 1,
    kValueIsProto = 1 << 2,
    kUseDestructFunc = 1 << 3,
  };
  template <typename Key, typename Value>
  static constexpr uint8_t MakeDestroyBits() {
    uint8_t result = 0;
    if (!std::is_trivially_destructible<Key>::value) {
      if (std::is_same<Key, std::string>::value) {
        result |= kKeyIsString;
      } else {
        return kUseDestructFunc;
      }
    }
    if (!std::is_trivially_destructible<Value>::value) {
      if (std::is_same<Value, std::string>::value) {
        result |= kValueIsString;
      } else if (std::is_base_of<MessageLite, Value>::value) {
        result |= kValueIsProto;
      } else {
        return kUseDestructFunc;
      }
    }
    return result;
  }
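
  // Illustrative values (not part of the library; SomeMessage stands for any
  // type derived from MessageLite):
  //   MakeDestroyBits<int32_t, int32_t>()         == 0  (nothing to destroy)
  //   MakeDestroyBits<std::string, std::string>() == kKeyIsString | kValueIsString
  //   MakeDestroyBits<int32_t, SomeMessage>()     == kValueIsProto
  // Any other non-trivially-destructible key or value type yields
  // kUseDestructFunc, which makes MakeClearInput below supply a type-erased
  // destructor instead.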

  struct ClearInput {
    MapNodeSizeInfoT size_info;
    uint8_t destroy_bits;
    bool reset_table;
    void (*destroy_node)(NodeBase*);
  };

  template <typename Node>
  static void DestroyNode(NodeBase* node) {
    static_cast<Node*>(node)->~Node();
  }

  template <typename Node>
  static constexpr ClearInput MakeClearInput(bool reset) {
    constexpr auto bits =
        MakeDestroyBits<typename Node::key_type, typename Node::mapped_type>();
    return ClearInput{Node::size_info(), bits, reset,
                      bits & kUseDestructFunc ? DestroyNode<Node> : nullptr};
  }

  void ClearTable(ClearInput input);

  NodeAndBucket FindFromTree(map_index_t b, VariantKey key,
                             Tree::iterator* it) const;

  // Space used for the table, trees, and nodes.
  // Does not include the indirect space used, e.g. the data of a std::string.
  size_t SpaceUsedInTable(size_t sizeof_node) const;

  map_index_t num_elements_;
  map_index_t num_buckets_;
  map_index_t seed_;
  map_index_t index_of_first_non_null_;
  TableEntryPtr* table_;  // an array with num_buckets_ entries
  Allocator alloc_;
};

inline UntypedMapIterator UntypedMapBase::begin() const {
  map_index_t bucket_index;
  NodeBase* node;
  if (index_of_first_non_null_ == num_buckets_) {
    bucket_index = 0;
    node = nullptr;
  } else {
    bucket_index = index_of_first_non_null_;
    TableEntryPtr entry = table_[bucket_index];
    node = PROTOBUF_PREDICT_TRUE(internal::TableEntryIsList(entry))
               ? TableEntryToNode(entry)
               : TableEntryToTree(entry)->begin()->second;
    PROTOBUF_ASSUME(node != nullptr);
  }
  return UntypedMapIterator{node, this, bucket_index};
}

inline void UntypedMapIterator::SearchFrom(map_index_t start_bucket) {
  ABSL_DCHECK(m_->index_of_first_non_null_ == m_->num_buckets_ ||
              !m_->TableEntryIsEmpty(m_->index_of_first_non_null_));
  for (map_index_t i = start_bucket; i < m_->num_buckets_; ++i) {
    TableEntryPtr entry = m_->table_[i];
    if (entry == TableEntryPtr{}) continue;
    bucket_index_ = i;
    if (PROTOBUF_PREDICT_TRUE(TableEntryIsList(entry))) {
      node_ = TableEntryToNode(entry);
    } else {
      TreeForMap* tree = TableEntryToTree(entry);
      ABSL_DCHECK(!tree->empty());
      node_ = tree->begin()->second;
    }
    return;
  }
  node_ = nullptr;
  bucket_index_ = 0;
}

// Base class used by TcParser to extract the map object from a map field.
// We keep it here to avoid a dependency into map_field.h from the main TcParser
// code, since that would bring in Message too.
class MapFieldBaseForParse {
 public:
  const UntypedMapBase& GetMap() const {
    return vtable_->get_map(*this, false);
  }
  UntypedMapBase* MutableMap() {
    return &const_cast<UntypedMapBase&>(vtable_->get_map(*this, true));
  }

 protected:
  struct VTable {
    const UntypedMapBase& (*get_map)(const MapFieldBaseForParse&,
                                     bool is_mutable);
  };
  explicit constexpr MapFieldBaseForParse(const VTable* vtable)
      : vtable_(vtable) {}
  ~MapFieldBaseForParse() = default;

  const VTable* vtable_;
};

// The value might be of different signedness, so use memcpy to extract it.
template <typename T, std::enable_if_t<std::is_integral<T>::value, int> = 0>
T ReadKey(const void* ptr) {
  T out;
  memcpy(&out, ptr, sizeof(T));
  return out;
}

template <typename T, std::enable_if_t<!std::is_integral<T>::value, int> = 0>
const T& ReadKey(const void* ptr) {
  return *reinterpret_cast<const T*>(ptr);
}

template <typename Key>
struct KeyNode : NodeBase {
  static constexpr size_t kOffset = sizeof(NodeBase);
  decltype(auto) key() const { return ReadKey<Key>(GetVoidKey()); }
};

// KeyMapBase is a chaining hash map with the additional feature that some
// buckets can be converted to use an ordered container.  This ensures O(lg n)
// bounds on find, insert, and erase, while avoiding the overheads of ordered
// containers most of the time.
//
// The implementation doesn't need the full generality of unordered_map,
// and it doesn't have it.  More bells and whistles can be added as needed.
// Some implementation details:
// 1. The number of buckets is a power of two.
// 2. As is typical for hash_map and such, the Keys and Values are always
//    stored in linked list nodes.  Pointers to elements are never invalidated
//    until the element is deleted.
// 3. The trees' payload type is pointer to linked-list node.  Tree-converting
//    a bucket doesn't copy Key-Value pairs.
// 4. Once we've tree-converted a bucket, it is never converted back unless the
//    bucket is completely emptied out. Note that the items a tree contains may
//    wind up assigned to trees or lists upon a rehash.
// 5. Mutations to a map do not invalidate the map's iterators, pointers to
//    elements, or references to elements.
// 6. Except for erase(iterator), any non-const method can reorder iterators.
// 7. Uses VariantKey when using the Tree representation, which holds all
//    possible key types as a variant value.

template <typename Key>
class KeyMapBase : public UntypedMapBase {
  static_assert(!std::is_signed<Key>::value || !std::is_integral<Key>::value,
                "");

  using TS = TransparentSupport<Key>;

 public:
  using hasher = typename TS::hash;

  using UntypedMapBase::UntypedMapBase;

 protected:
  using KeyNode = internal::KeyNode<Key>;

  // Trees. The payload type is a copy of Key, so that we can query the tree
  // with Keys that are not in any particular data structure.
  // The value is a void* pointing to Node. We use void* instead of Node* to
  // avoid code bloat. That way there is only one instantiation of the tree
  // class per key type.
  using Tree = internal::TreeForMap;
  using TreeIterator = typename Tree::iterator;

 public:
  hasher hash_function() const { return {}; }

 protected:
  friend class TcParser;
  friend struct MapTestPeer;
  friend struct MapBenchmarkPeer;
  friend class RustMapHelper;

  PROTOBUF_NOINLINE void erase_no_destroy(map_index_t b, KeyNode* node) {
    TreeIterator tree_it;
    const bool is_list = revalidate_if_necessary(b, node, &tree_it);
    if (is_list) {
      ABSL_DCHECK(TableEntryIsNonEmptyList(b));
      auto* head = TableEntryToNode(table_[b]);
      head = EraseFromLinkedList(node, head);
      table_[b] = NodeToTableEntry(head);
    } else {
      EraseFromTree(b, tree_it);
    }
    --num_elements_;
    if (PROTOBUF_PREDICT_FALSE(b == index_of_first_non_null_)) {
      while (index_of_first_non_null_ < num_buckets_ &&
             TableEntryIsEmpty(index_of_first_non_null_)) {
        ++index_of_first_non_null_;
      }
    }
  }

  NodeAndBucket FindHelper(typename TS::ViewType k,
                           TreeIterator* it = nullptr) const {
    map_index_t b = BucketNumber(k);
    if (TableEntryIsNonEmptyList(b)) {
      auto* node = internal::TableEntryToNode(table_[b]);
      do {
        if (TS::Equals(static_cast<KeyNode*>(node)->key(), k)) {
          return {node, b};
        } else {
          node = node->next;
        }
      } while (node != nullptr);
    } else if (TableEntryIsTree(b)) {
      return FindFromTree(b, internal::RealKeyToVariantKey<Key>{}(k), it);
    }
    return {nullptr, b};
  }

  // Insert the given node.
  // If the key is a duplicate, it inserts the new node and returns the old one.
  // Gives ownership to the caller.
  // If the key is unique, it returns `nullptr`.
  KeyNode* InsertOrReplaceNode(KeyNode* node) {
    KeyNode* to_erase = nullptr;
    auto p = this->FindHelper(node->key());
    map_index_t b = p.bucket;
    if (p.node != nullptr) {
      erase_no_destroy(p.bucket, static_cast<KeyNode*>(p.node));
      to_erase = static_cast<KeyNode*>(p.node);
    } else if (ResizeIfLoadIsOutOfRange(num_elements_ + 1)) {
      b = BucketNumber(node->key());  // bucket_number
    }
    InsertUnique(b, node);
    ++num_elements_;
    return to_erase;
  }

  // Insert the given Node in bucket b.  If that would make bucket b too big,
  // and bucket b is not a tree, create a tree for bucket b.
  // Requires count(*KeyPtrFromNodePtr(node)) == 0 and that b is the correct
  // bucket.  num_elements_ is not modified.
  void InsertUnique(map_index_t b, KeyNode* node) {
    ABSL_DCHECK(index_of_first_non_null_ == num_buckets_ ||
                !TableEntryIsEmpty(index_of_first_non_null_));
    // In practice, the code that led to this point may have already
    // determined whether we are inserting into an empty list, a short list,
    // or whatever.  But it's probably cheap enough to recompute that here;
    // it's likely that we're inserting into an empty or short list.
    ABSL_DCHECK(FindHelper(node->key()).node == nullptr);
    if (TableEntryIsEmpty(b)) {
      InsertUniqueInList(b, node);
      index_of_first_non_null_ = (std::min)(index_of_first_non_null_, b);
    } else if (TableEntryIsNonEmptyList(b) && !TableEntryIsTooLong(b)) {
      InsertUniqueInList(b, node);
    } else {
      InsertUniqueInTree(b, NodeToVariantKey, node);
    }
  }

  static VariantKey NodeToVariantKey(NodeBase* node) {
    return internal::RealKeyToVariantKey<Key>{}(
        static_cast<KeyNode*>(node)->key());
  }

  // Kept as a separate function for testing.
  static size_type CalculateHiCutoff(size_type num_buckets) {
    // We want the high cutoff to follow these rules:
    //  - When num_buckets_ == kGlobalEmptyTableSize, then make it 0 to force an
    //    allocation.
    //  - When num_buckets_ < 8, then make it num_buckets_ to avoid
    //    a reallocation. A large load factor is not that important on small
    //    tables and saves memory.
    //  - Otherwise, make it 75% of num_buckets_.
    return num_buckets - num_buckets / 16 * 4 - num_buckets % 2;
  }
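
  // Illustrative values for the cutoff formula above (not part of the
  // library):
  //   num_buckets == 1 (kGlobalEmptyTableSize):  1 - 0 - 1   == 0
  //   num_buckets == 4:                          4 - 0 - 0   == 4
  //   num_buckets == 16:                         16 - 4 - 0  == 12  (75%)
  //   num_buckets == 64:                         64 - 16 - 0 == 48  (75%)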

  // Returns whether it did resize.  Currently this is only used when
  // num_elements_ increases, though it could be used in other situations.
  // It checks for load too low as well as load too high: because any number
  // of erases can occur between inserts, the load could be as low as 0 here.
  // Resizing to a lower size is not always helpful, but failing to do so can
  // destroy the expected big-O bounds for some operations. By having the
  // policy that sometimes we resize down as well as up, clients can easily
  // keep O(size()) = O(number of buckets) if they want that.
  bool ResizeIfLoadIsOutOfRange(size_type new_size) {
    const size_type hi_cutoff = CalculateHiCutoff(num_buckets_);
    const size_type lo_cutoff = hi_cutoff / 4;
    // We don't care how many elements are in trees.  If a lot are,
    // we may resize even though there are many empty buckets.  In
    // practice, this seems fine.
    if (PROTOBUF_PREDICT_FALSE(new_size > hi_cutoff)) {
      if (num_buckets_ <= max_size() / 2) {
        Resize(num_buckets_ * 2);
        return true;
      }
    } else if (PROTOBUF_PREDICT_FALSE(new_size <= lo_cutoff &&
                                      num_buckets_ > kMinTableSize)) {
      size_type lg2_of_size_reduction_factor = 1;
      // It's possible we want to shrink a lot here... size() could even be 0.
      // So, estimate how much to shrink by making sure we don't shrink so
      // much that we would need to grow the table after a few inserts.
      const size_type hypothetical_size = new_size * 5 / 4 + 1;
      while ((hypothetical_size << lg2_of_size_reduction_factor) < hi_cutoff) {
        ++lg2_of_size_reduction_factor;
      }
      size_type new_num_buckets = std::max<size_type>(
          kMinTableSize, num_buckets_ >> lg2_of_size_reduction_factor);
      if (new_num_buckets != num_buckets_) {
        Resize(new_num_buckets);
        return true;
      }
    }
    return false;
  }
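
  // An illustrative worked example (not part of the library), assuming 64-bit
  // pointers so that kMinTableSize == 2: with num_buckets_ == 64 we get
  // hi_cutoff == 48 and lo_cutoff == 12. Shrinking from new_size == 3:
  //   hypothetical_size = 3 * 5 / 4 + 1 == 4
  //   (4 << k) < 48 holds for k == 1, 2, 3, so the loop exits with
  //   lg2_of_size_reduction_factor == 4, and the table shrinks from 64
  //   buckets to 64 >> 4 == 4.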

  // Resize to the given number of buckets.
  void Resize(map_index_t new_num_buckets) {
    if (num_buckets_ == kGlobalEmptyTableSize) {
      // This is the global empty array.
      // Just overwrite with a new one. No need to transfer or free anything.
      num_buckets_ = index_of_first_non_null_ = kMinTableSize;
      table_ = CreateEmptyTable(num_buckets_);
      seed_ = Seed();
      return;
    }

    ABSL_DCHECK_GE(new_num_buckets, kMinTableSize);
    const auto old_table = table_;
    const map_index_t old_table_size = num_buckets_;
    num_buckets_ = new_num_buckets;
    table_ = CreateEmptyTable(num_buckets_);
    const map_index_t start = index_of_first_non_null_;
    index_of_first_non_null_ = num_buckets_;
    for (map_index_t i = start; i < old_table_size; ++i) {
      if (internal::TableEntryIsNonEmptyList(old_table[i])) {
        TransferList(static_cast<KeyNode*>(TableEntryToNode(old_table[i])));
      } else if (internal::TableEntryIsTree(old_table[i])) {
        this->TransferTree(TableEntryToTree(old_table[i]), NodeToVariantKey);
      }
    }
    DeleteTable(old_table, old_table_size);
  }

  // Transfer all nodes in the list `node` into `this`.
  void TransferList(KeyNode* node) {
    do {
      auto* next = static_cast<KeyNode*>(node->next);
      InsertUnique(BucketNumber(node->key()), node);
      node = next;
    } while (node != nullptr);
  }

  map_index_t BucketNumber(typename TS::ViewType k) const {
    ABSL_DCHECK_EQ(
        VariantBucketNumber(RealKeyToVariantKeyAlternative<Key>{}(k)),
        VariantBucketNumber(RealKeyToVariantKey<Key>{}(k)));
    return VariantBucketNumber(RealKeyToVariantKeyAlternative<Key>{}(k));
  }

  // Assumes node_ and m_ are correct and non-null, but other fields may be
  // stale.  Fix them as needed.  Then return true iff node_ points to a
  // Node in a list.  If false is returned then *it is modified to be
  // a valid iterator for node_.
  bool revalidate_if_necessary(map_index_t& bucket_index, KeyNode* node,
                               TreeIterator* it) const {
    // Force bucket_index to be in range.
    bucket_index &= (num_buckets_ - 1);
    // Common case: the bucket we think is relevant points to `node`.
    if (table_[bucket_index] == NodeToTableEntry(node)) return true;
    // Less common: the bucket is a linked list with node_ somewhere in it,
    // but not at the head.
    if (TableEntryIsNonEmptyList(bucket_index)) {
      auto* l = TableEntryToNode(table_[bucket_index]);
      while ((l = l->next) != nullptr) {
        if (l == node) {
          return true;
        }
      }
    }
    // Well, bucket_index_ still might be correct, but probably
    // not.  Revalidate just to be sure.  This case is rare enough that we
    // don't worry about potential optimizations, such as having a custom
    // find-like method that compares Node* instead of the key.
    auto res = FindHelper(node->key(), it);
    bucket_index = res.bucket;
    return TableEntryIsList(bucket_index);
  }
};

template <typename T, typename K>
bool InitializeMapKey(T*, K&&, Arena*) {
  return false;
}


// The purpose of this class is to give the Rust implementation visibility into
// some of the internals of C++ proto maps. We need access to these internals
// to be able to implement Rust map operations without duplicating the same
// functionality for every message type.
class RustMapHelper {
 public:
  using NodeAndBucket = UntypedMapBase::NodeAndBucket;
  using ClearInput = UntypedMapBase::ClearInput;

  template <typename Key, typename Value>
  static constexpr MapNodeSizeInfoT SizeInfo() {
    return Map<Key, Value>::Node::size_info();
  }

  enum {
    kKeyIsString = UntypedMapBase::kKeyIsString,
    kValueIsProto = UntypedMapBase::kValueIsProto,
  };

  static NodeBase* AllocNode(UntypedMapBase* m, MapNodeSizeInfoT size_info) {
    return m->AllocNode(size_info);
  }

  static void DeallocNode(UntypedMapBase* m, NodeBase* node,
                          MapNodeSizeInfoT size_info) {
    return m->DeallocNode(node, size_info);
  }

  template <typename Map, typename Key>
  static NodeAndBucket FindHelper(Map* m, Key key) {
    return m->FindHelper(key);
  }

  template <typename Map>
  static typename Map::KeyNode* InsertOrReplaceNode(Map* m, NodeBase* node) {
    return m->InsertOrReplaceNode(static_cast<typename Map::KeyNode*>(node));
  }

  template <typename Map>
  static void EraseNoDestroy(Map* m, map_index_t bucket, NodeBase* node) {
    m->erase_no_destroy(bucket, static_cast<typename Map::KeyNode*>(node));
  }

  static google::protobuf::MessageLite* PlacementNew(const MessageLite* prototype,
                                           void* mem) {
    return prototype->GetClassData()->PlacementNew(mem, /* arena = */ nullptr);
  }

  static void DestroyMessage(MessageLite* m) { m->DestroyInstance(); }

  static void ClearTable(UntypedMapBase* m, ClearInput input) {
    m->ClearTable(input);
  }

  static bool IsGlobalEmptyTable(const UntypedMapBase* m) {
    return m->num_buckets_ == kGlobalEmptyTableSize;
  }
};

}  // namespace internal

// This is the class for Map's internal value_type.
template <typename Key, typename T>
using MapPair = std::pair<const Key, T>;

// Map is an associative container type used to store protobuf map
// fields.  Each Map instance may or may not use a different hash function, a
// different iteration order, and so on.  E.g., please don't examine
// implementation details to decide if the following would work:
//  Map<int, int> m0, m1;
//  m0[0] = m1[0] = m0[1] = m1[1] = 0;
//  assert(m0.begin()->first == m1.begin()->first);  // Bug!
//
// Map's interface is similar to std::unordered_map, except that Map is not
// designed to play well with exceptions.
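
// An illustrative usage sketch (not a complete survey of the API):
//
//   google::protobuf::Map<std::string, int32_t> counts;
//   counts["apple"] = 1;               // inserts or overwrites
//   counts.try_emplace("banana", 2);   // inserts only if absent
//   auto it = counts.find("apple");    // heterogeneous lookup, no temporary
//   if (it != counts.end()) ++it->second;
//   for (const auto& kv : counts) {
//     // kv.first is the key, kv.second the mapped value. Do not rely on the
//     // iteration order; it is deliberately unspecified.
//   }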
1242 template <typename Key, typename T>
1243 class Map : private internal::KeyMapBase<internal::KeyForBase<Key>> {
1244   using Base = typename Map::KeyMapBase;
1245 
1246   using TS = internal::TransparentSupport<Key>;
1247 
1248  public:
1249   using key_type = Key;
1250   using mapped_type = T;
1251   using init_type = std::pair<Key, T>;
1252   using value_type = MapPair<Key, T>;
1253 
1254   using pointer = value_type*;
1255   using const_pointer = const value_type*;
1256   using reference = value_type&;
1257   using const_reference = const value_type&;
1258 
1259   using size_type = size_t;
1260   using hasher = typename TS::hash;
1261 
1262   constexpr Map() : Base(nullptr) { StaticValidityCheck(); }
1263   Map(const Map& other) : Map(nullptr, other) {}
1264 
1265   // Internal Arena constructors: do not use!
1266   // TODO: remove non internal ctors
1267   explicit Map(Arena* arena) : Base(arena) { StaticValidityCheck(); }
1268   Map(internal::InternalVisibility, Arena* arena) : Map(arena) {}
1269   Map(internal::InternalVisibility, Arena* arena, const Map& other)
1270       : Map(arena, other) {}
1271 
1272   Map(Map&& other) noexcept : Map() {
1273     if (other.arena() != nullptr) {
1274       *this = other;
1275     } else {
1276       swap(other);
1277     }
1278   }
1279 
1280   Map& operator=(Map&& other) noexcept ABSL_ATTRIBUTE_LIFETIME_BOUND {
1281     if (this != &other) {
1282       if (arena() != other.arena()) {
1283         *this = other;
1284       } else {
1285         swap(other);
1286       }
1287     }
1288     return *this;
1289   }
1290 
1291   template <class InputIt>
1292   Map(const InputIt& first, const InputIt& last) : Map() {
1293     insert(first, last);
1294   }
1295 
1296   ~Map() {
1297     // Fail-safe in case we miss calling this in a constructor.  Note: this one
1298     // won't trigger for leaked maps that never get destructed.
1299     StaticValidityCheck();
1300 
1301     if (this->num_buckets_ != internal::kGlobalEmptyTableSize) {
1302       this->ClearTable(this->template MakeClearInput<Node>(false));
1303     }
1304   }
1305 
1306  private:
1307   Map(Arena* arena, const Map& other) : Base(arena) {
1308     StaticValidityCheck();
1309     insert(other.begin(), other.end());
1310   }
1311   static_assert(!std::is_const<mapped_type>::value &&
1312                     !std::is_const<key_type>::value,
1313                 "We do not support const types.");
1314   static_assert(!std::is_volatile<mapped_type>::value &&
1315                     !std::is_volatile<key_type>::value,
1316                 "We do not support volatile types.");
1317   static_assert(!std::is_pointer<mapped_type>::value &&
1318                     !std::is_pointer<key_type>::value,
1319                 "We do not support pointer types.");
1320   static_assert(!std::is_reference<mapped_type>::value &&
1321                     !std::is_reference<key_type>::value,
1322                 "We do not support reference types.");
1323   static constexpr PROTOBUF_ALWAYS_INLINE void StaticValidityCheck() {
1324     static_assert(alignof(internal::NodeBase) >= alignof(mapped_type),
1325                   "Alignment of mapped type is too high.");
1326     static_assert(
1327         absl::disjunction<internal::is_supported_integral_type<key_type>,
1328                           internal::is_supported_string_type<key_type>,
1329                           internal::is_internal_map_key_type<key_type>>::value,
1330         "We only support integer, string, or designated internal key "
1331         "types.");
1332     static_assert(absl::disjunction<
                      internal::is_supported_scalar_type<mapped_type>,
                      is_proto_enum<mapped_type>,
                      internal::is_supported_message_type<mapped_type>,
                      internal::is_internal_map_value_type<mapped_type>>::value,
                  "We only support scalar, Message, and designated internal "
                  "mapped types.");
    // The Rust implementation that wraps C++ protos relies on the ability to
    // create an UntypedMapBase and cast a pointer to it to google::protobuf::Map*.
    static_assert(
        sizeof(Map) == sizeof(internal::UntypedMapBase),
        "Map must not have any data members beyond what is in UntypedMapBase.");
  }

  template <typename P>
  struct SameAsElementReference
      : std::is_same<typename std::remove_cv<
                         typename std::remove_reference<reference>::type>::type,
                     typename std::remove_cv<
                         typename std::remove_reference<P>::type>::type> {};

  template <class P>
  using RequiresInsertable =
      typename std::enable_if<std::is_convertible<P, init_type>::value ||
                                  SameAsElementReference<P>::value,
                              int>::type;
  template <class P>
  using RequiresNotInit =
      typename std::enable_if<!std::is_same<P, init_type>::value, int>::type;

  template <typename LookupKey>
  using key_arg = typename TS::template key_arg<LookupKey>;

 public:
  // Iterators
  class const_iterator : private internal::UntypedMapIterator {
    using BaseIt = internal::UntypedMapIterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = const value_type*;
    using reference = const value_type&;

    const_iterator() : BaseIt{nullptr, nullptr, 0} {}
    const_iterator(const const_iterator&) = default;
    const_iterator& operator=(const const_iterator&) = default;
    explicit const_iterator(BaseIt it) : BaseIt(it) {}

    reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
    pointer operator->() const { return &(operator*()); }

    const_iterator& operator++() {
      this->PlusPlus();
      return *this;
    }
    const_iterator operator++(int) {
      auto copy = *this;
      this->PlusPlus();
      return copy;
    }

    friend bool operator==(const const_iterator& a, const const_iterator& b) {
      return a.Equals(b);
    }
    friend bool operator!=(const const_iterator& a, const const_iterator& b) {
      return !a.Equals(b);
    }

   private:
    using BaseIt::BaseIt;
    friend class Map;
    friend class internal::UntypedMapIterator;
    friend class internal::TypeDefinedMapFieldBase<Key, T>;
  };

  class iterator : private internal::UntypedMapIterator {
    using BaseIt = internal::UntypedMapIterator;

   public:
    using iterator_category = std::forward_iterator_tag;
    using value_type = typename Map::value_type;
    using difference_type = ptrdiff_t;
    using pointer = value_type*;
    using reference = value_type&;

    iterator() : BaseIt{nullptr, nullptr, 0} {}
    iterator(const iterator&) = default;
    iterator& operator=(const iterator&) = default;
    explicit iterator(BaseIt it) : BaseIt(it) {}

    reference operator*() const { return static_cast<Node*>(this->node_)->kv; }
    pointer operator->() const { return &(operator*()); }

    iterator& operator++() {
      this->PlusPlus();
      return *this;
    }
    iterator operator++(int) {
      auto copy = *this;
      this->PlusPlus();
      return copy;
    }

    // Allow implicit conversion to const_iterator.
    operator const_iterator() const {  // NOLINT(runtime/explicit)
      return const_iterator(static_cast<const BaseIt&>(*this));
    }

    friend bool operator==(const iterator& a, const iterator& b) {
      return a.Equals(b);
    }
    friend bool operator!=(const iterator& a, const iterator& b) {
      return !a.Equals(b);
    }

   private:
    using BaseIt::BaseIt;
    friend class Map;
  };

  iterator begin() ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return iterator(Base::begin());
  }
  iterator end() ABSL_ATTRIBUTE_LIFETIME_BOUND { return iterator(); }
  const_iterator begin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return const_iterator(Base::begin());
  }
  const_iterator end() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return const_iterator();
  }
  const_iterator cbegin() const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return begin();
  }
  const_iterator cend() const ABSL_ATTRIBUTE_LIFETIME_BOUND { return end(); }
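
  // Illustrative usage sketch (not part of the original header): iteration
  // visits entries in an unspecified order, like std::unordered_map, so
  // callers must not rely on ordering. `word_counts` is a hypothetical
  // local variable.
  //
  //   Map<std::string, int> word_counts;
  //   word_counts["hello"] = 1;
  //   for (const auto& entry : word_counts) {
  //     // entry.first is the key; entry.second is the mapped value.
  //   }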

  using Base::empty;
  using Base::size;

  // Element access
  template <typename K = key_type>
  T& operator[](const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return try_emplace(key).first->second;
  }
  template <
      typename K = key_type,
      // Disable for integral types to reduce code bloat.
      typename = typename std::enable_if<!std::is_integral<K>::value>::type>
  T& operator[](key_arg<K>&& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return try_emplace(std::forward<K>(key)).first->second;
  }
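
  // Illustrative sketch (not from the original header): operator[] inserts a
  // value-initialized mapped value when the key is absent, so it cannot be
  // called on a const Map; use at() or find() for read-only access. `m` is
  // hypothetical.
  //
  //   Map<int32_t, std::string> m;
  //   m[5];           // inserts {5, ""} and returns a reference to it
  //   m[5] = "five";  // overwrites the mapped value in place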

  template <typename K = key_type>
  const T& at(const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    const_iterator it = find(key);
    ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  template <typename K = key_type>
  T& at(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    iterator it = find(key);
    ABSL_CHECK(it != end()) << "key not found: " << static_cast<Key>(key);
    return it->second;
  }

  // Lookup
  template <typename K = key_type>
  size_type count(const key_arg<K>& key) const {
    return find(key) == end() ? 0 : 1;
  }

  template <typename K = key_type>
  const_iterator find(const key_arg<K>& key) const
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return const_cast<Map*>(this)->find(key);
  }
  template <typename K = key_type>
  iterator find(const key_arg<K>& key) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    auto res = this->FindHelper(TS::ToView(key));
    return iterator(internal::UntypedMapIterator{static_cast<Node*>(res.node),
                                                 this, res.bucket});
  }

  template <typename K = key_type>
  bool contains(const key_arg<K>& key) const {
    return find(key) != end();
  }
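
  // Illustrative sketch (not from the original header): unlike at(), which
  // ABSL_CHECK-fails on a missing key, find() and contains() are the
  // non-crashing lookup paths. `m` is a hypothetical Map<std::string, int>.
  //
  //   if (auto it = m.find("k"); it != m.end()) {
  //     // use it->second
  //   }
  //   bool present = m.contains("k");  // equivalent to m.count("k") == 1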

  template <typename K = key_type>
  std::pair<const_iterator, const_iterator> equal_range(
      const key_arg<K>& key) const ABSL_ATTRIBUTE_LIFETIME_BOUND {
    const_iterator it = find(key);
    if (it == end()) {
      return std::pair<const_iterator, const_iterator>(it, it);
    } else {
      const_iterator begin = it++;
      return std::pair<const_iterator, const_iterator>(begin, it);
    }
  }

  template <typename K = key_type>
  std::pair<iterator, iterator> equal_range(const key_arg<K>& key)
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
    iterator it = find(key);
    if (it == end()) {
      return std::pair<iterator, iterator>(it, it);
    } else {
      iterator begin = it++;
      return std::pair<iterator, iterator>(begin, it);
    }
  }
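
  // Illustrative note (not from the original header): because keys are
  // unique, the range returned by equal_range() always spans zero or one
  // element. `m` is a hypothetical Map.
  //
  //   auto range = m.equal_range(key);
  //   size_t n = std::distance(range.first, range.second);  // 0 or 1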

  // insert
  template <typename K, typename... Args>
  std::pair<iterator, bool> try_emplace(K&& k, Args&&... args)
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
    // Inserts a new element into the container if there is no element with
    // the given key in the container already.
    // The new element is:
    //  (1) Constructed in-place with the given args, if mapped_type is not
    //      arena constructible.
    //  (2) Otherwise, constructed in-place with the arena and then assigned
    //      from a mapped_type temporary constructed with the given args.
    return ArenaAwareTryEmplace(Arena::is_arena_constructable<mapped_type>(),
                                std::forward<K>(k),
                                std::forward<Args>(args)...);
  }
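
  // Illustrative sketch (not from the original header): try_emplace never
  // overwrites an existing mapped value; the bool in the result reports
  // whether an insertion happened. `m` is hypothetical.
  //
  //   Map<std::string, std::string> m;
  //   auto [it, inserted] = m.try_emplace("k", "v1");  // inserted == true
  //   auto [it2, again] = m.try_emplace("k", "v2");    // again == false,
  //                                                    // it2->second == "v1"
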
  std::pair<iterator, bool> insert(init_type&& value)
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return try_emplace(std::move(value.first), std::move(value.second));
  }
  template <typename P, RequiresInsertable<P> = 0>
  std::pair<iterator, bool> insert(P&& value) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return try_emplace(std::forward<P>(value).first,
                       std::forward<P>(value).second);
  }
  template <typename... Args>
  std::pair<iterator, bool> emplace(Args&&... args)
      ABSL_ATTRIBUTE_LIFETIME_BOUND {
    return EmplaceInternal(Rank0{}, std::forward<Args>(args)...);
  }
  template <class InputIt>
  void insert(InputIt first, InputIt last) {
    for (; first != last; ++first) {
      auto&& pair = *first;
      try_emplace(pair.first, pair.second);
    }
  }
  void insert(std::initializer_list<init_type> values) {
    insert(values.begin(), values.end());
  }
  template <typename P, RequiresNotInit<P> = 0,
            RequiresInsertable<const P&> = 0>
  void insert(std::initializer_list<P> values) {
    insert(values.begin(), values.end());
  }
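
  // Illustrative sketch (not from the original header): all insert() and
  // emplace() overloads funnel into try_emplace(), so entries with existing
  // keys are never overwritten. `m` is hypothetical.
  //
  //   Map<int32_t, std::string> m;
  //   m.insert({{1, "one"}, {2, "two"}});  // initializer-list overload
  //   m.emplace(3, "three");               // forwards to try_emplace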

  // Erase and clear
  template <typename K = key_type>
  size_type erase(const key_arg<K>& key) {
    iterator it = find(key);
    if (it == end()) {
      return 0;
    } else {
      erase(it);
      return 1;
    }
  }

  iterator erase(iterator pos) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    auto next = std::next(pos);
    ABSL_DCHECK_EQ(pos.m_, static_cast<Base*>(this));
    auto* node = static_cast<Node*>(pos.node_);
    this->erase_no_destroy(pos.bucket_index_, node);
    DestroyNode(node);
    return next;
  }

  void erase(iterator first, iterator last) {
    while (first != last) {
      first = erase(first);
    }
  }

  void clear() {
    if (this->num_buckets_ == internal::kGlobalEmptyTableSize) return;
    this->ClearTable(this->template MakeClearInput<Node>(true));
  }
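
  // Illustrative sketch (not from the original header): erase(iterator)
  // returns the iterator following the erased element, which enables the
  // standard erase-while-iterating pattern. `m` and ShouldRemove() are
  // hypothetical.
  //
  //   for (auto it = m.begin(); it != m.end();) {
  //     if (ShouldRemove(*it)) {
  //       it = m.erase(it);
  //     } else {
  //       ++it;
  //     }
  //   }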

  // Assign
  Map& operator=(const Map& other) ABSL_ATTRIBUTE_LIFETIME_BOUND {
    if (this != &other) {
      clear();
      insert(other.begin(), other.end());
    }
    return *this;
  }

  void swap(Map& other) {
    if (arena() == other.arena()) {
      InternalSwap(&other);
    } else {
      // TODO: optimize this. The temporary copy can be allocated
      // in the same arena as the other message, and the "other = copy" can
      // be replaced with the fast-path swap above.
      Map copy = *this;
      *this = other;
      other = copy;
    }
  }
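
  // Illustrative note (not from the original header): swap() is O(1) only
  // when both maps share an arena (or both are heap-allocated); otherwise it
  // degrades to a copy-based exchange, as the TODO above explains. `a` and
  // `b` are hypothetical.
  //
  //   Map<int32_t, int32_t> a, b;  // both heap-allocated (null arena)
  //   a.swap(b);                   // same arena: fast internal swap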

  void InternalSwap(Map* other) {
    internal::UntypedMapBase::InternalSwap(other);
  }

  hasher hash_function() const { return {}; }

  size_t SpaceUsedExcludingSelfLong() const {
    if (empty()) return 0;
    return SpaceUsedInternal() + internal::SpaceUsedInValues(this);
  }

  static constexpr size_t InternalGetArenaOffset(internal::InternalVisibility) {
    return PROTOBUF_FIELD_OFFSET(Map, alloc_);
  }

 private:
  struct Rank1 {};
  struct Rank0 : Rank1 {};

  // Linked-list nodes, as one would expect for a chaining hash table.
  struct Node : Base::KeyNode {
    using key_type = Key;
    using mapped_type = T;
    static constexpr internal::MapNodeSizeInfoT size_info() {
      return internal::MakeNodeInfo(sizeof(Node),
                                    PROTOBUF_FIELD_OFFSET(Node, kv.second));
    }
    value_type kv;
  };

  using Tree = internal::TreeForMap;
  using TreeIterator = typename Tree::iterator;
  using TableEntryPtr = internal::TableEntryPtr;

  static Node* NodeFromTreeIterator(TreeIterator it) {
    static_assert(
        PROTOBUF_FIELD_OFFSET(Node, kv.first) == Base::KeyNode::kOffset, "");
    static_assert(alignof(Node) == alignof(internal::NodeBase), "");
    return static_cast<Node*>(it->second);
  }

  void DestroyNode(Node* node) {
    if (this->alloc_.arena() == nullptr) {
      node->kv.first.~key_type();
      node->kv.second.~mapped_type();
      this->DeallocNode(node, sizeof(Node));
    }
  }

  size_t SpaceUsedInternal() const {
    return this->SpaceUsedInTable(sizeof(Node));
  }

  // We try to construct `init_type` from `Args`, falling back to
  // `value_type`. The latter is less desirable, as it unconditionally makes
  // a copy of `value_type::first`.
  template <typename... Args>
  auto EmplaceInternal(Rank0, Args&&... args) ->
      typename std::enable_if<std::is_constructible<init_type, Args...>::value,
                              std::pair<iterator, bool>>::type {
    return insert(init_type(std::forward<Args>(args)...));
  }
  template <typename... Args>
  std::pair<iterator, bool> EmplaceInternal(Rank1, Args&&... args) {
    return insert(value_type(std::forward<Args>(args)...));
  }

  template <typename K, typename... Args>
  std::pair<iterator, bool> TryEmplaceInternal(K&& k, Args&&... args) {
    auto p = this->FindHelper(TS::ToView(k));
    internal::map_index_t b = p.bucket;
    // Case 1: key was already present.
    if (p.node != nullptr)
      return std::make_pair(iterator(internal::UntypedMapIterator{
                                static_cast<Node*>(p.node), this, p.bucket}),
                            false);
    // Case 2: insert.
    if (this->ResizeIfLoadIsOutOfRange(this->num_elements_ + 1)) {
      b = this->BucketNumber(TS::ToView(k));
    }
    // If K is not key_type, make the conversion to key_type explicit.
    using TypeToInit = typename std::conditional<
        std::is_same<typename std::decay<K>::type, key_type>::value, K&&,
        key_type>::type;
    Node* node = static_cast<Node*>(this->AllocNode(sizeof(Node)));

    // Even when the arena is nullptr, CreateInArenaStorage is still used to
    // ensure that the arena of any submessage stays consistent. Otherwise,
    // the submessage may get its own arena when message-owned arenas are
    // enabled.
    // Note: This only works if `Key` is not arena constructible.
    if (!internal::InitializeMapKey(const_cast<Key*>(&node->kv.first),
                                    std::forward<K>(k), this->alloc_.arena())) {
      Arena::CreateInArenaStorage(const_cast<Key*>(&node->kv.first),
                                  this->alloc_.arena(),
                                  static_cast<TypeToInit>(std::forward<K>(k)));
    }
    // Note: if `T` is arena constructible, `Args` needs to be empty.
    Arena::CreateInArenaStorage(&node->kv.second, this->alloc_.arena(),
                                std::forward<Args>(args)...);

    this->InsertUnique(b, node);
    ++this->num_elements_;
    return std::make_pair(iterator(internal::UntypedMapIterator{node, this, b}),
                          true);
  }

  // A helper function to perform an assignment of `mapped_type`.
  // If the first argument is true, then it is a regular assignment.
  // Otherwise, we first create a temporary and then perform an assignment.
  template <typename V>
  static void AssignMapped(std::true_type, mapped_type& mapped, V&& v) {
    mapped = std::forward<V>(v);
  }
  template <typename... Args>
  static void AssignMapped(std::false_type, mapped_type& mapped,
                           Args&&... args) {
    mapped = mapped_type(std::forward<Args>(args)...);
  }

  // Case 1: `mapped_type` is arena constructible. A temporary object is
  // created and then (if `Args` are not empty) assigned to a mapped value
  // that was created with the arena.
  template <typename K>
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k) {
    // case 1.1: "default" constructed (e.g. from arena only).
    return TryEmplaceInternal(std::forward<K>(k));
  }
  template <typename K, typename... Args>
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::true_type, K&& k,
                                                 Args&&... args) {
    // case 1.2: "default" constructed + copy/move assignment
    auto p = TryEmplaceInternal(std::forward<K>(k));
    if (p.second) {
      AssignMapped(std::is_same<void(typename std::decay<Args>::type...),
                                void(mapped_type)>(),
                   p.first->second, std::forward<Args>(args)...);
    }
    return p;
  }
  // Case 2: `mapped_type` is not arena constructible. It uses in-place
  // construction.
  template <typename... Args>
  std::pair<iterator, bool> ArenaAwareTryEmplace(std::false_type,
                                                 Args&&... args) {
    return TryEmplaceInternal(std::forward<Args>(args)...);
  }

  using Base::arena;

  friend class Arena;
  template <typename, typename>
  friend class internal::TypeDefinedMapFieldBase;
  using InternalArenaConstructable_ = void;
  using DestructorSkippable_ = void;
  template <typename K, typename V>
  friend class internal::MapFieldLite;
  friend class internal::TcParser;
  friend struct internal::MapTestPeer;
  friend struct internal::MapBenchmarkPeer;
  friend class internal::RustMapHelper;
};

namespace internal {
template <typename... T>
PROTOBUF_NOINLINE void MapMergeFrom(Map<T...>& dest, const Map<T...>& src) {
  for (const auto& elem : src) {
    dest[elem.first] = elem.second;
  }
}
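
// Illustrative note (not from the original header): MapMergeFrom has
// "last writer wins" semantics; entries in `src` overwrite entries in `dest`
// that share a key, via operator[] assignment. `dest` and `src` are
// hypothetical.
//
//   Map<int32_t, int32_t> dest, src;
//   dest[1] = 10;
//   src[1] = 20;
//   internal::MapMergeFrom(dest, src);  // dest[1] is now 20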
}  // namespace internal

}  // namespace protobuf
}  // namespace google

#include "google/protobuf/port_undef.inc"

#endif  // GOOGLE_PROTOBUF_MAP_H__