// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#include "google/protobuf/generated_enum_util.h"

#include <algorithm>
#include <cstddef>
#include <cstdint>
#include <utility>
#include <vector>

#include "absl/log/absl_check.h"
#include "absl/types/optional.h"
#include "absl/types/span.h"
#include "google/protobuf/generated_message_util.h"

// Must be included last.
#include "google/protobuf/port_def.inc"

namespace google {
namespace protobuf {
namespace internal {
namespace {

bool EnumCompareByName(const EnumEntry& a, const EnumEntry& b) {
  return absl::string_view(a.name) < absl::string_view(b.name);
}

// Gets the numeric value of the EnumEntry at the given index, but returns a
// special value for index -1. This gives a way to use std::lower_bound on a
// sorted array of indices while searching for the target value, which we
// associate with the sentinel index -1.
int GetValue(const EnumEntry* enums, int i, int target) {
  if (i == -1) {
    return target;
  } else {
    return enums[i].value;
  }
}

}  // namespace

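// Binary-searches `enums` (which must be sorted by name) for `name`. A
// minimal usage sketch, with hypothetical entries:
//
//   static const EnumEntry kEntries[] = {{"BAR", 2}, {"FOO", 1}};
//   int value = 0;
//   if (LookUpEnumValue(kEntries, 2, "FOO", &value)) {
//     // value is now 1.
//   }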
bool LookUpEnumValue(const EnumEntry* enums, size_t size,
                     absl::string_view name, int* value) {
  EnumEntry target{name, 0};
  auto it = std::lower_bound(enums, enums + size, target, EnumCompareByName);
  if (it != enums + size && it->name == name) {
    *value = it->value;
    return true;
  }
  return false;
}

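// Looks up `value` among the entry indices in `sorted_indices`, which must be
// sorted by the entries' numeric values. The trick: std::lower_bound searches
// for the sentinel index -1, whose value GetValue defines to be `value`
// itself, so it lands on the first real index whose entry value is >= the
// target. Returns the position within `sorted_indices` on a match, or -1.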
int LookUpEnumName(const EnumEntry* enums, const int* sorted_indices,
                   size_t size, int value) {
  auto comparator = [enums, value](int a, int b) {
    return GetValue(enums, a, value) < GetValue(enums, b, value);
  };
  auto it =
      std::lower_bound(sorted_indices, sorted_indices + size, -1, comparator);
  if (it != sorted_indices + size && enums[*it].value == value) {
    return it - sorted_indices;
  }
  return -1;
}

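// Eagerly constructs the enum name strings in `sorted_indices` order, so an
// index returned by LookUpEnumName above selects the matching string, and
// registers each string for destruction at shutdown.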
bool InitializeEnumStrings(
    const EnumEntry* enums, const int* sorted_indices, size_t size,
    internal::ExplicitlyConstructed<std::string>* enum_strings) {
  for (size_t i = 0; i < size; ++i) {
    enum_strings[i].Construct(enums[sorted_indices[i]].name);
    internal::OnShutdownDestroyString(enum_strings[i].get_mutable());
  }
  return true;
}

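// A sketch of how generated code is expected to tie the helpers above
// together (identifiers here are illustrative, not the exact generated
// names):
//
//   const std::string& MyEnum_Name(int value) {
//     static const bool kInit = InitializeEnumStrings(
//         kEntries, kEntriesByNumber, kNumEntries, kStrings);
//     (void)kInit;
//     int idx = LookUpEnumName(kEntries, kEntriesByNumber, kNumEntries,
//                              value);
//     return idx == -1 ? GetEmptyString() : kStrings[idx].get();
//   }
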
bool ValidateEnum(int value, const uint32_t* data) {
  return ValidateEnumInlined(value, data);
}

struct EytzingerLayoutSorter {
  absl::Span<const int32_t> input;
  absl::Span<uint32_t> output;
  size_t i;

  // This is recursive, but the maximum depth is log(N), so it should be safe.
  void Sort(size_t output_index = 0) {
    if (output_index < input.size()) {
      Sort(2 * output_index + 1);
      output[output_index] = input[i++];
      Sort(2 * output_index + 2);
    }
  }
};
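
// For example, Sort() maps the sorted input {1, 2, 3, 4, 5, 6, 7} to the
// Eytzinger (BFS) order {4, 2, 6, 1, 3, 5, 7}: the in-order traversal of the
// implicit tree rooted at slot 0 (children at 2i+1 and 2i+2) assigns values
// in ascending order. A binary search over this layout touches contiguous
// words for its first few comparisons, which is the cache-friendliness the
// layout is chosen for.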
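
// Encodes the sorted, strictly increasing `values` into the compact table
// consumed by ValidateEnum/ValidateEnumInlined. Layout of the output words:
//   word 0: sequence start (low 16 bits) | sequence length (high 16 bits)
//   word 1: bitmap size in bits (low 16) | fallback value count (high 16)
//   then:   the bitmap blocks, then the fallback values in Eytzinger order.
// Worked example: {0, 1, 2, 3, 900} encodes 0..3 as the sequence (start 0,
// length 4), keeps the bitmap empty, and stores 900 as a fallback value,
// yielding {0x00040000, 0x00010000, 900}.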
std::vector<uint32_t> GenerateEnumData(absl::Span<const int32_t> values) {
  const auto sorted_and_unique = [&] {
    for (size_t i = 0; i + 1 < values.size(); ++i) {
      if (values[i] >= values[i + 1]) return false;
    }
    return true;
  };
  ABSL_DCHECK(sorted_and_unique());
  std::vector<int32_t> fallback_values_too_large, fallback_values_after_bitmap;
  std::vector<uint32_t> bitmap_values;
  constexpr size_t kBitmapBlockSize = 32;
  absl::optional<int16_t> start_sequence;
  uint32_t sequence_length = 0;
  for (int32_t v : values) {
    // If we don't yet have a sequence, start it.
    if (!start_sequence.has_value()) {
      // But only if the start value fits in the 16-bit field that stores it.
      if (static_cast<int16_t>(v) != v) {
        fallback_values_too_large.push_back(v);
        continue;
      }

      start_sequence = v;
      sequence_length = 1;
      continue;
    }
    // If we can extend the sequence, do so.
    if (v == static_cast<int32_t>(*start_sequence) +
                 static_cast<int32_t>(sequence_length) &&
        sequence_length < 0xFFFF) {
      ++sequence_length;
      continue;
    }

    // We adjust the bitmap values to be relative to the end of the sequence.
    const auto adjust = [&](int32_t v) -> uint32_t {
      // Cast to int64_t first to avoid overflow. The result is guaranteed to
      // be non-negative and to fit in uint32_t.
      int64_t a = static_cast<int64_t>(v) - *start_sequence - sequence_length;
      ABSL_DCHECK(a >= 0);
      ABSL_DCHECK_EQ(a, static_cast<uint32_t>(a));
      return a;
    };
    const uint32_t adjusted = adjust(v);

    const auto add_bit = [&](uint32_t bit) {
      bitmap_values[bit / kBitmapBlockSize] |= uint32_t{1}
                                               << (bit % kBitmapBlockSize);
    };

    // If it fits in the already-allocated bitmap, add it there.
    if (adjusted < kBitmapBlockSize * bitmap_values.size()) {
      ABSL_DCHECK_EQ(fallback_values_after_bitmap.size(), 0);
      add_bit(adjusted);
      continue;
    }

    // The value fits neither the sequence nor the current bitmap. Decide
    // whether it is cheaper to add it to the fallback list, or to grow the
    // bitmap and collapse all the fallback values after the bitmap into it.
    const size_t cost_if_fallback =
        bitmap_values.size() + (1 + fallback_values_after_bitmap.size());
    const size_t rounded_bitmap_size =
        (adjusted + kBitmapBlockSize) / kBitmapBlockSize;
    const size_t cost_if_collapse = rounded_bitmap_size;

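    // E.g. with 2 bitmap words allocated and 3 pending fallback values,
    // cost_if_fallback = 2 + (1 + 3) = 6 output words, so collapsing wins
    // whenever the grown bitmap needs at most 6 words (and stays under the
    // 16-bit size limit checked below).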
    if (cost_if_collapse <= cost_if_fallback &&
        kBitmapBlockSize * rounded_bitmap_size < 0x10000) {
      // Collapse the existing fallback values into the grown bitmap, then add
      // the new one.
      ABSL_DCHECK_GT(rounded_bitmap_size, bitmap_values.size());
      bitmap_values.resize(rounded_bitmap_size);
      for (int32_t to_collapse : fallback_values_after_bitmap) {
        add_bit(adjust(to_collapse));
      }
      fallback_values_after_bitmap.clear();
      add_bit(adjusted);
    } else {
      fallback_values_after_bitmap.push_back(v);
    }
  }

  // Each fallback list was collected in ascending order (the input is
  // sorted), so merging keeps the combined list sorted for the layout step
  // below.
  std::vector<int32_t> fallback_values;
  if (fallback_values_after_bitmap.empty()) {
    fallback_values = std::move(fallback_values_too_large);
  } else if (fallback_values_too_large.empty()) {
    fallback_values = std::move(fallback_values_after_bitmap);
  } else {
    fallback_values.resize(fallback_values_too_large.size() +
                           fallback_values_after_bitmap.size());
    std::merge(fallback_values_too_large.begin(),
               fallback_values_too_large.end(),
               fallback_values_after_bitmap.begin(),
               fallback_values_after_bitmap.end(), &fallback_values[0]);
  }

  std::vector<uint32_t> output(
      2 /* header: seq start + seq len, bitmap len + fallback len */ +
      bitmap_values.size() + fallback_values.size());
  uint32_t* p = output.data();

  ABSL_DCHECK_EQ(sequence_length, static_cast<uint16_t>(sequence_length));
  *p++ = uint32_t{static_cast<uint16_t>(start_sequence.value_or(0))} |
         (uint32_t{sequence_length} << 16);
  ABSL_DCHECK_EQ(
      kBitmapBlockSize * bitmap_values.size(),
      static_cast<uint16_t>(kBitmapBlockSize * bitmap_values.size()));
  ABSL_DCHECK_EQ(fallback_values.size(),
                 static_cast<uint16_t>(fallback_values.size()));
  *p++ = static_cast<uint32_t>(kBitmapBlockSize * bitmap_values.size()) |
         static_cast<uint32_t>(fallback_values.size() << 16);
  p = std::copy(bitmap_values.begin(), bitmap_values.end(), p);

  EytzingerLayoutSorter{fallback_values,
                        absl::MakeSpan(p, fallback_values.size())}
      .Sort();

  return output;
}
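
// A minimal, test-style sketch of the round trip (hypothetical values):
//
//   std::vector<uint32_t> data = GenerateEnumData({0, 1, 2, 3, 900});
//   ABSL_CHECK(ValidateEnum(0, data.data()));    // in the sequence
//   ABSL_CHECK(ValidateEnum(900, data.data()));  // fallback value
//   ABSL_CHECK(!ValidateEnum(4, data.data()));   // not a member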

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include "google/protobuf/port_undef.inc"