// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc.  All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

#ifndef GOOGLE_PROTOBUF_PARSE_CONTEXT_H__
#define GOOGLE_PROTOBUF_PARSE_CONTEXT_H__

#include <climits>
#include <cstddef>
#include <cstdint>
#include <cstring>
#include <limits>
#include <string>
#include <type_traits>
#include <utility>

#include "absl/base/config.h"
#include "absl/log/absl_check.h"
#include "absl/log/absl_log.h"
#include "absl/numeric/bits.h"
#include "absl/strings/cord.h"
#include "absl/strings/internal/resize_uninitialized.h"
#include "absl/strings/string_view.h"
#include "absl/types/optional.h"
#include "google/protobuf/arena.h"
#include "google/protobuf/arenastring.h"
#include "google/protobuf/endian_pb.h"
#include "google/protobuf/inlined_string_field.h"
#include "google/protobuf/io/coded_stream.h"
#include "google/protobuf/io/zero_copy_stream.h"
#include "google/protobuf/metadata_lite.h"
#include "google/protobuf/port.h"
#include "google/protobuf/repeated_field.h"
#include "google/protobuf/wire_format_lite.h"


// Must be included last.
#include "google/protobuf/port_def.inc"


namespace google {
namespace protobuf {

class UnknownFieldSet;
class DescriptorPool;
class MessageFactory;

namespace internal {

// Template code below needs to know about the existence of these functions.
PROTOBUF_EXPORT void WriteVarint(uint32_t num, uint64_t val, std::string* s);
PROTOBUF_EXPORT void WriteLengthDelimited(uint32_t num, absl::string_view val,
                                          std::string* s);
// Inline because it is just forwarding to s->WriteVarint
inline void WriteVarint(uint32_t num, uint64_t val, UnknownFieldSet* s);
inline void WriteLengthDelimited(uint32_t num, absl::string_view val,
                                 UnknownFieldSet* s);


// The basic abstraction the parser is designed for is a slight modification
// of the ZeroCopyInputStream (ZCIS) abstraction. A ZCIS presents a serialized
// stream as a series of buffers that concatenate to the full stream.
// Pictorially a ZCIS presents a stream in chunks like so
// [---------------------------------------------------------------]
// [---------------------] chunk 1
//                      [----------------------------] chunk 2
//                                          chunk 3 [--------------]
//
// Where the '-' represent the bytes which are vertically lined up with the
// bytes of the stream. The proto parser requires its input to be presented
// similarly with the extra
// property that each chunk has kSlopBytes past its end that overlaps with the
// first kSlopBytes of the next chunk, or if there is no next chunk it is at
// least still valid to read those bytes. Again, pictorially, we now have
//
// [---------------------------------------------------------------]
// [-------------------....] chunk 1
//                    [------------------------....] chunk 2
//                                    chunk 3 [------------------..**]
//                                                      chunk 4 [--****]
// Here '-' means the bytes of the stream or chunk and '.' means bytes past the
// chunk that match up with the start of the next chunk. Above, each chunk has
// 4 '.' after the chunk. In the case these 'overflow' bytes represent bytes
// past the stream, indicated by '*' above, their values are unspecified. It is
// still legal to read them (i.e., it should not segfault). Reading past the
// end should be detected by the user and indicated as an error.
//
// The reason for this admittedly unconventional invariant is to ruthlessly
// optimize the protobuf parser. Having an overlap helps in two important ways.
// Firstly it alleviates having to perform bounds checks if a piece of code
// is guaranteed to not read more than kSlopBytes. Secondly, and more
// importantly, the protobuf wireformat is such that reading a key/value pair
// always takes less than 16 bytes. This removes the need to switch to the next
// buffer in the middle of reading primitive values. Hence there is no need to
// store and load the current position.
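//
// For example (an illustrative sketch, not part of the parser itself): under
// this invariant a fixed 64-bit value can be read without a bounds check,
// because a valid parse position is never more than kSlopBytes before the
// readable end:
//
// ```
// // Assumes the slop-byte invariant above; `ptr` points at an encoded
// // fixed64 payload. The 8-byte read cannot fault even if it crosses the
// // logical end of the current chunk.
// uint64_t value;
// std::memcpy(&value, ptr, sizeof(value));
// ptr += sizeof(value);
// ```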

class PROTOBUF_EXPORT EpsCopyInputStream {
 public:
  enum { kMaxCordBytesToCopy = 512 };
  explicit EpsCopyInputStream(bool enable_aliasing)
      : aliasing_(enable_aliasing ? kOnPatch : kNoAliasing) {}

  void BackUp(const char* ptr) {
    ABSL_DCHECK(ptr <= buffer_end_ + kSlopBytes);
    int count;
    if (next_chunk_ == patch_buffer_) {
      count = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    } else {
      count = size_ + static_cast<int>(buffer_end_ - ptr);
    }
    if (count > 0) StreamBackUp(count);
  }

  // In sanitizer mode we use memory poisoning to guarantee that:
  //  - We do not read an uninitialized token.
  //  - We would like to verify that this token was consumed, but unfortunately
  //    __asan_address_is_poisoned is allowed to have false negatives.
  class LimitToken {
   public:
    LimitToken() { PROTOBUF_POISON_MEMORY_REGION(&token_, sizeof(token_)); }

    explicit LimitToken(int token) : token_(token) {
      PROTOBUF_UNPOISON_MEMORY_REGION(&token_, sizeof(token_));
    }

    LimitToken(const LimitToken&) = delete;
    LimitToken& operator=(const LimitToken&) = delete;

    LimitToken(LimitToken&& other) { *this = std::move(other); }

    LimitToken& operator=(LimitToken&& other) {
      PROTOBUF_UNPOISON_MEMORY_REGION(&token_, sizeof(token_));
      token_ = other.token_;
      PROTOBUF_POISON_MEMORY_REGION(&other.token_, sizeof(token_));
      return *this;
    }

    ~LimitToken() { PROTOBUF_UNPOISON_MEMORY_REGION(&token_, sizeof(token_)); }

    int token() && {
      int t = token_;
      PROTOBUF_POISON_MEMORY_REGION(&token_, sizeof(token_));
      return t;
    }

   private:
    int token_;
  };

  // If return value is negative it's an error
  PROTOBUF_NODISCARD LimitToken PushLimit(const char* ptr, int limit) {
    ABSL_DCHECK(limit >= 0 && limit <= INT_MAX - kSlopBytes);
    // This add is safe due to the invariant above, because
    // ptr - buffer_end_ <= kSlopBytes.
    limit += static_cast<int>(ptr - buffer_end_);
    limit_end_ = buffer_end_ + (std::min)(0, limit);
    auto old_limit = limit_;
    limit_ = limit;
    return LimitToken(old_limit - limit);
  }

  PROTOBUF_NODISCARD bool PopLimit(LimitToken delta) {
    // We must update the limit first before the early return. Otherwise, we can
    // end up with an invalid limit and it can lead to integer overflows.
    limit_ = limit_ + std::move(delta).token();
    if (PROTOBUF_PREDICT_FALSE(!EndedAtLimit())) return false;
    // TODO We could remove this line and hoist the code to
    // DoneFallback. Study the perf/bin-size effects.
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return true;
  }

  PROTOBUF_NODISCARD const char* Skip(const char* ptr, int size) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      return ptr + size;
    }
    return SkipFallback(ptr, size);
  }
  PROTOBUF_NODISCARD const char* ReadString(const char* ptr, int size,
                                            std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      // Fundamentally we just want to do assign to the string.
      // However micro-benchmarks regress on string reading cases. So we copy
      // the same logic from the old CodedInputStream ReadString. Note: as of
      // Apr 2021, this is still a significant win over `assign()`.
      absl::strings_internal::STLStringResizeUninitialized(s, size);
      char* z = &(*s)[0];
      memcpy(z, ptr, size);
      return ptr + size;
    }
    return ReadStringFallback(ptr, size, s);
  }
  PROTOBUF_NODISCARD const char* AppendString(const char* ptr, int size,
                                              std::string* s) {
    if (size <= buffer_end_ + kSlopBytes - ptr) {
      s->append(ptr, size);
      return ptr + size;
    }
    return AppendStringFallback(ptr, size, s);
  }
  // Implemented in arenastring.cc
  PROTOBUF_NODISCARD const char* ReadArenaString(const char* ptr,
                                                 ArenaStringPtr* s,
                                                 Arena* arena);

  PROTOBUF_NODISCARD const char* ReadCord(const char* ptr, int size,
                                          ::absl::Cord* cord) {
    if (size <= std::min<int>(static_cast<int>(buffer_end_ + kSlopBytes - ptr),
                              kMaxCordBytesToCopy)) {
      *cord = absl::string_view(ptr, size);
      return ptr + size;
    }
    return ReadCordFallback(ptr, size, cord);
  }


  template <typename Tag, typename T>
  PROTOBUF_NODISCARD const char* ReadRepeatedFixed(const char* ptr,
                                                   Tag expected_tag,
                                                   RepeatedField<T>* out);

  template <typename T>
  PROTOBUF_NODISCARD const char* ReadPackedFixed(const char* ptr, int size,
                                                 RepeatedField<T>* out);
  template <typename Add>
  PROTOBUF_NODISCARD const char* ReadPackedVarint(const char* ptr, Add add) {
    return ReadPackedVarint(ptr, add, [](int) {});
  }
  template <typename Add, typename SizeCb>
  PROTOBUF_NODISCARD const char* ReadPackedVarint(const char* ptr, Add add,
                                                  SizeCb size_callback);

  uint32_t LastTag() const { return last_tag_minus_1_ + 1; }
  bool ConsumeEndGroup(uint32_t start_tag) {
    bool res = last_tag_minus_1_ == start_tag;
    last_tag_minus_1_ = 0;
    return res;
  }
  bool EndedAtLimit() const { return last_tag_minus_1_ == 0; }
  bool EndedAtEndOfStream() const { return last_tag_minus_1_ == 1; }
  void SetLastTag(uint32_t tag) { last_tag_minus_1_ = tag - 1; }
  void SetEndOfStream() { last_tag_minus_1_ = 1; }
  bool IsExceedingLimit(const char* ptr) {
    return ptr > limit_end_ &&
           (next_chunk_ == nullptr || ptr - buffer_end_ > limit_);
  }
  bool AliasingEnabled() const { return aliasing_ != kNoAliasing; }
  int BytesUntilLimit(const char* ptr) const {
    return limit_ + static_cast<int>(buffer_end_ - ptr);
  }
  // Maximum number of sequential bytes that can be read starting from `ptr`.
  int MaximumReadSize(const char* ptr) const {
    return static_cast<int>(limit_end_ - ptr) + kSlopBytes;
  }
  // Returns true if more data is available; if false is returned, one has to
  // call Done for further checks.
  bool DataAvailable(const char* ptr) { return ptr < limit_end_; }

 protected:
  // Returns true if limit (either an explicit limit or end of stream) is
  // reached. It aligns *ptr across buffer seams.
  // If limit is exceeded, it returns true and ptr is set to null.
  bool DoneWithCheck(const char** ptr, int d) {
    ABSL_DCHECK(*ptr);
    if (PROTOBUF_PREDICT_TRUE(*ptr < limit_end_)) return false;
    int overrun = static_cast<int>(*ptr - buffer_end_);
    ABSL_DCHECK_LE(overrun, kSlopBytes);  // Guaranteed by parse loop.
    if (overrun ==
        limit_) {  //  No need to flip buffers if we ended on a limit.
      // If we actually overrun the buffer and next_chunk_ is null, it means
      // the stream ended and we passed the stream end.
      if (overrun > 0 && next_chunk_ == nullptr) *ptr = nullptr;
      return true;
    }
    auto res = DoneFallback(overrun, d);
    *ptr = res.first;
    return res.second;
  }

  const char* InitFrom(absl::string_view flat) {
    overall_limit_ = 0;
    if (flat.size() > kSlopBytes) {
      limit_ = kSlopBytes;
      limit_end_ = buffer_end_ = flat.data() + flat.size() - kSlopBytes;
      next_chunk_ = patch_buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return flat.data();
    } else {
      if (!flat.empty()) {
        std::memcpy(patch_buffer_, flat.data(), flat.size());
      }
      limit_ = 0;
      limit_end_ = buffer_end_ = patch_buffer_ + flat.size();
      next_chunk_ = nullptr;
      if (aliasing_ == kOnPatch) {
        aliasing_ = reinterpret_cast<std::uintptr_t>(flat.data()) -
                    reinterpret_cast<std::uintptr_t>(patch_buffer_);
      }
      return patch_buffer_;
    }
  }

  const char* InitFrom(io::ZeroCopyInputStream* zcis);

  const char* InitFrom(io::ZeroCopyInputStream* zcis, int limit) {
    if (limit == -1) return InitFrom(zcis);
    overall_limit_ = limit;
    auto res = InitFrom(zcis);
    limit_ = limit - static_cast<int>(buffer_end_ - res);
    limit_end_ = buffer_end_ + (std::min)(0, limit_);
    return res;
  }

 private:
  enum { kSlopBytes = 16, kPatchBufferSize = 32 };
  static_assert(kPatchBufferSize >= kSlopBytes * 2,
                "Patch buffer needs to be at least large enough to hold all "
                "the slop bytes from the previous buffer, plus the first "
                "kSlopBytes from the next buffer.");

  const char* limit_end_;  // buffer_end_ + min(limit_, 0)
  const char* buffer_end_;
  const char* next_chunk_;
  int size_;
  int limit_;  // relative to buffer_end_;
  io::ZeroCopyInputStream* zcis_ = nullptr;
  char patch_buffer_[kPatchBufferSize] = {};
  enum { kNoAliasing = 0, kOnPatch = 1, kNoDelta = 2 };
  std::uintptr_t aliasing_ = kNoAliasing;
  // This variable is used to communicate how the parse ended, in order to
  // completely verify the parsed data. A wire-format parse can end because of
  // one of the following conditions:
  // 1) A parse can end on a pushed limit.
  // 2) A parse can end on End Of Stream (EOS).
  // 3) A parse can end on 0 tag (only valid for toplevel message).
  // 4) A parse can end on an end-group tag.
  // This variable should always be set to 0, which indicates case 1. If the
  // parse terminated due to EOS (case 2), it's set to 1. In case the parse
  // ended due to a terminating tag (case 3 and 4) it's set to (tag - 1).
  // This var doesn't really belong in EpsCopyInputStream and should be part of
  // the ParseContext, but case 2 is most easily and optimally implemented in
  // DoneFallback.
  uint32_t last_tag_minus_1_ = 0;
  int overall_limit_ = INT_MAX;  // Overall limit independent of pushed limits.
  // Pretty random large number that seems like a safe allocation on most
  // systems. TODO do we need to set this as a build flag?
  enum { kSafeStringSize = 50000000 };

  // Advances to the next buffer chunk and returns a pointer to the same
  // logical place in the stream as set by overrun. Overrun indicates the
  // position in the slop region where the parse left off
  // (0 <= overrun <= kSlopBytes). Returns true if at limit, at which point the
  // returned pointer may be null if there was an error. The invariant of this
  // function is that it's guaranteed that kSlopBytes bytes can be accessed
  // from the returned ptr. This function might advance more than one buffer in
  // the underlying ZeroCopyInputStream.
  std::pair<const char*, bool> DoneFallback(int overrun, int depth);
  // Advances to the next buffer; at most one call to Next() on the underlying
  // ZeroCopyInputStream is made. This function DOES NOT match the returned
  // pointer to where in the slop region the parse ends, hence no overrun
  // parameter. This is useful for string operations where you always copy
  // to the end of the buffer (including the slop region).
  const char* Next();
  // overrun is the location in the slop region where the stream currently is
  // (0 <= overrun <= kSlopBytes). It is used to avoid flipping to the next
  // buffer of the ZeroCopyInputStream when the parse will end in the last
  // kSlopBytes of the current buffer. depth is the current depth of nested
  // groups (or negative if the use case does not need careful tracking).
  inline const char* NextBuffer(int overrun, int depth);
  const char* SkipFallback(const char* ptr, int size);
  const char* AppendStringFallback(const char* ptr, int size, std::string* str);
  const char* ReadStringFallback(const char* ptr, int size, std::string* str);
  const char* ReadCordFallback(const char* ptr, int size, absl::Cord* cord);
  static bool ParseEndsInSlopRegion(const char* begin, int overrun, int depth);
  bool StreamNext(const void** data) {
    bool res = zcis_->Next(data, &size_);
    if (res) overall_limit_ -= size_;
    return res;
  }
  void StreamBackUp(int count) {
    zcis_->BackUp(count);
    overall_limit_ += count;
  }

  template <typename A>
  const char* AppendSize(const char* ptr, int size, const A& append) {
    int chunk_size = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    do {
      ABSL_DCHECK(size > chunk_size);
      if (next_chunk_ == nullptr) return nullptr;
      append(ptr, chunk_size);
      ptr += chunk_size;
      size -= chunk_size;
      // TODO Next calls NextBuffer which generates buffers with
      // overlap and thus incurs cost of copying the slop regions. This is not
      // necessary for reading strings. We should just call Next buffers.
      if (limit_ <= kSlopBytes) return nullptr;
      ptr = Next();
      if (ptr == nullptr) return nullptr;  // passed the limit
      ptr += kSlopBytes;
      chunk_size = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
    } while (size > chunk_size);
    append(ptr, size);
    return ptr + size;
  }

  // AppendUntilEnd appends data until a limit (either a PushLimit or end of
  // stream). Normal payloads are from length-delimited fields which have an
  // explicit size. Reading until the limit only happens when the string takes
  // the place of a protobuf, i.e., RawMessage, lazy fields and implicit weak
  // messages. We keep these methods private and friend them.
  template <typename A>
  const char* AppendUntilEnd(const char* ptr, const A& append) {
    if (ptr - buffer_end_ > limit_) return nullptr;
    while (limit_ > kSlopBytes) {
      size_t chunk_size = buffer_end_ + kSlopBytes - ptr;
      append(ptr, chunk_size);
      ptr = Next();
      if (ptr == nullptr) return limit_end_;
      ptr += kSlopBytes;
    }
    auto end = buffer_end_ + limit_;
    ABSL_DCHECK(end >= ptr);
    append(ptr, end - ptr);
    return end;
  }

  PROTOBUF_NODISCARD const char* AppendString(const char* ptr,
                                              std::string* str) {
    return AppendUntilEnd(
        ptr, [str](const char* p, ptrdiff_t s) { str->append(p, s); });
  }
  friend class ImplicitWeakMessage;

  // Needs access to kSlopBytes.
  friend PROTOBUF_EXPORT std::pair<const char*, int32_t> ReadSizeFallback(
      const char* p, uint32_t res);
};

using LazyEagerVerifyFnType = const char* (*)(const char* ptr,
                                              ParseContext* ctx);
using LazyEagerVerifyFnRef = std::remove_pointer<LazyEagerVerifyFnType>::type&;

// ParseContext holds all data that is global to the entire parse. Most
// importantly it contains the input stream, but it also tracks recursion
// depth and stores the end-group tag, in case a parser ended on an end-group
// tag, to verify matching start/end-group tags.
class PROTOBUF_EXPORT ParseContext : public EpsCopyInputStream {
 public:
  struct Data {
    const DescriptorPool* pool = nullptr;
    MessageFactory* factory = nullptr;
  };

  template <typename... T>
  ParseContext(int depth, bool aliasing, const char** start, T&&... args)
      : EpsCopyInputStream(aliasing), depth_(depth) {
    *start = InitFrom(std::forward<T>(args)...);
  }

  struct Spawn {};
  static constexpr Spawn kSpawn = {};

  // Creates a new context from a given "ctx" to inherit a few attributes to
  // emulate continued parsing. For example, recursion depth or descriptor pools
  // must be passed down to a new "spawned" context to maintain the same parse
  // context. Note that the spawned context always disables aliasing (different
  // input).
  template <typename... T>
  ParseContext(Spawn, const ParseContext& ctx, const char** start, T&&... args)
      : EpsCopyInputStream(false),
        depth_(ctx.depth_),
        data_(ctx.data_) {
    *start = InitFrom(std::forward<T>(args)...);
  }

  // Move constructor and assignment operator are not supported because "ptr"
  // for parsing may have pointed to an inlined buffer (patch_buffer_) which can
  // be invalid afterwards.
  ParseContext(ParseContext&&) = delete;
  ParseContext& operator=(ParseContext&&) = delete;
  ParseContext& operator=(const ParseContext&) = delete;

  void TrackCorrectEnding() { group_depth_ = 0; }

  // Done should only be called when the parsing pointer is pointing to the
  // beginning of field data - that is, at a tag.  Or if it is NULL.
  bool Done(const char** ptr) { return DoneWithCheck(ptr, group_depth_); }

  int depth() const { return depth_; }

  Data& data() { return data_; }
  const Data& data() const { return data_; }

  const char* ParseMessage(MessageLite* msg, const char* ptr);

  // Read the length prefix, push the new limit, call the func(ptr), and then
  // pop the limit. Useful for situations that don't have an actual message.
  template <typename Func>
  PROTOBUF_NODISCARD const char* ParseLengthDelimitedInlined(const char*,
                                                             const Func& func);

  // Push the recursion depth, call the func(ptr), and then pop depth. Useful
  // for situations that don't have an actual message.
  template <typename Func>
  PROTOBUF_NODISCARD const char* ParseGroupInlined(const char* ptr,
                                                   uint32_t start_tag,
                                                   const Func& func);

  // Use a template to avoid a strong dependency on TcParser. All callers will
  // have the dependency anyway.
  template <typename Parser = TcParser>
  PROTOBUF_ALWAYS_INLINE const char* ParseMessage(
      MessageLite* msg, const TcParseTableBase* tc_table, const char* ptr) {
    return ParseLengthDelimitedInlined(ptr, [&](const char* ptr) {
      return Parser::ParseLoop(msg, ptr, this, tc_table);
    });
  }
  template <typename Parser = TcParser>
  PROTOBUF_ALWAYS_INLINE const char* ParseGroup(
      MessageLite* msg, const TcParseTableBase* tc_table, const char* ptr,
      uint32_t start_tag) {
    return ParseGroupInlined(ptr, start_tag, [&](const char* ptr) {
      return Parser::ParseLoop(msg, ptr, this, tc_table);
    });
  }

  PROTOBUF_NODISCARD PROTOBUF_NDEBUG_INLINE const char* ParseGroup(
      MessageLite* msg, const char* ptr, uint32_t tag) {
    if (--depth_ < 0) return nullptr;
    group_depth_++;
    auto old_depth = depth_;
    auto old_group_depth = group_depth_;
    ptr = msg->_InternalParse(ptr, this);
    if (ptr != nullptr) {
      ABSL_DCHECK_EQ(old_depth, depth_);
      ABSL_DCHECK_EQ(old_group_depth, group_depth_);
    }
    group_depth_--;
    depth_++;
    if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(tag))) return nullptr;
    return ptr;
  }

 private:
  // Out-of-line routine to save space in ParseContext::ParseMessage<T>
  //   LimitToken old;
  //   ptr = ReadSizeAndPushLimitAndDepth(ptr, &old)
  // is equivalent to:
  //   int size = ReadSize(&ptr);
  //   if (!ptr) return nullptr;
  //   LimitToken old = PushLimit(ptr, size);
  //   if (--depth_ < 0) return nullptr;
  PROTOBUF_NODISCARD const char* ReadSizeAndPushLimitAndDepth(
      const char* ptr, LimitToken* old_limit);

  // As above, but fully inlined for the cases where we care about performance
  // more than size, e.g., TcParser.
  PROTOBUF_NODISCARD PROTOBUF_ALWAYS_INLINE const char*
  ReadSizeAndPushLimitAndDepthInlined(const char* ptr, LimitToken* old_limit);

  // The context keeps an internal stack to keep track of the recursive
  // part of the parse state.
  // Current depth of the active parser, depth counts down.
  // This is used to limit recursion depth (to prevent overflow on malicious
  // data), but is also used to index in stack_ to store the current state.
  int depth_;
  // Unfortunately necessary for the fringe case of ending on 0 or end-group tag
  // in the last kSlopBytes of a ZeroCopyInputStream chunk.
  int group_depth_ = INT_MIN;
  Data data_;
};

template <int>
struct EndianHelper;

template <>
struct EndianHelper<1> {
  static uint8_t Load(const void* p) { return *static_cast<const uint8_t*>(p); }
};

template <>
struct EndianHelper<2> {
  static uint16_t Load(const void* p) {
    uint16_t tmp;
    std::memcpy(&tmp, p, 2);
    return little_endian::ToHost(tmp);
  }
};

template <>
struct EndianHelper<4> {
  static uint32_t Load(const void* p) {
    uint32_t tmp;
    std::memcpy(&tmp, p, 4);
    return little_endian::ToHost(tmp);
  }
};

template <>
struct EndianHelper<8> {
  static uint64_t Load(const void* p) {
    uint64_t tmp;
    std::memcpy(&tmp, p, 8);
    return little_endian::ToHost(tmp);
  }
};

template <typename T>
T UnalignedLoad(const char* p) {
  auto tmp = EndianHelper<sizeof(T)>::Load(p);
  T res;
  memcpy(&res, &tmp, sizeof(T));
  return res;
}
template <typename T, typename Void,
          typename = std::enable_if_t<std::is_same<Void, void>::value>>
T UnalignedLoad(const Void* p) {
  return UnalignedLoad<T>(reinterpret_cast<const char*>(p));
}
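
// Usage sketch (illustration only): fixed-width wire values are read with
// UnalignedLoad, which is safe for any alignment and converts the wire's
// little-endian byte order to the host order:
//
// ```
// uint32_t raw = UnalignedLoad<uint32_t>(ptr);  // fixed32 payload
// float f = UnalignedLoad<float>(ptr);          // same bytes, read as float
// ptr += sizeof(uint32_t);
// ```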

PROTOBUF_EXPORT
std::pair<const char*, uint32_t> VarintParseSlow32(const char* p, uint32_t res);
PROTOBUF_EXPORT
std::pair<const char*, uint64_t> VarintParseSlow64(const char* p, uint32_t res);

inline const char* VarintParseSlow(const char* p, uint32_t res, uint32_t* out) {
  auto tmp = VarintParseSlow32(p, res);
  *out = tmp.second;
  return tmp.first;
}

inline const char* VarintParseSlow(const char* p, uint32_t res, uint64_t* out) {
  auto tmp = VarintParseSlow64(p, res);
  *out = tmp.second;
  return tmp.first;
}

#if defined(__aarch64__) && !defined(_MSC_VER)
// Generally speaking, the ARM-optimized Varint decode algorithm is to extract
// and concatenate all potentially valid data bits, compute the actual length
// of the Varint, and mask off the data bits which are not actually part of the
// result.  More detail on the two main parts is shown below.
//
// 1) Extract and concatenate all potentially valid data bits.
//    Two ARM-specific features help significantly:
//    a) Efficient and non-destructive bit extraction (UBFX)
//    b) A single instruction can perform both a shift and an OR with the
//       shifted second operand in one cycle.  E.g., the following two lines do
//       the same thing
//       ```result = operand_1 | (operand_2 << 7);```
//       ```ORR %[result], %[operand_1], %[operand_2], LSL #7```
//    The figure below shows the implementation for handling four chunks.
//
// Bits   32    31-24    23   22-16    15    14-8      7     6-0
//      +----+---------+----+---------+----+---------+----+---------+
//      |CB 3| Chunk 3 |CB 2| Chunk 2 |CB 1| Chunk 1 |CB 0| Chunk 0 |
//      +----+---------+----+---------+----+---------+----+---------+
//                |              |              |              |
//               UBFX           UBFX           UBFX           UBFX    -- cycle 1
//                |              |              |              |
//                V              V              V              V
//               Combined LSL #7 and ORR     Combined LSL #7 and ORR  -- cycle 2
//                                 |             |
//                                 V             V
//                            Combined LSL #14 and ORR                -- cycle 3
//                                       |
//                                       V
//                                Parsed bits 0-27
//
//
// 2) Calculate the index of the cleared continuation bit in order to determine
//    where the encoded Varint ends and the size of the decoded value.  The
//    easiest way to do this is mask off all data bits, leaving just the
//    continuation bits.  We actually need to do the masking on an inverted
//    copy of the data, which leaves a 1 in all continuation bits which were
//    originally clear.  The number of trailing zeroes in this value indicates
//    the size of the Varint.
//
//  AND  0x80    0x80    0x80    0x80    0x80    0x80    0x80    0x80
//
// Bits   63      55      47      39      31      23      15       7
//      +----+--+----+--+----+--+----+--+----+--+----+--+----+--+----+--+
// ~    |CB 7|  |CB 6|  |CB 5|  |CB 4|  |CB 3|  |CB 2|  |CB 1|  |CB 0|  |
//      +----+--+----+--+----+--+----+--+----+--+----+--+----+--+----+--+
//         |       |       |       |       |       |       |       |
//         V       V       V       V       V       V       V       V
// Bits   63      55      47      39      31      23      15       7
//      +----+--+----+--+----+--+----+--+----+--+----+--+----+--+----+--+
//      |~CB 7|0|~CB 6|0|~CB 5|0|~CB 4|0|~CB 3|0|~CB 2|0|~CB 1|0|~CB 0|0|
//      +----+--+----+--+----+--+----+--+----+--+----+--+----+--+----+--+
//                                      |
//                                     CTZ
//                                      V
//                     Index of first cleared continuation bit
//
//
// While this is implemented in C++, significant care has been taken to ensure
// the compiler emits the best instruction sequence.  In some cases we use the
// following two functions to manipulate the compiler's scheduling decisions.
//
// Controls compiler scheduling by telling it that the first value is modified
// by the second value at the callsite.  This is useful if non-critical path
// instructions are too aggressively scheduled, resulting in a slowdown of the
// actual critical path due to opportunity costs.  An example usage is shown
// where a false dependence of num_bits on result is added to prevent checking
// for a very unlikely error until all critical path instructions have been
// fetched.
//
// ```
// num_bits = <multiple operations to calculate new num_bits value>
// result = <multiple operations to calculate result>
// num_bits = ValueBarrier(num_bits, result);
// if (num_bits == 63) {
//   ABSL_LOG(FATAL) << "Invalid num_bits value";
// }
// ```
// The one-argument form falsely indicates that the value is modified at this
// location, which prevents code that depends on it from being scheduled earlier.
template <typename V1Type>
PROTOBUF_ALWAYS_INLINE inline V1Type ValueBarrier(V1Type value1) {
  asm("" : "+r"(value1));
  return value1;
}

template <typename V1Type, typename V2Type>
PROTOBUF_ALWAYS_INLINE inline V1Type ValueBarrier(V1Type value1,
                                                  V2Type value2) {
  asm("" : "+r"(value1) : "r"(value2));
  return value1;
}

// Performs a 7 bit UBFX (Unsigned Bit Extract) starting at the indicated bit.
static PROTOBUF_ALWAYS_INLINE inline uint64_t Ubfx7(uint64_t data,
                                                    uint64_t start) {
  return ValueBarrier((data >> start) & 0x7f);
}

PROTOBUF_ALWAYS_INLINE inline uint64_t ExtractAndMergeTwoChunks(
    uint64_t data, uint64_t first_byte) {
  ABSL_DCHECK_LE(first_byte, 6U);
  uint64_t first = Ubfx7(data, first_byte * 8);
  uint64_t second = Ubfx7(data, (first_byte + 1) * 8);
  return ValueBarrier(first | (second << 7));
}

struct SlowPathEncodedInfo {
  const char* p;
  uint64_t last8;
  uint64_t valid_bits;
  uint64_t valid_chunk_bits;
  uint64_t masked_cont_bits;
};

// Performs multiple actions which are identical between 32 and 64 bit Varints
// in order to compute the length of the encoded Varint and compute the new
// value of p.
PROTOBUF_ALWAYS_INLINE inline SlowPathEncodedInfo ComputeLengthAndUpdateP(
    const char* p) {
  SlowPathEncodedInfo result;
  // Load the last eight bytes of the (at most ten byte) encoded Varint.
  std::memcpy(&result.last8, p + 2, sizeof(result.last8));
  uint64_t mask = ValueBarrier(0x8080808080808080);
  // Only set continuation bits remain.
  result.masked_cont_bits = ValueBarrier(mask & ~result.last8);
  // The position of the first cleared continuation bit is given by the number
  // of trailing zeroes in the masked value.  The result is undefined for an
  // input of 0 and we handle that case below.
  result.valid_bits = absl::countr_zero(result.masked_cont_bits);
  // Calculates the number of chunks in the encoded Varint.  This value is low
  // by three as neither the cleared continuation chunk nor the first two chunks
  // are counted.
  uint64_t set_continuation_bits = result.valid_bits >> 3;
  // Update p to point past the encoded Varint.
  result.p = p + set_continuation_bits + 3;
  // Calculate number of valid data bits in the decoded value so invalid bits
  // can be masked off.  Value is too low by 14 but we account for that when
  // calculating the mask.
  result.valid_chunk_bits = result.valid_bits - set_continuation_bits;
  return result;
}
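
// Worked example (illustration only): for a three-byte Varint, the byte at
// p + 2 is the first with a cleared continuation bit, so bit 7 of
// masked_cont_bits is the lowest set bit and valid_bits == 7.  Then
// set_continuation_bits == 0, result.p == p + 3, and valid_chunk_bits == 7,
// which is 14 less than the 21 data bits actually present, as noted above.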

inline PROTOBUF_ALWAYS_INLINE std::pair<const char*, uint64_t>
VarintParseSlowArm64(const char* p, uint64_t first8) {
  constexpr uint64_t kResultMaskUnshifted = 0xffffffffffffc000ULL;
  constexpr uint64_t kFirstResultBitChunk2 = 2 * 7;
  constexpr uint64_t kFirstResultBitChunk4 = 4 * 7;
  constexpr uint64_t kFirstResultBitChunk6 = 6 * 7;
  constexpr uint64_t kFirstResultBitChunk8 = 8 * 7;

  SlowPathEncodedInfo info = ComputeLengthAndUpdateP(p);
  // Extract data bits from the low six chunks.  This includes chunks zero and
  // one which we already know are valid.
  uint64_t merged_01 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/0);
  uint64_t merged_23 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/2);
  uint64_t merged_45 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/4);
  // Low 42 bits of decoded value.
  uint64_t result = merged_01 | (merged_23 << kFirstResultBitChunk2) |
                    (merged_45 << kFirstResultBitChunk4);
  // This immediate ends in 14 zeroes since valid_chunk_bits is too low by 14.
  uint64_t result_mask = kResultMaskUnshifted << info.valid_chunk_bits;
  // masked_cont_bits is 0 iff the Varint is invalid (none of the ten permitted
  // bytes ends the Varint).
  if (PROTOBUF_PREDICT_FALSE(info.masked_cont_bits == 0)) {
    return {nullptr, 0};
  }
  // Test for early exit if Varint does not exceed 6 chunks.  Branching on one
  // bit is faster on ARM than via a compare and branch.
  if (PROTOBUF_PREDICT_FALSE((info.valid_bits & 0x20) != 0)) {
    // Extract data bits from high four chunks.
    uint64_t merged_67 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/6);
    // Last two chunks come from last two bytes of info.last8.
    uint64_t merged_89 =
        ExtractAndMergeTwoChunks(info.last8, /*first_chunk=*/6);
    result |= merged_67 << kFirstResultBitChunk6;
    result |= merged_89 << kFirstResultBitChunk8;
    // Handle an invalid Varint with all 10 continuation bits set.
  }
  // Mask off invalid data bytes.
  result &= ~result_mask;
  return {info.p, result};
}

// See comments in VarintParseSlowArm64 for a description of the algorithm.
// Differences in the 32 bit version are noted below.
inline PROTOBUF_ALWAYS_INLINE std::pair<const char*, uint32_t>
VarintParseSlowArm32(const char* p, uint64_t first8) {
  constexpr uint64_t kResultMaskUnshifted = 0xffffffffffffc000ULL;
  constexpr uint64_t kFirstResultBitChunk1 = 1 * 7;
  constexpr uint64_t kFirstResultBitChunk3 = 3 * 7;

  // This also skips the slop bytes.
  SlowPathEncodedInfo info = ComputeLengthAndUpdateP(p);
  // Extract data bits from chunks 1-4.  Chunk zero is merged in below.
  uint64_t merged_12 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/1);
  uint64_t merged_34 = ExtractAndMergeTwoChunks(first8, /*first_chunk=*/3);
  first8 = ValueBarrier(first8, p);
  uint64_t result = Ubfx7(first8, /*start=*/0);
  result = ValueBarrier(result | merged_12 << kFirstResultBitChunk1);
  result = ValueBarrier(result | merged_34 << kFirstResultBitChunk3);
  uint64_t result_mask = kResultMaskUnshifted << info.valid_chunk_bits;
  result &= ~result_mask;
  // It is extremely unlikely that a Varint is invalid so checking that
  // condition isn't on the critical path. Here we make sure that we don't do so
  // until result has been computed.
  info.masked_cont_bits = ValueBarrier(info.masked_cont_bits, result);
  if (PROTOBUF_PREDICT_FALSE(info.masked_cont_bits == 0)) {
    return {nullptr, 0};
  }
  return {info.p, result};
}

static const char* VarintParseSlowArm(const char* p, uint32_t* out,
                                      uint64_t first8) {
  auto tmp = VarintParseSlowArm32(p, first8);
  *out = tmp.second;
  return tmp.first;
}

static const char* VarintParseSlowArm(const char* p, uint64_t* out,
                                      uint64_t first8) {
  auto tmp = VarintParseSlowArm64(p, first8);
  *out = tmp.second;
  return tmp.first;
}
#endif

// The caller must ensure that p points to at least 10 valid bytes.
template <typename T>
PROTOBUF_NODISCARD const char* VarintParse(const char* p, T* out) {
#if defined(__aarch64__) && defined(ABSL_IS_LITTLE_ENDIAN) && !defined(_MSC_VER)
  // This optimization is not supported in big endian mode
  uint64_t first8;
  std::memcpy(&first8, p, sizeof(first8));
  if (PROTOBUF_PREDICT_TRUE((first8 & 0x80) == 0)) {
    *out = static_cast<uint8_t>(first8);
    return p + 1;
  }
  if (PROTOBUF_PREDICT_TRUE((first8 & 0x8000) == 0)) {
    uint64_t chunk1;
    uint64_t chunk2;
    // Extracting the two chunks this way gives a speedup for this path.
    chunk1 = Ubfx7(first8, 0);
    chunk2 = Ubfx7(first8, 8);
    *out = chunk1 | (chunk2 << 7);
    return p + 2;
  }
  return VarintParseSlowArm(p, out, first8);
#else   // __aarch64__
  auto ptr = reinterpret_cast<const uint8_t*>(p);
  uint32_t res = ptr[0];
  if ((res & 0x80) == 0) {
    *out = res;
    return p + 1;
  }
  return VarintParseSlow(p, res, out);
#endif  // __aarch64__
}
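
// A minimal usage sketch (illustration only; `buf` must satisfy the
// 10-readable-bytes precondition above, e.g. via the slop-byte invariant):
//
// ```
// uint64_t value;
// const char* next = VarintParse(buf, &value);
// if (next == nullptr) {
//   // Malformed varint: no terminating byte within the permitted length.
// }
// ```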

// Used for tags, could read up to 5 bytes which must be available.
// Caller must ensure it's safe to call.

PROTOBUF_EXPORT
std::pair<const char*, uint32_t> ReadTagFallback(const char* p, uint32_t res);

// Same as VarintParse but accepts at most 5 bytes.
inline const char* ReadTag(const char* p, uint32_t* out,
                           uint32_t /*max_tag*/ = 0) {
  uint32_t res = static_cast<uint8_t>(p[0]);
  if (res < 128) {
    *out = res;
    return p + 1;
  }
  uint32_t second = static_cast<uint8_t>(p[1]);
  // The "- 1" cancels the continuation bit (0x80) already present in res.
  res += (second - 1) << 7;
  if (second < 128) {
    *out = res;
    return p + 2;
  }
  auto tmp = ReadTagFallback(p, res);
  *out = tmp.second;
  return tmp.first;
}
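
// Worked example (illustration only): field 16 with wire type 0 has tag
// (16 << 3) | 0 == 128 and is encoded as the two bytes {0x80, 0x01}.  ReadTag
// starts with res == 0x80, adds (0x01 - 1) << 7 == 0 (the subtraction cancels
// the continuation bit of the first byte), sees second < 128, and returns
// *out == 128 with p + 2.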

// As above, but optimized to consume very few registers while still being
// fast. ReadTagInlined is useful for callers that don't mind the extra code
// but would like to avoid an extern function call causing spills into the
// stack.
//
// Two support routines for ReadTagInlined come first...
template <class T>
PROTOBUF_NODISCARD PROTOBUF_ALWAYS_INLINE constexpr T RotateLeft(
    T x, int s) noexcept {
  return static_cast<T>(x << (s & (std::numeric_limits<T>::digits - 1))) |
         static_cast<T>(x >> ((-s) & (std::numeric_limits<T>::digits - 1)));
}

PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE uint64_t
RotRight7AndReplaceLowByte(uint64_t res, const char& byte) {
  // TODO: remove the inline assembly
#if defined(__x86_64__) && defined(__GNUC__)
  // This will only use one register for `res`.
  // `byte` comes as a reference to allow the compiler to generate code like:
  //
  //   rorq    $7, %rcx
  //   movb    1(%rax), %cl
  //
  // which avoids loading the incoming bytes into a separate register first.
  asm("ror $7,%0\n\t"
      "movb %1,%b0"
      : "+r"(res)
      : "m"(byte));
#else
  res = RotateLeft(res, -7);
  res = res & ~0xFF;
  res |= 0xFF & byte;
#endif
  return res;
}

inline PROTOBUF_ALWAYS_INLINE const char* ReadTagInlined(const char* ptr,
                                                         uint32_t* out) {
  uint64_t res = 0xFF & ptr[0];
  if (PROTOBUF_PREDICT_FALSE(res >= 128)) {
    res = RotRight7AndReplaceLowByte(res, ptr[1]);
    if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
      res = RotRight7AndReplaceLowByte(res, ptr[2]);
      if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
        res = RotRight7AndReplaceLowByte(res, ptr[3]);
        if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
          // Note: this wouldn't work if res were 32-bit,
          // because then replacing the low byte would overwrite
          // the bottom 4 bits of the result.
          res = RotRight7AndReplaceLowByte(res, ptr[4]);
          if (PROTOBUF_PREDICT_FALSE(res & 0x80)) {
            // The proto format does not permit longer than 5-byte encodings for
            // tags.
            *out = 0;
            return nullptr;
          }
          *out = static_cast<uint32_t>(RotateLeft(res, 28));
#if defined(__GNUC__)
          // Note: this asm statement prevents the compiler from
          // trying to share the "return ptr + constant" among all
          // branches.
          asm("" : "+r"(ptr));
#endif
          return ptr + 5;
        }
        *out = static_cast<uint32_t>(RotateLeft(res, 21));
        return ptr + 4;
      }
      *out = static_cast<uint32_t>(RotateLeft(res, 14));
      return ptr + 3;
    }
    *out = static_cast<uint32_t>(RotateLeft(res, 7));
    return ptr + 2;
  }
  *out = static_cast<uint32_t>(res);
  return ptr + 1;
}

// Decodes 2 consecutive bytes of a varint and returns the value, shifted left
// by 1. It simultaneously updates *ptr to *ptr + 1 or *ptr + 2, depending on
// whether the first byte's continuation bit is set.
// If bit 15 of the return value is set (equivalent to the continuation bits of
// both bytes being set) the varint continues; otherwise the parse is done. On
// x86 this compiles down to:
//   movsx eax, dil
//   and edi, eax
//   add eax, edi
//   adc [rsi], 1
inline uint32_t DecodeTwoBytes(const char** ptr) {
  uint32_t value = UnalignedLoad<uint16_t>(*ptr);
  // Sign extend the low byte continuation bit
  uint32_t x = static_cast<int8_t>(value);
  value &= x;  // Mask out the high byte iff no continuation
  // This add is an amazing operation, it cancels the low byte continuation bit
  // from x, transferring it to the carry. Simultaneously it also shifts the 7
  // LSB left by one tightly against high byte varint bits. Hence value now
  // contains the unpacked value shifted left by 1.
  value += x;
  // Use the carry to update the ptr appropriately.
  *ptr += value < x ? 2 : 1;
  return value;
}
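
// Worked example (illustration only): decoding the bytes {0x96, 0x01}, the
// varint encoding of 150.  The 16-bit load gives value == 0x0196; x is the
// sign-extended low byte, 0xFFFFFF96.  `value &= x` keeps the high byte (the
// low byte's continuation bit is set), and `value += x` wraps around to
// 0x12C == 300 == 150 << 1, setting the carry so *ptr advances by 2.  Bit 15
// of the result is clear, so the varint ends here.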

// More efficient varint parsing for big varints
inline const char* ParseBigVarint(const char* p, uint64_t* out) {
  auto pnew = p;
  auto tmp = DecodeTwoBytes(&pnew);
  uint64_t res = tmp >> 1;
  if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
    *out = res;
    return pnew;
  }
  for (std::uint32_t i = 1; i < 5; i++) {
    pnew = p + 2 * i;
    tmp = DecodeTwoBytes(&pnew);
    res += (static_cast<std::uint64_t>(tmp) - 2) << (14 * i - 1);
    if (PROTOBUF_PREDICT_TRUE(static_cast<std::int16_t>(tmp) >= 0)) {
      *out = res;
      return pnew;
    }
  }
  return nullptr;
}

PROTOBUF_EXPORT
std::pair<const char*, int32_t> ReadSizeFallback(const char* p, uint32_t first);
// Used for sizes (length prefixes); could read up to 5 bytes, which must be
// available. Additionally it makes sure the unsigned value fits in an int32_t,
// otherwise the fallback returns nullptr. Caller must ensure it's safe to call.
inline uint32_t ReadSize(const char** pp) {
  auto p = *pp;
  uint32_t res = static_cast<uint8_t>(p[0]);
  if (res < 128) {
    *pp = p + 1;
    return res;
  }
  auto x = ReadSizeFallback(p, res);
  *pp = x.first;
  return x.second;
}

// Some convenience functions to simplify the generated parse loop code.
// Returning the value and updating the buffer pointer allows for nicer
// function composition. We rely on the compiler to inline this.
// Also, in debug compiles, having locally scoped variables tends to generate
// stack frames that scale as O(num fields).
inline uint64_t ReadVarint64(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return tmp;
}

inline uint32_t ReadVarint32(const char** p) {
  uint32_t tmp;
  *p = VarintParse(*p, &tmp);
  return tmp;
}

inline int64_t ReadVarintZigZag64(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode64(tmp);
}

inline int32_t ReadVarintZigZag32(const char** p) {
  uint64_t tmp;
  *p = VarintParse(*p, &tmp);
  return WireFormatLite::ZigZagDecode32(static_cast<uint32_t>(tmp));
}
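
// For reference, ZigZag decoding maps the unsigned value n to
// (n >> 1) ^ -(n & 1), so 0, 1, 2, 3, 4 decode to 0, -1, 1, -2, 2.  A minimal
// sketch equivalent to the 64-bit decode used above (the real implementation
// lives in WireFormatLite):
//
// ```
// int64_t ZigZagDecode64Sketch(uint64_t n) {
//   // ~(n & 1) + 1 is the two's-complement negation of (n & 1).
//   return static_cast<int64_t>((n >> 1) ^ (~(n & 1) + 1));
// }
// ```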

template <typename Func>
PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE const char*
ParseContext::ParseLengthDelimitedInlined(const char* ptr, const Func& func) {
  LimitToken old;
  ptr = ReadSizeAndPushLimitAndDepthInlined(ptr, &old);
  if (ptr == nullptr) return ptr;
  auto old_depth = depth_;
  PROTOBUF_ALWAYS_INLINE_CALL ptr = func(ptr);
  if (ptr != nullptr) ABSL_DCHECK_EQ(old_depth, depth_);
  depth_++;
  if (!PopLimit(std::move(old))) return nullptr;
  return ptr;
}

template <typename Func>
PROTOBUF_NODISCARD inline PROTOBUF_ALWAYS_INLINE const char*
ParseContext::ParseGroupInlined(const char* ptr, uint32_t start_tag,
                                const Func& func) {
  if (--depth_ < 0) return nullptr;
  group_depth_++;
  auto old_depth = depth_;
  auto old_group_depth = group_depth_;
  PROTOBUF_ALWAYS_INLINE_CALL ptr = func(ptr);
  if (ptr != nullptr) {
    ABSL_DCHECK_EQ(old_depth, depth_);
    ABSL_DCHECK_EQ(old_group_depth, group_depth_);
  }
  group_depth_--;
  depth_++;
  if (PROTOBUF_PREDICT_FALSE(!ConsumeEndGroup(start_tag))) return nullptr;
  return ptr;
}

inline const char* ParseContext::ReadSizeAndPushLimitAndDepthInlined(
    const char* ptr, LimitToken* old_limit) {
  int size = ReadSize(&ptr);
  if (PROTOBUF_PREDICT_FALSE(!ptr) || depth_ <= 0) {
    return nullptr;
  }
  *old_limit = PushLimit(ptr, size);
  --depth_;
  return ptr;
}

template <typename Tag, typename T>
const char* EpsCopyInputStream::ReadRepeatedFixed(const char* ptr,
                                                  Tag expected_tag,
                                                  RepeatedField<T>* out) {
  do {
    out->Add(UnalignedLoad<T>(ptr));
    ptr += sizeof(T);
    if (PROTOBUF_PREDICT_FALSE(ptr >= limit_end_)) return ptr;
  } while (UnalignedLoad<Tag>(ptr) == expected_tag && (ptr += sizeof(Tag)));
  return ptr;
}

// Add any of the following lines to debug which parse function is failing.

#define GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, ret) \
  if (!(predicate)) {                                  \
    /*  ::raise(SIGINT);  */                           \
    /*  ABSL_LOG(ERROR) << "Parse failure";  */        \
    return ret;                                        \
  }

#define GOOGLE_PROTOBUF_PARSER_ASSERT(predicate) \
  GOOGLE_PROTOBUF_ASSERT_RETURN(predicate, nullptr)

template <typename T>
const char* EpsCopyInputStream::ReadPackedFixed(const char* ptr, int size,
                                                RepeatedField<T>* out) {
  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  int nbytes = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
  while (size > nbytes) {
    int num = nbytes / sizeof(T);
    int old_entries = out->size();
    out->Reserve(old_entries + num);
    int block_size = num * sizeof(T);
    auto dst = out->AddNAlreadyReserved(num);
#ifdef ABSL_IS_LITTLE_ENDIAN
    std::memcpy(dst, ptr, block_size);
#else
    for (int i = 0; i < num; i++)
      dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
    size -= block_size;
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    ptr += kSlopBytes - (nbytes - block_size);
    nbytes = static_cast<int>(buffer_end_ + kSlopBytes - ptr);
  }
  int num = size / sizeof(T);
  int block_size = num * sizeof(T);
  if (num == 0) return size == block_size ? ptr : nullptr;
  int old_entries = out->size();
  out->Reserve(old_entries + num);
  auto dst = out->AddNAlreadyReserved(num);
#ifdef ABSL_IS_LITTLE_ENDIAN
  ABSL_CHECK(dst != nullptr) << out << "," << num;
  std::memcpy(dst, ptr, block_size);
#else
  for (int i = 0; i < num; i++) dst[i] = UnalignedLoad<T>(ptr + i * sizeof(T));
#endif
  ptr += block_size;
  if (size != block_size) return nullptr;
  return ptr;
}

template <typename Add>
const char* ReadPackedVarintArray(const char* ptr, const char* end, Add add) {
  while (ptr < end) {
    uint64_t varint;
    ptr = VarintParse(ptr, &varint);
    if (ptr == nullptr) return nullptr;
    add(varint);
  }
  return ptr;
}

template <typename Add, typename SizeCb>
const char* EpsCopyInputStream::ReadPackedVarint(const char* ptr, Add add,
                                                 SizeCb size_callback) {
  int size = ReadSize(&ptr);
  size_callback(size);

  GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
  int chunk_size = static_cast<int>(buffer_end_ - ptr);
  while (size > chunk_size) {
    ptr = ReadPackedVarintArray(ptr, buffer_end_, add);
    if (ptr == nullptr) return nullptr;
    int overrun = static_cast<int>(ptr - buffer_end_);
    ABSL_DCHECK(overrun >= 0 && overrun <= kSlopBytes);
    if (size - chunk_size <= kSlopBytes) {
      // The current buffer contains all the information needed; we don't need
      // to flip buffers. However, we must parse from a buffer with enough
      // space so we are not prone to a buffer overflow.
      char buf[kSlopBytes + 10] = {};
      std::memcpy(buf, buffer_end_, kSlopBytes);
      ABSL_CHECK_LE(size - chunk_size, kSlopBytes);
      auto end = buf + (size - chunk_size);
      auto res = ReadPackedVarintArray(buf + overrun, end, add);
      if (res == nullptr || res != end) return nullptr;
      return buffer_end_ + (res - buf);
    }
    size -= overrun + chunk_size;
    ABSL_DCHECK_GT(size, 0);
    // We must flip buffers
    if (limit_ <= kSlopBytes) return nullptr;
    ptr = Next();
    if (ptr == nullptr) return nullptr;
    ptr += overrun;
    chunk_size = static_cast<int>(buffer_end_ - ptr);
  }
  auto end = ptr + size;
  ptr = ReadPackedVarintArray(ptr, end, add);
  return end == ptr ? ptr : nullptr;
}

// Helper for verification of utf8
PROTOBUF_EXPORT
bool VerifyUTF8(absl::string_view s, const char* field_name);

inline bool VerifyUTF8(const std::string* s, const char* field_name) {
  return VerifyUTF8(*s, field_name);
}

// All the string parsers with or without UTF checking and for all CTypes.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* InlineGreedyStringParser(
    std::string* s, const char* ptr, ParseContext* ctx);

PROTOBUF_NODISCARD inline const char* InlineCordParser(::absl::Cord* cord,
                                                       const char* ptr,
                                                       ParseContext* ctx) {
  int size = ReadSize(&ptr);
  if (!ptr) return nullptr;
  return ctx->ReadCord(ptr, size, cord);
}


template <typename T>
PROTOBUF_NODISCARD const char* FieldParser(uint64_t tag, T& field_parser,
                                           const char* ptr, ParseContext* ctx) {
  uint32_t number = tag >> 3;
  GOOGLE_PROTOBUF_PARSER_ASSERT(number != 0);
  using WireType = internal::WireFormatLite::WireType;
  switch (tag & 7) {
    case WireType::WIRETYPE_VARINT: {
      uint64_t value;
      ptr = VarintParse(ptr, &value);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      field_parser.AddVarint(number, value);
      break;
    }
    case WireType::WIRETYPE_FIXED64: {
      uint64_t value = UnalignedLoad<uint64_t>(ptr);
      ptr += 8;
      field_parser.AddFixed64(number, value);
      break;
    }
    case WireType::WIRETYPE_LENGTH_DELIMITED: {
      ptr = field_parser.ParseLengthDelimited(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_START_GROUP: {
      ptr = field_parser.ParseGroup(number, ptr, ctx);
      GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
      break;
    }
    case WireType::WIRETYPE_END_GROUP: {
      ABSL_LOG(FATAL) << "Can't happen";
      break;
    }
    case WireType::WIRETYPE_FIXED32: {
      uint32_t value = UnalignedLoad<uint32_t>(ptr);
      ptr += 4;
      field_parser.AddFixed32(number, value);
      break;
    }
    default:
      return nullptr;
  }
  return ptr;
}

template <typename T>
PROTOBUF_NODISCARD const char* WireFormatParser(T& field_parser,
                                                const char* ptr,
                                                ParseContext* ctx) {
  while (!ctx->Done(&ptr)) {
    uint32_t tag;
    ptr = ReadTag(ptr, &tag);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
    if (tag == 0 || (tag & 7) == 4) {
      ctx->SetLastTag(tag);
      return ptr;
    }
    ptr = FieldParser(tag, field_parser, ptr, ctx);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr != nullptr);
  }
  return ptr;
}

// The packed parsers parse repeated numeric primitives directly into the
// corresponding field.

// These are packed varints:
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedUInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSInt64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedEnumParser(
    void* object, const char* ptr, ParseContext* ctx);

template <typename T>
PROTOBUF_NODISCARD const char* PackedEnumParser(void* object, const char* ptr,
                                                ParseContext* ctx,
                                                bool (*is_valid)(int),
                                                InternalMetadata* metadata,
                                                int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, metadata, field_num](int32_t val) {
        if (is_valid(val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
        }
      });
}

template <typename T>
PROTOBUF_NODISCARD const char* PackedEnumParserArg(
    void* object, const char* ptr, ParseContext* ctx,
    bool (*is_valid)(const void*, int), const void* data,
    InternalMetadata* metadata, int field_num) {
  return ctx->ReadPackedVarint(
      ptr, [object, is_valid, data, metadata, field_num](int32_t val) {
        if (is_valid(data, val)) {
          static_cast<RepeatedField<int>*>(object)->Add(val);
        } else {
          WriteVarint(field_num, val, metadata->mutable_unknown_fields<T>());
        }
      });
}

PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedBoolParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed32Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedSFixed64Parser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedFloatParser(
    void* object, const char* ptr, ParseContext* ctx);
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* PackedDoubleParser(
    void* object, const char* ptr, ParseContext* ctx);

// This is the only recursive parser.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownGroupLiteParse(
    std::string* unknown, const char* ptr, ParseContext* ctx);
// This is a helper for UnknownGroupLiteParse but is also useful in the
// generated code. It uses overloading on std::string* vs UnknownFieldSet* to
// make the generated code isomorphic between full and lite.
PROTOBUF_NODISCARD PROTOBUF_EXPORT const char* UnknownFieldParse(
    uint32_t tag, std::string* unknown, const char* ptr, ParseContext* ctx);

}  // namespace internal
}  // namespace protobuf
}  // namespace google

#include "google/protobuf/port_undef.inc"

#endif  // GOOGLE_PROTOBUF_PARSE_CONTEXT_H__