// Protocol Buffers - Google's data interchange format
// Copyright 2008 Google Inc. All rights reserved.
//
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file or at
// https://developers.google.com/open-source/licenses/bsd

8 #include "google/protobuf/parse_context.h"
9
10 #include <algorithm>
11 #include <cstring>
12
13 #include "absl/strings/cord.h"
14 #include "absl/strings/string_view.h"
15 #include "google/protobuf/message_lite.h"
16 #include "google/protobuf/repeated_field.h"
17 #include "google/protobuf/wire_format_lite.h"
18 #include "utf8_validity.h"
19
20
21 // Must be included last.
22 #include "google/protobuf/port_def.inc"
23
24 namespace google {
25 namespace protobuf {
26 namespace internal {
27
// Only call if at start of tag.
//
// Scans the wire-format data inside the slop region [begin + overrun,
// begin + kSlopBytes) to decide whether the parse can legitimately end there
// (a 0 tag, or an end-group tag closing one of the `depth` open groups), as
// opposed to merely being cut off by the end of the buffer.  Returns true if
// a valid end of parse is reached inside the slop region.
bool EpsCopyInputStream::ParseEndsInSlopRegion(const char* begin, int overrun,
                                               int depth) {
  constexpr int kSlopBytes = EpsCopyInputStream::kSlopBytes;
  ABSL_DCHECK_GE(overrun, 0);
  ABSL_DCHECK_LE(overrun, kSlopBytes);
  auto ptr = begin + overrun;
  auto end = begin + kSlopBytes;
  while (ptr < end) {
    uint32_t tag;
    ptr = ReadTag(ptr, &tag);
    // A tag that is malformed or whose varint spills past the slop region
    // cannot prove a valid end of parse.
    if (ptr == nullptr || ptr > end) return false;
    // ending on 0 tag is allowed and is the major reason for the necessity of
    // this function.
    if (tag == 0) return true;
    switch (tag & 7) {
      case 0: {  // Varint
        uint64_t val;
        ptr = VarintParse(ptr, &val);
        if (ptr == nullptr) return false;
        break;
      }
      case 1: {  // fixed64
        ptr += 8;
        break;
      }
      case 2: {  // len delim
        int32_t size = ReadSize(&ptr);
        // The payload must fit entirely inside the slop region for the scan
        // to continue meaningfully.
        if (ptr == nullptr || size > end - ptr) return false;
        ptr += size;
        break;
      }
      case 3: {  // start group
        depth++;
        break;
      }
      case 4: {  // end group
        if (--depth < 0) return true;  // We exit early
        break;
      }
      case 5: {  // fixed32
        ptr += 4;
        break;
      }
      default:
        return false;  // Unknown wireformat (wire types 6 and 7)
    }
  }
  // Ran out of slop bytes without reaching a terminating tag.
  return false;
}
78
// Advances to the next buffer of the underlying stream while maintaining the
// invariant that kSlopBytes of readable data always follow the returned
// buffer's nominal end (buffer_end_).  Returns nullptr at end of stream.
// `overrun`/`depth` let us avoid pulling another chunk when the parse can
// already end inside the slop region (see ParseEndsInSlopRegion).
const char* EpsCopyInputStream::NextBuffer(int overrun, int depth) {
  if (next_chunk_ == nullptr) return nullptr;  // We've reached end of stream.
  if (next_chunk_ != patch_buffer_) {
    ABSL_DCHECK(size_ > kSlopBytes);
    // The chunk is large enough to be used directly
    buffer_end_ = next_chunk_ + size_ - kSlopBytes;
    auto res = next_chunk_;
    next_chunk_ = patch_buffer_;
    // Leaving the patch buffer: string_views can alias the stream chunk
    // directly again.
    if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
    return res;
  }
  // Move the slop bytes of previous buffer to start of the patch buffer.
  // Note we must use memmove because the previous buffer could be part of
  // patch_buffer_.
  std::memmove(patch_buffer_, buffer_end_, kSlopBytes);
  if (overall_limit_ > 0 &&
      (depth < 0 || !ParseEndsInSlopRegion(patch_buffer_, overrun, depth))) {
    const void* data;
    // ZeroCopyInputStream indicates Next may return 0 size buffers. Hence
    // we loop.
    while (StreamNext(&data)) {
      if (size_ > kSlopBytes) {
        // We got a large chunk
        std::memcpy(patch_buffer_ + kSlopBytes, data, kSlopBytes);
        next_chunk_ = static_cast<const char*>(data);
        buffer_end_ = patch_buffer_ + kSlopBytes;
        if (aliasing_ >= kNoDelta) aliasing_ = kOnPatch;
        return patch_buffer_;
      } else if (size_ > 0) {
        // Small chunk: stage it right after the slop bytes so the whole
        // readable region lives inside the patch buffer.
        std::memcpy(patch_buffer_ + kSlopBytes, data, size_);
        next_chunk_ = patch_buffer_;
        buffer_end_ = patch_buffer_ + size_;
        if (aliasing_ >= kNoDelta) aliasing_ = kOnPatch;
        return patch_buffer_;
      }
      ABSL_DCHECK(size_ == 0) << size_;
    }
    overall_limit_ = 0;  // Next failed, no more needs for next
  }
  // End of stream or array
  if (aliasing_ == kNoDelta) {
    // If there is no more block and aliasing is true, the previous block
    // is still valid and we can alias. We have users relying on string_view's
    // obtained from protos to outlive the proto, when the parse was from an
    // array. This guarantees string_view's are always aliased if parsed from
    // an array.
    aliasing_ = reinterpret_cast<std::uintptr_t>(buffer_end_) -
                reinterpret_cast<std::uintptr_t>(patch_buffer_);
  }
  next_chunk_ = nullptr;
  buffer_end_ = patch_buffer_ + kSlopBytes;
  size_ = 0;
  return patch_buffer_;
}
133
// Fetches the next buffer without group-aware slop parsing (depth == -1),
// re-anchoring limit_/limit_end_ on the new buffer.  Returns nullptr at end
// of stream.
const char* EpsCopyInputStream::Next() {
  ABSL_DCHECK(limit_ > kSlopBytes);
  auto p = NextBuffer(0 /* immaterial */, -1);
  if (p == nullptr) {
    limit_end_ = buffer_end_;
    // Distinguish ending on a pushed limit or ending on end-of-stream.
    SetEndOfStream();
    return nullptr;
  }
  limit_ -= buffer_end_ - p;  // Adjust limit_ relative to new anchor
  limit_end_ = buffer_end_ + std::min(0, limit_);
  return p;
}
147
// Called when the parse pointer has crossed limit_end_.  Returns the
// re-anchored pointer plus a bool that is true when parsing must stop
// (pushed limit hit, end of stream, or error signalled by nullptr).
std::pair<const char*, bool> EpsCopyInputStream::DoneFallback(int overrun,
                                                              int depth) {
  // Did we exceed the limit (parse error)?
  if (PROTOBUF_PREDICT_FALSE(overrun > limit_)) return {nullptr, true};
  ABSL_DCHECK(overrun != limit_);  // Guaranteed by caller.
  ABSL_DCHECK(overrun < limit_);   // Follows from above
  // TODO Instead of this dcheck we could just assign, and remove
  // updating the limit_end from PopLimit, ie.
  // limit_end_ = buffer_end_ + (std::min)(0, limit_);
  // if (ptr < limit_end_) return {ptr, false};
  ABSL_DCHECK(limit_end_ == buffer_end_ + (std::min)(0, limit_));
  // At this point we know the following assertion holds.
  ABSL_DCHECK_GT(limit_, 0);
  ABSL_DCHECK(limit_end_ == buffer_end_);  // because limit_ > 0
  const char* p;
  do {
    // We are past the end of buffer_end_, in the slop region.
    ABSL_DCHECK_GE(overrun, 0);
    p = NextBuffer(overrun, depth);
    if (p == nullptr) {
      // We are at the end of the stream
      if (PROTOBUF_PREDICT_FALSE(overrun != 0)) return {nullptr, true};
      ABSL_DCHECK_GT(limit_, 0);
      limit_end_ = buffer_end_;
      // Distinguish ending on a pushed limit or ending on end-of-stream.
      SetEndOfStream();
      return {buffer_end_, true};
    }
    limit_ -= buffer_end_ - p;  // Adjust limit_ relative to new anchor
    p += overrun;
    overrun = p - buffer_end_;
    // Loop again if the resumed pointer still lands past the new buffer's
    // end (possible when the stream hands out tiny chunks).
  } while (overrun >= 0);
  limit_end_ = buffer_end_ + std::min(0, limit_);
  return {p, false};
}
183
SkipFallback(const char * ptr,int size)184 const char* EpsCopyInputStream::SkipFallback(const char* ptr, int size) {
185 return AppendSize(ptr, size, [](const char* /*p*/, int /*s*/) {});
186 }
187
ReadStringFallback(const char * ptr,int size,std::string * str)188 const char* EpsCopyInputStream::ReadStringFallback(const char* ptr, int size,
189 std::string* str) {
190 str->clear();
191 if (PROTOBUF_PREDICT_TRUE(size <= buffer_end_ - ptr + limit_)) {
192 // Reserve the string up to a static safe size. If strings are bigger than
193 // this we proceed by growing the string as needed. This protects against
194 // malicious payloads making protobuf hold on to a lot of memory.
195 str->reserve(str->size() + std::min<int>(size, kSafeStringSize));
196 }
197 return AppendSize(ptr, size,
198 [str](const char* p, int s) { str->append(p, s); });
199 }
200
AppendStringFallback(const char * ptr,int size,std::string * str)201 const char* EpsCopyInputStream::AppendStringFallback(const char* ptr, int size,
202 std::string* str) {
203 if (PROTOBUF_PREDICT_TRUE(size <= buffer_end_ - ptr + limit_)) {
204 // Reserve the string up to a static safe size. If strings are bigger than
205 // this we proceed by growing the string as needed. This protects against
206 // malicious payloads making protobuf hold on to a lot of memory.
207 str->reserve(str->size() + std::min<int>(size, kSafeStringSize));
208 }
209 return AppendSize(ptr, size,
210 [str](const char* p, int s) { str->append(p, s); });
211 }
212
// Reads `size` bytes into *cord.  When backed by a ZeroCopyInputStream it
// tries to rewind the stream and delegate to zcis_->ReadCord() so large
// payloads transfer without flattening; otherwise it copies/appends from the
// buffers it can see.  Returns the advanced pointer or nullptr on failure.
const char* EpsCopyInputStream::ReadCordFallback(const char* ptr, int size,
                                                 absl::Cord* cord) {
  if (zcis_ == nullptr) {
    // Not stream-backed (e.g. parsing from a flat array): serve from the
    // visible buffer, falling back to the generic chunked append.
    int bytes_from_buffer = buffer_end_ - ptr + kSlopBytes;
    if (size <= bytes_from_buffer) {
      *cord = absl::string_view(ptr, size);
      return ptr + size;
    }
    return AppendSize(ptr, size, [cord](const char* p, int s) {
      cord->Append(absl::string_view(p, s));
    });
  }
  // Bytes the current limit will still allow once `ptr` reaches it.
  int new_limit = buffer_end_ - ptr + limit_;
  if (size > new_limit) return nullptr;
  new_limit -= size;
  int bytes_from_buffer = buffer_end_ - ptr + kSlopBytes;
  // Single-comparison "ptr is inside the patch buffer" test: unsigned
  // wrap-around makes the difference huge when ptr is below patch_buffer_.
  const bool in_patch_buf = reinterpret_cast<uintptr_t>(ptr) -
                                reinterpret_cast<uintptr_t>(patch_buffer_) <=
                            kPatchBufferSize;
  if (bytes_from_buffer > kPatchBufferSize || !in_patch_buf) {
    // ptr points into a chunk still owned by the stream: return the unread
    // bytes to the stream and let ReadCord() below take all of `size`.
    cord->Clear();
    StreamBackUp(bytes_from_buffer);
  } else if (bytes_from_buffer == kSlopBytes && next_chunk_ != nullptr &&
             // Only backup if next_chunk_ points to a valid buffer returned by
             // ZeroCopyInputStream. This happens when NextStream() returns a
             // chunk that's smaller than or equal to kSlopBytes.
             next_chunk_ != patch_buffer_) {
    cord->Clear();
    StreamBackUp(size_);
  } else {
    // The readable prefix lives in the patch buffer and cannot be handed
    // back to the stream: copy it into the cord, then position the stream
    // just past it before reading the remainder.
    size -= bytes_from_buffer;
    ABSL_DCHECK_GT(size, 0);
    *cord = absl::string_view(ptr, bytes_from_buffer);
    if (next_chunk_ == patch_buffer_) {
      // We have read to end of the last buffer returned by
      // ZeroCopyInputStream. So the stream is in the right position.
    } else if (next_chunk_ == nullptr) {
      // There is no remaining chunks. We can't read size.
      SetEndOfStream();
      return nullptr;
    } else {
      // Next chunk is already loaded
      ABSL_DCHECK(size_ > kSlopBytes);
      StreamBackUp(size_ - kSlopBytes);
    }
  }
  if (size > overall_limit_) return nullptr;
  overall_limit_ -= size;
  if (!zcis_->ReadCord(cord, size)) return nullptr;
  // Re-anchor this EpsCopyInputStream on the stream's new position and
  // express the saved limit relative to the fresh buffer.
  ptr = InitFrom(zcis_);
  limit_ = new_limit - static_cast<int>(buffer_end_ - ptr);
  limit_end_ = buffer_end_ + (std::min)(0, limit_);
  return ptr;
}
267
268
// Anchors this EpsCopyInputStream on `zcis` and returns the initial parse
// pointer.  A large first chunk is used in place (its last kSlopBytes are
// withheld until the next refill); a small one is staged in patch_buffer_.
const char* EpsCopyInputStream::InitFrom(io::ZeroCopyInputStream* zcis) {
  zcis_ = zcis;
  const void* data;
  int size;
  limit_ = INT_MAX;
  if (zcis->Next(&data, &size)) {
    overall_limit_ -= size;
    if (size > kSlopBytes) {
      auto ptr = static_cast<const char*>(data);
      limit_ -= size - kSlopBytes;
      limit_end_ = buffer_end_ = ptr + size - kSlopBytes;
      next_chunk_ = patch_buffer_;
      if (aliasing_ == kOnPatch) aliasing_ = kNoDelta;
      return ptr;
    } else {
      limit_end_ = buffer_end_ = patch_buffer_ + kSlopBytes;
      next_chunk_ = patch_buffer_;
      // Stage the small chunk at the tail of the patch buffer; assuming
      // kPatchBufferSize == 2 * kSlopBytes the data then ends exactly at
      // buffer_end_ + kSlopBytes — TODO(review): confirm the constant.
      auto ptr = patch_buffer_ + kPatchBufferSize - size;
      std::memcpy(ptr, data, size);
      return ptr;
    }
  }
  // Empty stream: park on the patch buffer with zero readable bytes.
  overall_limit_ = 0;
  next_chunk_ = nullptr;
  size_ = 0;
  limit_end_ = buffer_end_ = patch_buffer_;
  return patch_buffer_;
}
297
// Out-of-line entry point that simply delegates to the inlined
// implementation (declared in the header); kept so non-inlined callers have
// a linkable symbol.
const char* ParseContext::ReadSizeAndPushLimitAndDepth(const char* ptr,
                                                       LimitToken* old_limit) {
  return ReadSizeAndPushLimitAndDepthInlined(ptr, old_limit);
}
302
// Parses a length-prefixed submessage in place: reads the size prefix,
// pushes it as a new limit (saving the old one in `old`), runs the
// submessage's parser, then restores depth and limit.  Returns nullptr on
// parse failure or limit violation.
const char* ParseContext::ParseMessage(MessageLite* msg, const char* ptr) {
  LimitToken old;
  ptr = ReadSizeAndPushLimitAndDepth(ptr, &old);
  if (ptr == nullptr) return ptr;
  auto old_depth = depth_;
  ptr = msg->_InternalParse(ptr, this);
  if (ptr != nullptr) ABSL_DCHECK_EQ(old_depth, depth_);
  // Restore the recursion budget consumed above — presumably
  // ReadSizeAndPushLimitAndDepth decremented depth_; confirm against the
  // inlined implementation in parse_context.h.
  depth_++;
  if (!PopLimit(std::move(old))) return nullptr;
  return ptr;
}
314
// Appends `val` to *s encoded as a base-128 varint: little-endian 7-bit
// groups, with the high bit set on every byte except the last.
inline void WriteVarint(uint64_t val, std::string* s) {
  for (; val >= 0x80; val >>= 7) {
    s->push_back(static_cast<char>(val | 0x80));
  }
  s->push_back(static_cast<char>(val));
}
323
WriteVarint(uint32_t num,uint64_t val,std::string * s)324 void WriteVarint(uint32_t num, uint64_t val, std::string* s) {
325 WriteVarint(num << 3, s);
326 WriteVarint(val, s);
327 }
328
WriteLengthDelimited(uint32_t num,absl::string_view val,std::string * s)329 void WriteLengthDelimited(uint32_t num, absl::string_view val, std::string* s) {
330 WriteVarint((num << 3) + 2, s);
331 WriteVarint(val.size(), s);
332 s->append(val.data(), val.size());
333 }
334
// Continues decoding a varint whose first byte (already folded into `res`)
// had its continuation bit set.  Bytes 1..4 contribute payload bits; bytes
// 5..9 are scanned only for termination, since the low 32 bits are already
// complete.  Returns {ptr past the varint, value}, or {nullptr, 0} if the
// varint exceeds the 10-byte maximum.
std::pair<const char*, uint32_t> VarintParseSlow32(const char* p,
                                                   uint32_t res) {
  std::uint32_t i = 1;
  for (; i < 5; ++i) {
    const uint32_t b = static_cast<uint8_t>(p[i]);
    // Adding (b - 1) << 7i both adds this byte's payload and cancels the
    // continuation bit the previous byte contributed.
    res += (b - 1) << (7 * i);
    if (b < 128) return {p + i + 1, res};
  }
  // Accept over-long encodings up to the 10-byte maximum.
  for (; i < 10; ++i) {
    const uint32_t b = static_cast<uint8_t>(p[i]);
    if (b < 128) return {p + i + 1, res};
  }
  return {nullptr, 0};  // More than 10 bytes: malformed varint.
}
353
// 64-bit companion of VarintParseSlow32: `res32` holds the first byte's
// contribution; up to nine more bytes are folded in.  Returns {ptr past the
// varint, value}, or {nullptr, 0} for a varint longer than 10 bytes.
std::pair<const char*, uint64_t> VarintParseSlow64(const char* p,
                                                   uint32_t res32) {
  uint64_t result = res32;
  std::uint32_t i = 1;
  do {
    const uint64_t b = static_cast<uint8_t>(p[i]);
    // (b - 1) << 7i adds this byte's payload while cancelling the previous
    // byte's continuation bit.
    result += (b - 1) << (7 * i);
    if (b < 128) return {p + i + 1, result};
  } while (++i < 10);
  return {nullptr, 0};
}
366
// Continues decoding a tag whose first two bytes (already folded into `res`)
// both had continuation bits set.  Tags fit in 32 bits, so at most five
// bytes total are accepted; returns {nullptr, 0} otherwise.
std::pair<const char*, uint32_t> ReadTagFallback(const char* p, uint32_t res) {
  for (std::uint32_t i = 2; i < 5; ++i) {
    const uint32_t b = static_cast<uint8_t>(p[i]);
    // Payload plus cancellation of the previous byte's continuation bit.
    res += (b - 1) << (7 * i);
    if (b < 128) return {p + i + 1, res};
  }
  return {nullptr, 0};
}
377
ReadSizeFallback(const char * p,uint32_t res)378 std::pair<const char*, int32_t> ReadSizeFallback(const char* p, uint32_t res) {
379 for (std::uint32_t i = 1; i < 4; i++) {
380 uint32_t byte = static_cast<uint8_t>(p[i]);
381 res += (byte - 1) << (7 * i);
382 if (PROTOBUF_PREDICT_TRUE(byte < 128)) {
383 return {p + i + 1, res};
384 }
385 }
386 std::uint32_t byte = static_cast<uint8_t>(p[4]);
387 if (PROTOBUF_PREDICT_FALSE(byte >= 8)) return {nullptr, 0}; // size >= 2gb
388 res += (byte - 1) << 28;
389 // Protect against sign integer overflow in PushLimit. Limits are relative
390 // to buffer ends and ptr could potential be kSlopBytes beyond a buffer end.
391 // To protect against overflow we reject limits absurdly close to INT_MAX.
392 if (PROTOBUF_PREDICT_FALSE(res > INT_MAX - ParseContext::kSlopBytes)) {
393 return {nullptr, 0};
394 }
395 return {p + 5, res};
396 }
397
// Range-append callback used when parsing raw bytes into a std::string; the
// ParseContext parameter is unused but required by the callback signature.
const char* StringParser(const char* begin, const char* end, void* object,
                         ParseContext*) {
  auto* out = static_cast<std::string*>(object);
  out->append(begin, end - begin);
  return end;
}
404
// Defined in wire_format_lite.cc
// Forward declaration: logs a UTF-8 validation failure for the given
// message/field during the named operation.
void PrintUTF8ErrorLog(absl::string_view message_name,
                       absl::string_view field_name, const char* operation_str,
                       bool emit_stacktrace);
409
VerifyUTF8(absl::string_view str,const char * field_name)410 bool VerifyUTF8(absl::string_view str, const char* field_name) {
411 if (!utf8_range::IsStructurallyValid(str)) {
412 PrintUTF8ErrorLog("", field_name, "parsing", false);
413 return false;
414 }
415 return true;
416 }
417
InlineGreedyStringParser(std::string * s,const char * ptr,ParseContext * ctx)418 const char* InlineGreedyStringParser(std::string* s, const char* ptr,
419 ParseContext* ctx) {
420 int size = ReadSize(&ptr);
421 if (!ptr) return nullptr;
422 return ctx->ReadString(ptr, size, s);
423 }
424
425
426 template <typename T, bool sign>
VarintParser(void * object,const char * ptr,ParseContext * ctx)427 const char* VarintParser(void* object, const char* ptr, ParseContext* ctx) {
428 return ctx->ReadPackedVarint(ptr, [object](uint64_t varint) {
429 T val;
430 if (sign) {
431 if (sizeof(T) == 8) {
432 val = WireFormatLite::ZigZagDecode64(varint);
433 } else {
434 val = WireFormatLite::ZigZagDecode32(varint);
435 }
436 } else {
437 val = varint;
438 }
439 static_cast<RepeatedField<T>*>(object)->Add(val);
440 });
441 }
442
// Exported thin wrappers binding VarintParser to each packed varint field
// type; the second template argument selects zigzag decoding (sint32/64).
const char* PackedInt32Parser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return VarintParser<int32_t, false>(object, ptr, ctx);
}
const char* PackedUInt32Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<uint32_t, false>(object, ptr, ctx);
}
const char* PackedInt64Parser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return VarintParser<int64_t, false>(object, ptr, ctx);
}
const char* PackedUInt64Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<uint64_t, false>(object, ptr, ctx);
}
const char* PackedSInt32Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<int32_t, true>(object, ptr, ctx);
}
const char* PackedSInt64Parser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return VarintParser<int64_t, true>(object, ptr, ctx);
}

// Enums travel on the wire as plain (non-zigzag) varints.
const char* PackedEnumParser(void* object, const char* ptr, ParseContext* ctx) {
  return VarintParser<int, false>(object, ptr, ctx);
}

const char* PackedBoolParser(void* object, const char* ptr, ParseContext* ctx) {
  return VarintParser<bool, false>(object, ptr, ctx);
}
475
// Parses a packed repeated fixed-width field: reads the length prefix, then
// bulk-reads that many bytes worth of T into the RepeatedField.
template <typename T>
const char* FixedParser(void* object, const char* ptr, ParseContext* ctx) {
  int size = ReadSize(&ptr);
  // NOTE(review): ptr is nullptr here when ReadSize fails; this relies on
  // ReadPackedFixed handling a null ptr — confirm in parse_context.h.
  return ctx->ReadPackedFixed(ptr, size,
                              static_cast<RepeatedField<T>*>(object));
}
482
// Exported thin wrappers binding FixedParser to each packed fixed-width
// field type (fixed32/64, sfixed32/64, float, double).
const char* PackedFixed32Parser(void* object, const char* ptr,
                                ParseContext* ctx) {
  return FixedParser<uint32_t>(object, ptr, ctx);
}
const char* PackedSFixed32Parser(void* object, const char* ptr,
                                 ParseContext* ctx) {
  return FixedParser<int32_t>(object, ptr, ctx);
}
const char* PackedFixed64Parser(void* object, const char* ptr,
                                ParseContext* ctx) {
  return FixedParser<uint64_t>(object, ptr, ctx);
}
const char* PackedSFixed64Parser(void* object, const char* ptr,
                                 ParseContext* ctx) {
  return FixedParser<int64_t>(object, ptr, ctx);
}
const char* PackedFloatParser(void* object, const char* ptr,
                              ParseContext* ctx) {
  return FixedParser<float>(object, ptr, ctx);
}
const char* PackedDoubleParser(void* object, const char* ptr,
                               ParseContext* ctx) {
  return FixedParser<double>(object, ptr, ctx);
}
507
// Field sink used by WireFormatParser/FieldParser to handle fields a message
// does not recognize: each callback re-serializes the field into `unknown_`,
// or skips it when `unknown_` is null.  Throughout, `num * 8 + wiretype` is
// the wire-format tag (field number shifted left by 3, wire type in the low
// three bits).
class UnknownFieldLiteParserHelper {
 public:
  explicit UnknownFieldLiteParserHelper(std::string* unknown)
      : unknown_(unknown) {}

  // Records a varint field (wire type 0).
  void AddVarint(uint32_t num, uint64_t value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8, unknown_);
    WriteVarint(value, unknown_);
  }
  // Records a fixed64 field (wire type 1) in little-endian byte order.
  void AddFixed64(uint32_t num, uint64_t value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8 + 1, unknown_);
    char buffer[8];
    io::CodedOutputStream::WriteLittleEndian64ToArray(
        value, reinterpret_cast<uint8_t*>(buffer));
    unknown_->append(buffer, 8);
  }
  // Records (or skips) a length-delimited field (wire type 2).
  const char* ParseLengthDelimited(uint32_t num, const char* ptr,
                                   ParseContext* ctx) {
    int size = ReadSize(&ptr);
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    if (unknown_ == nullptr) return ctx->Skip(ptr, size);
    WriteVarint(num * 8 + 2, unknown_);
    WriteVarint(size, unknown_);
    return ctx->AppendString(ptr, size, unknown_);
  }
  // Records a group: start tag (wire type 3), the recursively parsed
  // contents, then the matching end tag (wire type 4).
  const char* ParseGroup(uint32_t num, const char* ptr, ParseContext* ctx) {
    if (unknown_) WriteVarint(num * 8 + 3, unknown_);
    ptr = ctx->ParseGroupInlined(ptr, num * 8 + 3, [&](const char* ptr) {
      return WireFormatParser(*this, ptr, ctx);
    });
    GOOGLE_PROTOBUF_PARSER_ASSERT(ptr);
    if (unknown_) WriteVarint(num * 8 + 4, unknown_);
    return ptr;
  }
  // Records a fixed32 field (wire type 5) in little-endian byte order.
  void AddFixed32(uint32_t num, uint32_t value) {
    if (unknown_ == nullptr) return;
    WriteVarint(num * 8 + 5, unknown_);
    char buffer[4];
    io::CodedOutputStream::WriteLittleEndian32ToArray(
        value, reinterpret_cast<uint8_t*>(buffer));
    unknown_->append(buffer, 4);
  }

 private:
  // Destination for re-serialized unknown fields; null means "skip".
  std::string* unknown_;
};
556
UnknownGroupLiteParse(std::string * unknown,const char * ptr,ParseContext * ctx)557 const char* UnknownGroupLiteParse(std::string* unknown, const char* ptr,
558 ParseContext* ctx) {
559 UnknownFieldLiteParserHelper field_parser(unknown);
560 return WireFormatParser(field_parser, ptr, ctx);
561 }
562
UnknownFieldParse(uint32_t tag,std::string * unknown,const char * ptr,ParseContext * ctx)563 const char* UnknownFieldParse(uint32_t tag, std::string* unknown,
564 const char* ptr, ParseContext* ctx) {
565 UnknownFieldLiteParserHelper field_parser(unknown);
566 return FieldParser(tag, field_parser, ptr, ctx);
567 }
568
569 } // namespace internal
570 } // namespace protobuf
571 } // namespace google
572
573 #include "google/protobuf/port_undef.inc"
574