// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"

#include <cstddef>
#include <cstdint>
#include <cstring>

#include "gtest/gtest.h"
#include "pw_assert/check.h"
#include "pw_containers/vector.h"

using std::byte;

namespace pw {
namespace ring_buffer {
namespace {
using Entry = PrefixedEntryRingBufferMulti::Entry;
using iterator = PrefixedEntryRingBufferMulti::iterator;

TEST(PrefixedEntryRingBuffer, NoBuffer) {
  PrefixedEntryRingBuffer ring(false);

  byte buf[32];
  size_t count;

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.SetBuffer(span<byte>(static_cast<byte*>(nullptr), 10u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.SetBuffer(span(buf, 0u)), Status::InvalidArgument());
  EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);

  EXPECT_EQ(ring.PushBack(buf), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFrontWithPreamble(buf, &count),
            Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

// Single entry to write/read/pop over and over again.
constexpr byte single_entry_data[] = {byte(1),
                                      byte(2),
                                      byte(3),
                                      byte(4),
                                      byte(5),
                                      byte(6),
                                      byte(7),
                                      byte(8),
                                      byte(9)};
constexpr size_t single_entry_total_size = sizeof(single_entry_data) + 1;
constexpr size_t single_entry_test_buffer_size =
    (single_entry_total_size * 7) / 2;

// Make sure single_entry_total_size is even so single_entry_test_buffer_size
// gets the proper wrap/even behavior when getting to the end of the buffer.
static_assert((single_entry_total_size % 2) == 0u);
constexpr size_t kSingleEntryCycles = 300u;

// Repeatedly write the same data, read it, and pop it.
void SingleEntryWriteReadTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  byte read_buffer[single_entry_total_size];

  // Set read_size to an unexpected value to make sure result checks don't luck
  // out and happen to see a previous value.
  size_t read_size = 500U;
  uint32_t user_preamble = 0U;

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PushBack(span(single_entry_data, sizeof(test_buffer) + 5)),
            Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);
  read_size = 500U;
  EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
            Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

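  // Expected layout of a stored entry:
  // [optional user preamble byte][varint-encoded size byte][payload bytes].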
  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the value of the preamble to a single byte, to ensure that we
    // retain a static `single_entry_total_size` during the test. Single
    // bytes are varint-encoded to the same value.
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(ring.PushBack(span(single_entry_data, data_size), preamble_byte),
              OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, data_size);

    // ASSERT_THAT(span(expect_buffer).last(data_size),
    //            testing::ElementsAreArray(span(read_buffer, data_size)));
    ASSERT_EQ(
        memcmp(
            span(expect_buffer).last(data_size).data(), read_buffer, data_size),
        0);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, single_entry_total_size);

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    // ASSERT_THAT(span(expect_buffer),
    //            testing::ElementsAreArray(span(read_buffer)));
    ASSERT_EQ(memcmp(expect_buffer, read_buffer, single_entry_total_size), 0);

    if (user_data) {
      user_preamble = 0U;
      ASSERT_EQ(
          ring.PeekFrontWithPreamble(read_buffer, user_preamble, read_size),
          OkStatus());
      ASSERT_EQ(read_size, data_size);
      ASSERT_EQ(user_preamble, preamble_byte);
      ASSERT_EQ(memcmp(span(expect_buffer).last(data_size).data(),
                       read_buffer,
                       data_size),
                0);
    }

    ASSERT_EQ(ring.PopFront(), OkStatus());
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadNoUserData) {
  SingleEntryWriteReadTest(false);
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadYesUserData) {
  SingleEntryWriteReadTest(true);
}

// TODO(b/234883746): Increase this to 5000 once we have a way to detect targets
// with more computation and memory oomph.
constexpr size_t kOuterCycles = 50u;
constexpr size_t kCountingUpMaxExpectedEntries =
    single_entry_test_buffer_size / single_entry_total_size;

// Write data that is filled with a byte value that increments each write. Write
// many times without read/pop and then check to make sure correct contents are
// in the ring buffer.
template <bool kUserData>
void CountingUpWriteReadTest() {
  PrefixedEntryRingBuffer ring(kUserData);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);

  constexpr size_t kDataSize = sizeof(single_entry_data) - (kUserData ? 1 : 0);

  for (size_t i = 0; i < kOuterCycles; i++) {
    size_t seed = i;

    byte write_buffer[kDataSize];

    size_t j;
    for (j = 0; j < kSingleEntryCycles; j++) {
      memset(write_buffer, j + seed, sizeof(write_buffer));

      ASSERT_EQ(ring.PushBack(write_buffer), OkStatus());

      size_t expected_count = (j < kCountingUpMaxExpectedEntries)
                                  ? j + 1
                                  : kCountingUpMaxExpectedEntries;
      ASSERT_EQ(ring.EntryCount(), expected_count);
    }
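    // Only the newest kCountingUpMaxExpectedEntries entries survive the
    // overwrites above, so the oldest remaining entry was filled with
    // seed + (kSingleEntryCycles - kCountingUpMaxExpectedEntries).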
    size_t final_write_j = j;
    size_t fill_val = seed + final_write_j - kCountingUpMaxExpectedEntries;

    for (j = 0; j < kCountingUpMaxExpectedEntries; j++) {
      byte read_buffer[sizeof(write_buffer)];
      size_t read_size;
      memset(write_buffer, fill_val + j, sizeof(write_buffer));
      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());

      ASSERT_EQ(memcmp(write_buffer, read_buffer, kDataSize), 0);

      ASSERT_EQ(ring.PopFront(), OkStatus());
    }
  }
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadNoUserData) {
  CountingUpWriteReadTest<false>();
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadYesUserData) {
  CountingUpWriteReadTest<true>();
}

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, single_entry_total_size> read_buffer;

// Repeatedly write the same data, read it with a section writer, and pop it.
void SingleEntryWriteReadWithSectionWriterTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  auto output = [](span<const byte> src) -> Status {
    for (byte b : src) {
      read_buffer.push_back(b);
    }
    return OkStatus();
  };

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the value of the preamble to a single byte, to ensure that we
    // retain a static `single_entry_total_size` during the test. Single
    // bytes are varint-encoded to the same value.
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(ring.PushBack(span(single_entry_data, data_size), preamble_byte),
              OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFront(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), data_size);

    ASSERT_EQ(memcmp(span(expect_buffer).last(data_size).data(),
                     read_buffer.data(),
                     data_size),
              0);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), single_entry_total_size);
    ASSERT_EQ(ring.PopFront(), OkStatus());

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    ASSERT_EQ(
        memcmp(expect_buffer, read_buffer.data(), single_entry_total_size), 0);
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadWithSectionWriterNoUserData) {
  SingleEntryWriteReadWithSectionWriterTest(false);
}

TEST(PrefixedEntryRingBuffer,
     SingleEntryWriteReadWithSectionWriterYesUserData) {
  SingleEntryWriteReadWithSectionWriterTest(true);
}

constexpr size_t kEntrySizeBytes = 8u;
constexpr size_t kTotalEntryCount = 20u;
constexpr size_t kBufferExtraBytes = 5u;
constexpr size_t kTestBufferSize =
    (kEntrySizeBytes * kTotalEntryCount) + kBufferExtraBytes;

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, kTestBufferSize> actual_result;

void DeringTest(bool preload) {
  PrefixedEntryRingBuffer ring;

  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Entry data is entry size - preamble (single byte in this case).
  byte single_entry_buffer[kEntrySizeBytes - 1u];
  auto entry_data = span(single_entry_buffer);
  size_t i;

  // TODO(b/234883746): Increase this to 500 once we have a way to detect
  // targets with more computation and memory oomph.
  size_t loop_goal = preload ? 50 : 1;

  for (size_t main_loop_count = 0; main_loop_count < loop_goal;
       main_loop_count++) {
    if (preload) {
      // Prime the ringbuffer with some junk data to get the buffer
      // wrapped.
      for (i = 0; i < (kTotalEntryCount * (main_loop_count % 64u)); i++) {
        memset(single_entry_buffer, i, sizeof(single_entry_buffer));
        ASSERT_EQ(OkStatus(), ring.PushBack(single_entry_buffer));
      }
    }

    // Build up the expected buffer and fill the ring buffer with the test data.
    pw::Vector<byte, kTestBufferSize> expected_result;
    for (i = 0; i < kTotalEntryCount; i++) {
      // First component of the entry: the varint size.
      static_assert(sizeof(single_entry_buffer) < 127);
      expected_result.push_back(byte(sizeof(single_entry_buffer)));

      // Second component of the entry: the raw data.
      memset(single_entry_buffer, 'a' + i, sizeof(single_entry_buffer));
      for (byte b : entry_data) {
        expected_result.push_back(b);
      }

      // The ring buffer internally pushes the varint size byte.
      ASSERT_EQ(OkStatus(), ring.PushBack(single_entry_buffer));
    }

    // Check values before doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    ASSERT_EQ(ring.Dering(), OkStatus());

    // Check values after doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    // Read out the entries of the ring buffer.
    actual_result.clear();
    auto output = [](span<const byte> src) -> Status {
      for (byte b : src) {
        actual_result.push_back(b);
      }
      return OkStatus();
    };
    while (ring.EntryCount()) {
      ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
      ASSERT_EQ(ring.PopFront(), OkStatus());
    }

    // Ensure the actual result out of the ring buffer matches our manually
    // computed result.
    EXPECT_EQ(expected_result.size(), actual_result.size());
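    // After Dering(), entries are stored contiguously from the start of the
    // backing buffer, so the raw test_buffer bytes should match the data that
    // was read back out.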
    ASSERT_EQ(memcmp(test_buffer, actual_result.data(), actual_result.size()),
              0);
    ASSERT_EQ(
        memcmp(
            expected_result.data(), actual_result.data(), actual_result.size()),
        0);
  }
}

TEST(PrefixedEntryRingBuffer, Dering) { DeringTest(true); }
TEST(PrefixedEntryRingBuffer, DeringNoPreload) { DeringTest(false); }

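// Helpers that push and peek trivially copyable values by aliasing their raw
// byte representation through a union.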
template <typename T>
Status PushBack(PrefixedEntryRingBufferMulti& ring,
                T element,
                uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.PushBack(aliased.buffer, user_preamble);
}

template <typename T>
Status TryPushBack(PrefixedEntryRingBufferMulti& ring,
                   T element,
                   uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.TryPushBack(aliased.buffer, user_preamble);
}

template <typename T>
T PeekFront(PrefixedEntryRingBufferMulti::Reader& reader,
            uint32_t* user_preamble_out = nullptr) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  size_t bytes_read = 0;
  uint32_t user_preamble = 0;
  PW_CHECK_OK(
      reader.PeekFrontWithPreamble(aliased.buffer, user_preamble, bytes_read));
  PW_CHECK_INT_EQ(bytes_read, sizeof(T));
  if (user_preamble_out) {
    *user_preamble_out = user_preamble;
  }
  return aliased.item;
}

template <typename T>
T GetEntry(span<const std::byte> lhs) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  std::memcpy(aliased.buffer.data(), lhs.data(), lhs.size_bytes());
  return aliased.item;
}

void EmptyDataPushBackTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Push back an empty span and a non-empty span.
  EXPECT_EQ(ring.PushBack(span<std::byte>(), 1u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  EXPECT_EQ(ring.PushBack(single_entry_data, 2u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 2u);

  // Confirm that both entries can be read back.
  byte entry_buffer[kTestBufferSize];
  uint32_t user_preamble = 0;
  size_t bytes_read = 0;
  // Read empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 1u : 0u);
  EXPECT_EQ(bytes_read, 0u);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  // Read non-empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 2u : 0u);
  ASSERT_EQ(bytes_read, sizeof(single_entry_data));
  EXPECT_EQ(memcmp(entry_buffer, single_entry_data, bytes_read), 0);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestWithPreamble) {
  EmptyDataPushBackTest(true);
}
TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestNoPreamble) {
  EmptyDataPushBackTest(false);
}

TEST(PrefixedEntryRingBuffer, TryPushBack) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with a constant.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, 5);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(PeekFront<int>(ring), 5);

  // Should be unable to push more items.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
    EXPECT_EQ(PeekFront<int>(ring), 5);
  }

  // PushBack, unlike TryPushBack, overwrites the oldest entries when the
  // buffer is full, so it still succeeds.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PushBack<int>(ring, 100), OkStatus());
  }
  EXPECT_EQ(PeekFront<int>(ring), 100);
}

TEST(PrefixedEntryRingBuffer, Iterator) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with an increasing count.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (Result<const Entry> entry_info : ring) {
    EXPECT_TRUE(entry_info.status().ok());
    EXPECT_EQ(GetEntry<size_t>(entry_info.value().buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, TryPushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }

  // Run fast reader twice as fast as the slow reader.
  size_t total_used_bytes = ring.TotalUsedBytes();
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(fast_reader), i);
    EXPECT_EQ(fast_reader.PopFront(), OkStatus());
    EXPECT_EQ(ring.TotalUsedBytes(), total_used_bytes);
    if (i % 2 == 0) {
      EXPECT_EQ(PeekFront<int>(slow_reader), i / 2);
      EXPECT_EQ(slow_reader.PopFront(), OkStatus());
      EXPECT_TRUE(ring.TotalUsedBytes() < total_used_bytes);
    }
    total_used_bytes = ring.TotalUsedBytes();
  }
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_TRUE(ring.TotalUsedBytes() > 0u);

  // Fill the buffer again; expect that the fast reader sees only the newly
  // pushed entries, about half as many as the slow reader.
  size_t max_items = total_items;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), max_items);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - max_items);

  for (int i = total_items - max_items; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(slow_reader), i);
    EXPECT_EQ(slow_reader.PopFront(), OkStatus());
    if (static_cast<size_t>(i) >= max_items) {
      EXPECT_EQ(PeekFront<int>(fast_reader), i);
      EXPECT_EQ(fast_reader.PopFront(), OkStatus());
    }
  }
  EXPECT_EQ(slow_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.TotalUsedBytes(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, PushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<uint32_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), total_items);

  // The following test:
  //  - Moves the fast reader forward by one entry.
  //  - Writes a single entry that is guaranteed to be larger than the size of a
  //    single entry in the buffer (uint64_t entry > uint32_t entry).
  //  - Checks to see that both readers were moved forward.
  EXPECT_EQ(fast_reader.PopFront(), OkStatus());
  EXPECT_EQ(PushBack<uint64_t>(ring, 5u), OkStatus());
  // The readers have moved past values 0 and 1.
  EXPECT_EQ(PeekFront<uint32_t>(slow_reader), 2u);
  EXPECT_EQ(PeekFront<uint32_t>(fast_reader), 2u);
  // The readers have lost two entries, but gained an entry.
  EXPECT_EQ(slow_reader.EntryCount(), total_items - 1);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - 1);
}

TEST(PrefixedEntryRingBufferMulti, ReaderAddRemove) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  PrefixedEntryRingBufferMulti::Reader transient_reader;

  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<size_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(reader.EntryCount(), total_items);

  // Add new reader after filling the buffer.
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  // Confirm that the transient reader observes all values, even though it was
  // attached after entries were pushed.
  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);

  // Confirm that re-attaching the reader resets it back to the oldest
  // available entry.
  EXPECT_EQ(ring.DetachReader(transient_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, SingleBufferPerReader) {
  PrefixedEntryRingBufferMulti ring_one;
  PrefixedEntryRingBufferMulti ring_two;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring_one.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring_one.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), Status::InvalidArgument());

  EXPECT_EQ(ring_one.DetachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_one.AttachReader(reader), Status::InvalidArgument());
}

TEST(PrefixedEntryRingBufferMulti, IteratorEmptyBuffer) {
  PrefixedEntryRingBufferMulti ring;
  // Pick a buffer that can't contain any valid entries.
  byte test_buffer[1] = {std::byte(0xFF)};

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.begin(), ring.end());
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Buffer only contains valid entries. This happens after populating the
  // buffer, before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with an increasing count.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntriesWithPreamble) {
  PrefixedEntryRingBufferMulti ring(true);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Buffer only contains valid entries. This happens after populating the
  // buffer, before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with an increasing count, using the count as the
  // user preamble as well.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    EXPECT_EQ(entry_info.preamble, validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer only contains stale, valid entries. This happens when, after
  // populating the buffer, all entries are read. The buffer retains the
  // data but has an entry count of zero.
  // E.g. [STALE|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Push and pop all the entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  while (reader.PopFront().ok()) {
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains both valid and stale entries. This happens when, after
  // populating the buffer, only some of the entries are read.
  // E.g. [VALID|INVALID|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill the buffer with entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Pop roughly half the entries.
  while (reader.EntryCount() > (entry_count / 2)) {
    EXPECT_TRUE(reader.PopFront().ok());
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorBufferCorruption) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains partially written entries. This may happen if writing
  // is pre-empted (e.g. a crash occurs). In this state, we expect a series
  // of valid entries followed by an invalid entry.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  // Add one entry to capture the second entry index.
  size_t entry_count = 0;
  EXPECT_TRUE(TryPushBack<size_t>(ring, entry_count++).ok());
  size_t entry_size = ring.TotalUsedBytes();

  // Fill the buffer with entries.
  while (TryPushBack<size_t>(ring, entry_count++).ok()) {
  }

  // Push another entry to move the write index forward and force the oldest
  // reader forward. This will require the iterator to dering.
  EXPECT_TRUE(PushBack<size_t>(ring, 0).ok());
  EXPECT_TRUE(ring.CheckForCorruption().ok());

  // The first entry is overwritten. Corrupt all data past the fifth entry.
  // Note that because the first entry has shifted, the entry_count recorded
  // in each entry is shifted by 1.
  constexpr size_t valid_entries = 5;
  size_t offset = valid_entries * entry_size;
  memset(test_buffer + offset, 0xFF, kTestBufferSize - offset);
  EXPECT_FALSE(ring.CheckForCorruption().ok());

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  iterator it = ring.begin();
  for (; it != ring.end(); it++) {
    EXPECT_EQ(GetEntry<size_t>(it->buffer), validated_entries + 1);
    validated_entries++;
  }
  // The final entry will fail to be read.
  EXPECT_EQ(it.status(), Status::DataLoss());
  EXPECT_EQ(validated_entries, valid_entries);
}

}  // namespace
}  // namespace ring_buffer
}  // namespace pw