// Copyright 2020 The Pigweed Authors
//
// Licensed under the Apache License, Version 2.0 (the "License"); you may not
// use this file except in compliance with the License. You may obtain a copy of
// the License at
//
//     https://www.apache.org/licenses/LICENSE-2.0
//
// Unless required by applicable law or agreed to in writing, software
// distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
// WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
// License for the specific language governing permissions and limitations under
// the License.

#include "pw_ring_buffer/prefixed_entry_ring_buffer.h"

#include <cstddef>
#include <cstdint>
#include <cstring>

#include "pw_assert/check.h"
#include "pw_containers/vector.h"
#include "pw_unit_test/framework.h"

using std::byte;

namespace pw {
namespace ring_buffer {
namespace {
using Entry = PrefixedEntryRingBufferMulti::Entry;
using iterator = PrefixedEntryRingBufferMulti::iterator;

TEST(PrefixedEntryRingBuffer, NoBuffer) {
  PrefixedEntryRingBuffer ring(false);

  byte buf[32];
  size_t count;

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.SetBuffer(std::span<byte>(nullptr, 10u)),
            Status::InvalidArgument());
  EXPECT_EQ(ring.SetBuffer(std::span(buf, 0u)), Status::InvalidArgument());
  EXPECT_EQ(ring.FrontEntryDataSizeBytes(), 0u);

  EXPECT_EQ(ring.PushBack(buf), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(buf, &count), Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFrontWithPreamble(buf, &count),
            Status::FailedPrecondition());
  EXPECT_EQ(count, 0u);
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::FailedPrecondition());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

// Single entry to write/read/pop over and over again.
constexpr byte single_entry_data[] = {byte(1),
                                      byte(2),
                                      byte(3),
                                      byte(4),
                                      byte(5),
                                      byte(6),
                                      byte(7),
                                      byte(8),
                                      byte(9)};
constexpr size_t single_entry_total_size = sizeof(single_entry_data) + 1;
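// Size the test buffer to hold 3.5 entries so that entries regularly wrap
// around the end of the underlying buffer during the cycle tests.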
constexpr size_t single_entry_test_buffer_size =
    (single_entry_total_size * 7) / 2;

// Make sure single_entry_total_size is even so single_entry_test_buffer_size
// gets the proper wrap/even behavior when getting to the end of the buffer.
static_assert((single_entry_total_size % 2) == 0u);
constexpr size_t kSingleEntryCycles = 300u;

// Repeatedly write the same data, read it, and pop it, over and over again.
void SingleEntryWriteReadTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  byte read_buffer[single_entry_total_size];

  // Set read_size to an unexpected value to make sure result checks don't luck
  // out and happen to see a previous value.
  size_t read_size = 500U;
  uint32_t user_preamble = 0U;

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(
      ring.PushBack(std::span(single_entry_data, sizeof(test_buffer) + 5)),
      Status::OutOfRange());
  EXPECT_EQ(ring.EntryCount(), 0u);
  EXPECT_EQ(ring.PeekFront(read_buffer, &read_size), Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);
  read_size = 500U;
  EXPECT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size),
            Status::OutOfRange());
  EXPECT_EQ(read_size, 0u);

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

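  // Expected storage layout of a single entry:
  // [optional user preamble byte][varint size byte][payload bytes].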
  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the value of the preamble to a single byte, to ensure that we
    // retain a static `single_entry_total_size` during the test. Single
    // bytes are varint-encoded to the same value.
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(
        ring.PushBack(std::span(single_entry_data, data_size), preamble_byte),
        OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, data_size);

    // ASSERT_THAT(std::span(expect_buffer).last(data_size),
    //            testing::ElementsAreArray(std::span(read_buffer, data_size)));
    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer,
                     data_size),
              0);

    read_size = 500U;
    ASSERT_EQ(ring.PeekFrontWithPreamble(read_buffer, &read_size), OkStatus());
    ASSERT_EQ(read_size, single_entry_total_size);

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    // ASSERT_THAT(std::span(expect_buffer),
    //            testing::ElementsAreArray(std::span(read_buffer)));
    ASSERT_EQ(memcmp(expect_buffer, read_buffer, single_entry_total_size), 0);

    if (user_data) {
      user_preamble = 0U;
      ASSERT_EQ(
          ring.PeekFrontWithPreamble(read_buffer, user_preamble, read_size),
          OkStatus());
      ASSERT_EQ(read_size, data_size);
      ASSERT_EQ(user_preamble, preamble_byte);
      ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                       read_buffer,
                       data_size),
                0);
    }

    ASSERT_EQ(ring.PopFront(), OkStatus());
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadNoUserData) {
  SingleEntryWriteReadTest(false);
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadYesUserData) {
  SingleEntryWriteReadTest(true);
}

// TODO(pwbug/196): Increase this to 5000 once we have a way to detect targets
// with more computation and memory oomph.
constexpr size_t kOuterCycles = 50u;
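// Maximum number of complete entries that fit in the test buffer at once;
// pushes beyond this count evict the oldest entry.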
constexpr size_t kCountingUpMaxExpectedEntries =
    single_entry_test_buffer_size / single_entry_total_size;

// Write entries filled with a byte value that increments on each write. Write
// many times without reading or popping, then check that the ring buffer
// contains the correct entries.
template <bool kUserData>
void CountingUpWriteReadTest() {
  PrefixedEntryRingBuffer ring(kUserData);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);

  constexpr size_t kDataSize = sizeof(single_entry_data) - (kUserData ? 1 : 0);

  for (size_t i = 0; i < kOuterCycles; i++) {
    size_t seed = i;

    byte write_buffer[kDataSize];

    size_t j;
    for (j = 0; j < kSingleEntryCycles; j++) {
      memset(write_buffer, j + seed, sizeof(write_buffer));

      ASSERT_EQ(ring.PushBack(write_buffer), OkStatus());

      size_t expected_count = (j < kCountingUpMaxExpectedEntries)
                                  ? j + 1
                                  : kCountingUpMaxExpectedEntries;
      ASSERT_EQ(ring.EntryCount(), expected_count);
    }
    size_t final_write_j = j;
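    // Fill value used by the oldest entry that is still retained in the ring.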
    size_t fill_val = seed + final_write_j - kCountingUpMaxExpectedEntries;

    for (j = 0; j < kCountingUpMaxExpectedEntries; j++) {
      byte read_buffer[sizeof(write_buffer)];
      size_t read_size;
      memset(write_buffer, fill_val + j, sizeof(write_buffer));
      ASSERT_EQ(ring.PeekFront(read_buffer, &read_size), OkStatus());

      ASSERT_EQ(memcmp(write_buffer, read_buffer, kDataSize), 0);

      ASSERT_EQ(ring.PopFront(), OkStatus());
    }
  }
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadNoUserData) {
  CountingUpWriteReadTest<false>();
}

TEST(PrefixedEntryRingBuffer, CountingUpWriteReadYesUserData) {
  CountingUpWriteReadTest<true>();
}

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, single_entry_total_size> read_buffer;

// Repeatedly write the same data, read it through an output function, and pop
// it, over and over again.
void SingleEntryWriteReadWithSectionWriterTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[single_entry_test_buffer_size];

  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  auto output = [](std::span<const byte> src) -> Status {
    for (byte b : src) {
      read_buffer.push_back(b);
    }
    return OkStatus();
  };

  size_t user_preamble_bytes = (user_data ? 1 : 0);
  size_t data_size = sizeof(single_entry_data) - user_preamble_bytes;
  size_t data_offset = single_entry_total_size - data_size;

  byte expect_buffer[single_entry_total_size] = {};
  expect_buffer[user_preamble_bytes] = byte(data_size);
  memcpy(expect_buffer + data_offset, single_entry_data, data_size);

  for (size_t i = 0; i < kSingleEntryCycles; i++) {
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), 0u);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), 0u);

    // Limit the value of the preamble to a single byte, to ensure that we
    // retain a static `single_entry_total_size` during the test. Single
    // bytes are varint-encoded to the same value.
    uint32_t preamble_byte = i % 128;
    ASSERT_EQ(
        ring.PushBack(std::span(single_entry_data, data_size), preamble_byte),
        OkStatus());
    ASSERT_EQ(ring.FrontEntryDataSizeBytes(), data_size);
    ASSERT_EQ(ring.FrontEntryTotalSizeBytes(), single_entry_total_size);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFront(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), data_size);

    ASSERT_EQ(memcmp(std::span(expect_buffer).last(data_size).data(),
                     read_buffer.data(),
                     data_size),
              0);

    read_buffer.clear();
    ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
    ASSERT_EQ(read_buffer.size(), single_entry_total_size);
    ASSERT_EQ(ring.PopFront(), OkStatus());

    if (user_data) {
      expect_buffer[0] = byte(preamble_byte);
    }

    ASSERT_EQ(
        memcmp(expect_buffer, read_buffer.data(), single_entry_total_size), 0);
  }
}

TEST(PrefixedEntryRingBuffer, SingleEntryWriteReadWithSectionWriterNoUserData) {
  SingleEntryWriteReadWithSectionWriterTest(false);
}

TEST(PrefixedEntryRingBuffer,
     SingleEntryWriteReadWithSectionWriterYesUserData) {
  SingleEntryWriteReadWithSectionWriterTest(true);
}

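// Test buffer sizing for the dering tests: kBufferExtraBytes keeps
// kTestBufferSize from being an exact multiple of kEntrySizeBytes, so entries
// wrap at uneven offsets once the buffer has cycled.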
constexpr size_t kEntrySizeBytes = 8u;
constexpr size_t kTotalEntryCount = 20u;
constexpr size_t kBufferExtraBytes = 5u;
constexpr size_t kTestBufferSize =
    (kEntrySizeBytes * kTotalEntryCount) + kBufferExtraBytes;

// Create statically to prevent allocating a capture in the lambda below.
static pw::Vector<byte, kTestBufferSize> actual_result;

void DeringTest(bool preload) {
  PrefixedEntryRingBuffer ring;

  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Entry data is entry size - preamble (single byte in this case).
  byte single_entry_buffer[kEntrySizeBytes - 1u];
  auto entry_data = std::span(single_entry_buffer);
  size_t i;

  // TODO(pwbug/196): Increase this to 500 once we have a way to detect targets
  // with more computation and memory oomph.
  size_t loop_goal = preload ? 50 : 1;

  for (size_t main_loop_count = 0; main_loop_count < loop_goal;
       main_loop_count++) {
    if (preload) {
      // Prime the ring buffer with some junk data to get the buffer
      // wrapped.
      for (i = 0; i < (kTotalEntryCount * (main_loop_count % 64u)); i++) {
        memset(single_entry_buffer, i, sizeof(single_entry_buffer));
        ring.PushBack(single_entry_buffer)
            .IgnoreError();  // TODO(pwbug/387): Handle Status properly
      }
    }

    // Build up the expected buffer and fill the ring buffer with the test data.
    pw::Vector<byte, kTestBufferSize> expected_result;
    for (i = 0; i < kTotalEntryCount; i++) {
      // First component of the entry: the varint size.
      static_assert(sizeof(single_entry_buffer) < 127);
      expected_result.push_back(byte(sizeof(single_entry_buffer)));

      // Second component of the entry: the raw data.
      memset(single_entry_buffer, 'a' + i, sizeof(single_entry_buffer));
      for (byte b : entry_data) {
        expected_result.push_back(b);
      }

      // The ring buffer internally pushes the varint size byte.
      ring.PushBack(single_entry_buffer)
          .IgnoreError();  // TODO(pwbug/387): Handle Status properly
    }

    // Check values before doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    ASSERT_EQ(ring.Dering(), OkStatus());

    // Check values after doing the dering.
    EXPECT_EQ(ring.EntryCount(), kTotalEntryCount);
    EXPECT_EQ(expected_result.size(), ring.TotalUsedBytes());

    // Read out the entries of the ring buffer.
    actual_result.clear();
    auto output = [](std::span<const byte> src) -> Status {
      for (byte b : src) {
        actual_result.push_back(b);
      }
      return OkStatus();
    };
    while (ring.EntryCount()) {
      ASSERT_EQ(ring.PeekFrontWithPreamble(output), OkStatus());
      ASSERT_EQ(ring.PopFront(), OkStatus());
    }

    // Ensure the actual result out of the ring buffer matches our manually
    // computed result.
    EXPECT_EQ(expected_result.size(), actual_result.size());
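    // After Dering(), the oldest entry starts at offset zero of the underlying
    // buffer, so the raw storage bytes also match the expected sequence.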
    ASSERT_EQ(memcmp(test_buffer, actual_result.data(), actual_result.size()),
              0);
    ASSERT_EQ(
        memcmp(
            expected_result.data(), actual_result.data(), actual_result.size()),
        0);
  }
}

TEST(PrefixedEntryRingBuffer, Dering) { DeringTest(true); }
TEST(PrefixedEntryRingBuffer, DeringNoPreload) { DeringTest(false); }

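// Helpers that round-trip trivially copyable values through the ring buffer.
// Each aliases the value with a byte array via a union so that it can be
// pushed, peeked, or decoded as a single raw entry.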
template <typename T>
Status PushBack(PrefixedEntryRingBufferMulti& ring,
                T element,
                uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.PushBack(aliased.buffer, user_preamble);
}

template <typename T>
Status TryPushBack(PrefixedEntryRingBufferMulti& ring,
                   T element,
                   uint32_t user_preamble = 0) {
  union {
    std::array<byte, sizeof(element)> buffer;
    T item;
  } aliased;
  aliased.item = element;
  return ring.TryPushBack(aliased.buffer, user_preamble);
}

template <typename T>
T PeekFront(PrefixedEntryRingBufferMulti::Reader& reader,
            uint32_t* user_preamble_out = nullptr) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  size_t bytes_read = 0;
  uint32_t user_preamble = 0;
  PW_CHECK_OK(
      reader.PeekFrontWithPreamble(aliased.buffer, user_preamble, bytes_read));
  PW_CHECK_INT_EQ(bytes_read, sizeof(T));
  if (user_preamble_out) {
    *user_preamble_out = user_preamble;
  }
  return aliased.item;
}

template <typename T>
T GetEntry(std::span<const std::byte> lhs) {
  union {
    std::array<byte, sizeof(T)> buffer;
    T item;
  } aliased;
  std::memcpy(aliased.buffer.data(), lhs.data(), lhs.size_bytes());
  return aliased.item;
}

void EmptyDataPushBackTest(bool user_data) {
  PrefixedEntryRingBuffer ring(user_data);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Push back an empty span and a non-empty span.
  EXPECT_EQ(ring.PushBack(std::span<std::byte>(), 1u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  EXPECT_EQ(ring.PushBack(single_entry_data, 2u), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 2u);

  // Confirm that both entries can be read back.
  byte entry_buffer[kTestBufferSize];
  uint32_t user_preamble = 0;
  size_t bytes_read = 0;
  // Read empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 1u : 0u);
  EXPECT_EQ(bytes_read, 0u);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 1u);
  // Read non-empty span.
  EXPECT_EQ(ring.PeekFrontWithPreamble(entry_buffer, user_preamble, bytes_read),
            OkStatus());
  EXPECT_EQ(user_preamble, user_data ? 2u : 0u);
  ASSERT_EQ(bytes_read, sizeof(single_entry_data));
  EXPECT_EQ(memcmp(entry_buffer, single_entry_data, bytes_read), 0);
  EXPECT_EQ(ring.PopFront(), OkStatus());
  EXPECT_EQ(ring.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestWithPreamble) {
  EmptyDataPushBackTest(true);
}
TEST(PrefixedEntryRingBuffer, EmptyDataPushBackTestNoPreamble) {
  EmptyDataPushBackTest(false);
}

TEST(PrefixedEntryRingBuffer, TryPushBack) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with a constant.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, 5);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(PeekFront<int>(ring), 5);

  // Should be unable to push more items.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(TryPushBack<int>(ring, 100), Status::ResourceExhausted());
    EXPECT_EQ(PeekFront<int>(ring), 5);
  }

  // Fill up the ring buffer with a constant.
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PushBack<int>(ring, 100), OkStatus());
  }
  EXPECT_EQ(PeekFront<int>(ring), 100);
}

TEST(PrefixedEntryRingBuffer, Iterator) {
  PrefixedEntryRingBuffer ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Fill up the ring buffer with entries containing increasing values.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (Result<const Entry> entry_info : ring) {
    EXPECT_TRUE(entry_info.status().ok());
    EXPECT_EQ(GetEntry<size_t>(entry_info.value().buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, TryPushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  int total_items = 0;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }

  // Run fast reader twice as fast as the slow reader.
  size_t total_used_bytes = ring.TotalUsedBytes();
  for (int i = 0; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(fast_reader), i);
    EXPECT_EQ(fast_reader.PopFront(), OkStatus());
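    // Popping with only the fast reader does not reclaim space: bytes are
    // freed once the slowest attached reader has also popped the entry.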
    EXPECT_EQ(ring.TotalUsedBytes(), total_used_bytes);
    if (i % 2 == 0) {
      EXPECT_EQ(PeekFront<int>(slow_reader), i / 2);
      EXPECT_EQ(slow_reader.PopFront(), OkStatus());
      EXPECT_TRUE(ring.TotalUsedBytes() < total_used_bytes);
    }
    total_used_bytes = ring.TotalUsedBytes();
  }
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_TRUE(ring.TotalUsedBytes() > 0u);

  // Fill the buffer again; expect that the fast reader
  // only sees half as many entries as the slow reader.
  size_t max_items = total_items;
  while (true) {
    Status status = TryPushBack<int>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), max_items);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - max_items);

  for (int i = total_items - max_items; i < total_items; ++i) {
    EXPECT_EQ(PeekFront<int>(slow_reader), i);
    EXPECT_EQ(slow_reader.PopFront(), OkStatus());
    if (static_cast<size_t>(i) >= max_items) {
      EXPECT_EQ(PeekFront<int>(fast_reader), i);
      EXPECT_EQ(fast_reader.PopFront(), OkStatus());
    }
  }
  EXPECT_EQ(slow_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(fast_reader.PopFront(), Status::OutOfRange());
  EXPECT_EQ(ring.TotalUsedBytes(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, PushBack) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader fast_reader;
  PrefixedEntryRingBufferMulti::Reader slow_reader;

  EXPECT_EQ(ring.AttachReader(fast_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(slow_reader), OkStatus());

  // Fill up the ring buffer with an increasing count.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<uint32_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(slow_reader.EntryCount(), total_items);

  // The following test:
  //  - Moves the fast reader forward by one entry.
  //  - Writes a single entry that is guaranteed to be larger than the size of a
  //    single entry in the buffer (uint64_t entry > uint32_t entry).
  //  - Checks to see that both readers were moved forward.
  EXPECT_EQ(fast_reader.PopFront(), OkStatus());
  EXPECT_EQ(PushBack<uint64_t>(ring, 5u), OkStatus());
  // The readers have moved past values 0 and 1.
  EXPECT_EQ(PeekFront<uint32_t>(slow_reader), 2u);
  EXPECT_EQ(PeekFront<uint32_t>(fast_reader), 2u);
  // The readers have lost two entries, but gained an entry.
  EXPECT_EQ(slow_reader.EntryCount(), total_items - 1);
  EXPECT_EQ(fast_reader.EntryCount(), total_items - 1);
}

TEST(PrefixedEntryRingBufferMulti, ReaderAddRemove) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  PrefixedEntryRingBufferMulti::Reader transient_reader;

  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill up the ring buffer with entries containing increasing values.
  size_t total_items = 0;
  while (true) {
    Status status = TryPushBack<size_t>(ring, total_items);
    if (status.ok()) {
      total_items++;
    } else {
      EXPECT_EQ(status, Status::ResourceExhausted());
      break;
    }
  }
  EXPECT_EQ(reader.EntryCount(), total_items);

  // Add new reader after filling the buffer.
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  // Confirm that the transient reader observes all values, even though it was
  // attached after entries were pushed.
  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);

  // Confirm that re-attaching the reader resets it back to the oldest
  // available entry.
  EXPECT_EQ(ring.DetachReader(transient_reader), OkStatus());
  EXPECT_EQ(ring.AttachReader(transient_reader), OkStatus());
  EXPECT_EQ(transient_reader.EntryCount(), total_items);

  for (size_t i = 0; i < total_items; i++) {
    EXPECT_EQ(PeekFront<size_t>(transient_reader), i);
    EXPECT_EQ(transient_reader.PopFront(), OkStatus());
  }
  EXPECT_EQ(transient_reader.EntryCount(), 0u);
}

TEST(PrefixedEntryRingBufferMulti, SingleBufferPerReader) {
  PrefixedEntryRingBufferMulti ring_one;
  PrefixedEntryRingBufferMulti ring_two;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring_one.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring_one.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), Status::InvalidArgument());

  EXPECT_EQ(ring_one.DetachReader(reader), OkStatus());
  EXPECT_EQ(ring_two.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring_one.AttachReader(reader), Status::InvalidArgument());
}

TEST(PrefixedEntryRingBufferMulti, IteratorEmptyBuffer) {
  PrefixedEntryRingBufferMulti ring;
  // Pick a buffer that can't contain any valid entries.
  byte test_buffer[1] = {std::byte(0xFF)};

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  EXPECT_EQ(ring.begin(), ring.end());
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Buffer only contains valid entries. This happens after the buffer is
  // populated and before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with entries containing increasing values.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidEntriesWithPreamble) {
  PrefixedEntryRingBufferMulti ring(true);
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Buffer only contains valid entries. This happens after the buffer is
  // populated and before any entries have been read.
  // E.g. [VALID|VALID|VALID|INVALID]

  // Fill up the ring buffer with entries whose payload and user preamble both
  // increase.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count, entry_count).ok()) {
    entry_count++;
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    EXPECT_EQ(entry_info.preamble, validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer only contains stale, valid entries. This happens when, after
  // populating the buffer, all entries are read. The buffer retains the
  // data but has an entry count of zero.
  // E.g. [STALE|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Push and pop all the entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  while (reader.PopFront().ok()) {
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorValidStaleEntries) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains both valid and stale entries. This happens when, after
  // populating the buffer, only some of the entries are read.
  // E.g. [VALID|INVALID|STALE|STALE]
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  PrefixedEntryRingBufferMulti::Reader reader;
  EXPECT_EQ(ring.AttachReader(reader), OkStatus());

  // Fill the buffer with entries.
  size_t entry_count = 0;
  while (TryPushBack<size_t>(ring, entry_count).ok()) {
    entry_count++;
  }

  // Pop roughly half the entries.
  while (reader.EntryCount() > (entry_count / 2)) {
    EXPECT_TRUE(reader.PopFront().ok());
  }

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  for (const Entry& entry_info : ring) {
    EXPECT_EQ(GetEntry<size_t>(entry_info.buffer), validated_entries);
    validated_entries++;
  }
  EXPECT_EQ(validated_entries, entry_count);
}

TEST(PrefixedEntryRingBufferMulti, IteratorBufferCorruption) {
  PrefixedEntryRingBufferMulti ring;
  byte test_buffer[kTestBufferSize];
  EXPECT_EQ(ring.SetBuffer(test_buffer), OkStatus());

  // Buffer contains partially written entries. This may happen if writing
  // is pre-empted (e.g. a crash occurs). In this state, we expect a series
  // of valid entries followed by an invalid entry.
  PrefixedEntryRingBufferMulti::Reader trailing_reader;
  EXPECT_EQ(ring.AttachReader(trailing_reader), OkStatus());

  // Add one entry to capture the size of a single entry (the offset at which
  // the second entry starts).
  size_t entry_count = 0;
  EXPECT_TRUE(TryPushBack<size_t>(ring, entry_count++).ok());
  size_t entry_size = ring.TotalUsedBytes();

  // Fill the buffer with entries.
  while (TryPushBack<size_t>(ring, entry_count++).ok()) {
  }

  // Push another entry to move the write index forward and force the oldest
  // reader forward. This will require the iterator to dering.
  EXPECT_TRUE(PushBack<size_t>(ring, 0).ok());
  EXPECT_TRUE(ring.CheckForCorruption().ok());

  // The first entry is overwritten. Corrupt all data past the fifth entry.
  // Note that because the first entry has shifted, the entry_count recorded
  // in each entry is shifted by 1.
  constexpr size_t valid_entries = 5;
  size_t offset = valid_entries * entry_size;
  memset(test_buffer + offset, 0xFF, kTestBufferSize - offset);
  EXPECT_FALSE(ring.CheckForCorruption().ok());

  // Iterate over all entries and confirm entry count.
  size_t validated_entries = 0;
  iterator it = ring.begin();
  for (; it != ring.end(); it++) {
    EXPECT_EQ(GetEntry<size_t>(it->buffer), validated_entries + 1);
    validated_entries++;
  }
  // The final entry will fail to be read.
  EXPECT_EQ(it.status(), Status::DataLoss());
  EXPECT_EQ(validated_entries, valid_entries);
}

}  // namespace
}  // namespace ring_buffer
}  // namespace pw