#ifndef ANDROID_DVR_BROADCAST_RING_H_
#define ANDROID_DVR_BROADCAST_RING_H_

#include <inttypes.h>
#include <stddef.h>
#include <stdio.h>
#include <string.h>
#include <atomic>
#include <limits>
#include <tuple>
#include <type_traits>
#include <utility>

#include "android-base/logging.h"

#if ATOMIC_LONG_LOCK_FREE != 2 || ATOMIC_INT_LOCK_FREE != 2
#error "This file requires lock free atomic uint32_t and long"
#endif

namespace android {
namespace dvr {

struct DefaultRingTraits {
  // Set this to false to allow compatibly expanding the record size.
  static constexpr bool kUseStaticRecordSize = false;

  // Set this to a nonzero value to fix the number of records in the ring.
  static constexpr uint32_t kStaticRecordCount = 0;

  // Set this to the max number of records that can be written simultaneously.
  static constexpr uint32_t kMaxReservedRecords = 1;

  // Set this to the min number of records that must be readable.
  static constexpr uint32_t kMinAvailableRecords = 1;
};
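
// For illustration only: a fully static ring geometry can be selected by
// shadowing the defaults in a derived traits struct (the names below are
// hypothetical), e.g.
//
//   struct MyStaticRingTraits : public DefaultRingTraits {
//     static constexpr bool kUseStaticRecordSize = true;
//     // Any power of two >= Traits::kMinRecordCount.
//     static constexpr uint32_t kStaticRecordCount = 16;
//   };
//
//   using MyRing = BroadcastRing<MyRecordType, MyStaticRingTraits>;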

// Nonblocking ring suitable for concurrent single-writer, multi-reader access.
//
// Readers never block the writer and thus this is a nondeterministically lossy
// transport in the absence of external synchronization. Don't use this as a
// transport when deterministic behavior is required.
//
// Readers may have a read-only mapping; each reader's state is a single local
// sequence number.
//
// The implementation takes care to avoid data races on record access.
// Inconsistent data can only be returned if at least 2^32 records are written
// during the read-side critical section.
//
// In addition, both readers and the writer are careful to avoid accesses
// outside the bounds of the mmap area passed in during initialization even if
// there is a misbehaving or malicious task with write access to the mmap area.
//
// When dynamic record size is enabled, readers use the record size in the ring
// header when indexing the ring, so that it is possible to extend the record
// type without breaking the read-side ABI.
//
// Avoid calling Put() in a tight loop; there should be significantly more time
// between successive puts than it takes to read one record from memory to
// ensure Get() completes quickly. This requirement should not be difficult to
// achieve for most practical uses; 4kB puts at 10,000Hz is well below the
// scaling limit on current mobile chips.
//
// Example Writer Usage:
//
//   using Record = MyRecordType;
//   using Ring = BroadcastRing<Record>;
//
//   uint32_t record_count = kMyDesiredCount;
//   uint32_t ring_size = Ring::MemorySize(record_count);
//
//   size_t page_size = sysconf(_SC_PAGESIZE);
//   uint32_t mmap_size = (ring_size + (page_size - 1)) & ~(page_size - 1);
//
//   // Allocate & map via your preferred mechanism, e.g.
//   int fd = open("/dev/shm/ring_test", O_CREAT|O_RDWR|O_CLOEXEC, 0600);
//   CHECK(fd >= 0);
//   CHECK(!ftruncate(fd, ring_size));
//   void *mmap_base = mmap(nullptr, mmap_size, PROT_READ|PROT_WRITE,
//                          MAP_SHARED, fd, 0);
//   CHECK(mmap_base != MAP_FAILED);
//   close(fd);
//
//   Ring ring = Ring::Create(mmap_base, mmap_size, record_count);
//
//   while (!done)
//     ring.Put(BuildNextRecordBlocking());
//
//   CHECK(!munmap(mmap_base, mmap_size));
//
// Example Reader Usage:
//
//   using Record = MyRecordType;
//   using Ring = BroadcastRing<Record>;
//
//   // Map via your preferred mechanism, e.g.
//   int fd = open("/dev/shm/ring_test", O_RDONLY|O_CLOEXEC);
//   CHECK(fd >= 0);
//   struct stat st;
//   CHECK(!fstat(fd, &st));
//   size_t mmap_size = st.st_size;
//   void *mmap_base = mmap(nullptr, mmap_size, PROT_READ,
//                          MAP_SHARED, fd, 0);
//   CHECK(mmap_base != MAP_FAILED);
//   close(fd);
//
//   Ring ring;
//   bool import_ok;
//   std::tie(ring, import_ok) = Ring::Import(mmap_base, mmap_size);
//   CHECK(import_ok);
//
//   uint32_t sequence;
//
//   // Choose starting point (using "0" is unpredictable but not dangerous)
//   sequence = ring.GetOldestSequence();  // The oldest available
//   sequence = ring.GetNewestSequence();  // The newest available
//   sequence = ring.GetNextSequence();    // The next one produced
//
//   while (!done) {
//     Record record;
//
//     if (you_want_to_process_all_available_records) {
//       while (ring.Get(&sequence, &record)) {
//         ProcessRecord(sequence, record);
//         sequence++;
//       }
//     } else if (you_want_to_skip_to_the_newest_record) {
//       if (ring.GetNewest(&sequence, &record)) {
//         ProcessRecord(sequence, record);
//         sequence++;
//       }
//     }
//
//     DoSomethingExpensiveOrBlocking();
//   }
//
//   CHECK(!munmap(mmap_base, mmap_size));
//
template <typename RecordType, typename BaseTraits = DefaultRingTraits>
class BroadcastRing {
 public:
  using Record = RecordType;
  struct Traits : public BaseTraits {
    // Must have enough space for writers, plus enough space for readers.
    static constexpr int kMinRecordCount =
        BaseTraits::kMaxReservedRecords + BaseTraits::kMinAvailableRecords;

    // Count of zero means dynamic, non-zero means static.
    static constexpr bool kUseStaticRecordCount =
        (BaseTraits::kStaticRecordCount != 0);

    // If both record size and count are static then the overall size is too.
    static constexpr bool kIsStaticSize =
        BaseTraits::kUseStaticRecordSize && kUseStaticRecordCount;
  };

  static constexpr bool IsPowerOfTwo(uint32_t size) {
    return (size & (size - 1)) == 0;
  }

  // Sanity check the options provided in Traits.
  static_assert(Traits::kMinRecordCount >= 1, "Min record count too small");
  static_assert(!Traits::kUseStaticRecordCount ||
                    Traits::kStaticRecordCount >= Traits::kMinRecordCount,
                "Static record count is too small");
  static_assert(!Traits::kStaticRecordCount ||
                    IsPowerOfTwo(Traits::kStaticRecordCount),
                "Static record count is not a power of two");
  static_assert(std::is_standard_layout<Record>::value,
                "Record type must be standard layout");

  BroadcastRing() {}

  // Creates a new ring at |mmap| with |record_count| records.
  //
  // There must be at least |MemorySize(record_count)| bytes of space already
  // allocated at |mmap|. The ring does not take ownership.
  static BroadcastRing Create(void* mmap, size_t mmap_size,
                              uint32_t record_count) {
    BroadcastRing ring(mmap);
    CHECK(ring.ValidateGeometry(mmap_size, sizeof(Record), record_count));
    ring.InitializeHeader(sizeof(Record), record_count);
    return ring;
  }

  // Creates a new ring at |mmap|.
  //
  // There must be at least |MemorySize()| bytes of space already allocated at
  // |mmap|. The ring does not take ownership.
  static BroadcastRing Create(void* mmap, size_t mmap_size) {
    return Create(mmap, mmap_size,
                  Traits::kUseStaticRecordCount
                      ? Traits::kStaticRecordCount
                      : BroadcastRing::GetRecordCount(mmap_size));
  }

  // Imports an existing ring at |mmap|.
  //
  // Import may fail if the ring parameters in the mmap header are not sensible.
  // In this case the returned boolean is false; make sure to check this value.
  static std::tuple<BroadcastRing, bool> Import(void* mmap, size_t mmap_size) {
    BroadcastRing ring(mmap);
    uint32_t record_size = 0;
    uint32_t record_count = 0;
    if (mmap_size >= sizeof(Header)) {
      record_size = std::atomic_load_explicit(&ring.header_mmap()->record_size,
                                              std::memory_order_relaxed);
      record_count = std::atomic_load_explicit(
          &ring.header_mmap()->record_count, std::memory_order_relaxed);
    }
    bool ok = ring.ValidateGeometry(mmap_size, record_size, record_count);
    return std::make_tuple(ring, ok);
  }

  ~BroadcastRing() {}

  // Calculates the space necessary for a ring of size |record_count|.
  //
  // Use this function for dynamically sized rings.
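  //
  // For example (illustrative numbers): the header is four uint32_t atomics,
  // i.e. 16 bytes, so with a 32-byte Record, MemorySize(64) returns
  // 16 + 32 * 64 = 2064 bytes (before any page-size rounding by the caller).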
  static constexpr size_t MemorySize(uint32_t record_count) {
    return sizeof(Header) + sizeof(Record) * record_count;
  }

  // Calculates the space necessary for a statically sized ring.
  //
  // Use this function for statically sized rings.
  static constexpr size_t MemorySize() {
    static_assert(
        Traits::kUseStaticRecordCount,
        "Wrong MemorySize() function called for dynamic record count");
    return MemorySize(Traits::kStaticRecordCount);
  }
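
  // Rounds |n| up to the nearest power of two; values that are already powers
  // of two (and 0) are returned unchanged, e.g. NextPowerOf2(5) == 8 and
  // NextPowerOf2(64) == 64.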
  static uint32_t NextPowerOf2(uint32_t n) {
    if (n == 0)
      return 0;
    n -= 1;
    n |= n >> 16;
    n |= n >> 8;
    n |= n >> 4;
    n |= n >> 2;
    n |= n >> 1;
    return n + 1;
  }

  // Gets the biggest power of 2 record count that can fit into this mmap.
  //
  // The header size has been taken into account.
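  //
  // For example (illustrative numbers): with a 4096-byte mmap, a 16-byte
  // header and 32-byte records, 127 whole records fit, which is rounded down
  // to the largest power of two, 64.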
  static uint32_t GetRecordCount(size_t mmap_size) {
    if (mmap_size <= sizeof(Header)) {
      return 0;
    }
    uint32_t count =
        static_cast<uint32_t>((mmap_size - sizeof(Header)) / sizeof(Record));
    return IsPowerOfTwo(count) ? count : (NextPowerOf2(count) / 2);
  }

  // Writes a record to the ring.
  //
  // The oldest record is overwritten if the ring is already full.
  void Put(const Record& record) {
    const int kRecordCount = 1;
    Reserve(kRecordCount);
    Geometry geometry = GetGeometry();
    PutRecordInternal(&record, record_mmap_writer(geometry.tail_index));
    Publish(kRecordCount);
  }

  // Gets sequence number of the oldest currently available record.
  uint32_t GetOldestSequence() const {
    return std::atomic_load_explicit(&header_mmap()->head,
                                     std::memory_order_relaxed);
  }

  // Gets sequence number of the first future record.
  //
  // If the returned value is passed to Get() and there is no concurrent Put(),
  // Get() will return false.
  uint32_t GetNextSequence() const {
    return std::atomic_load_explicit(&header_mmap()->tail,
                                     std::memory_order_relaxed);
  }

  // Gets sequence number of the newest currently available record.
  uint32_t GetNewestSequence() const { return GetNextSequence() - 1; }

  // Copies the oldest available record with sequence at least |*sequence| to
  // |record|.
  //
  // Returns false if there is no recent enough record available.
  //
  // Updates |*sequence| with the sequence number of the record returned. To get
  // the following record, increment this number by one.
  //
  // This function synchronizes with two other operations:
  //
  //    (1) Load-Acquire of |tail|
  //
  //        Together with the store-release in Publish(), this load-acquire
  //        ensures each store to a record in PutRecordInternal() happens-before
  //        any corresponding load in GetRecordInternal().
  //
  //        i.e. the stores for the records with sequence numbers < |tail| have
  //        completed from our perspective
  //
  //    (2) Acquire Fence between record access & final load of |head|
  //
  //        Together with the release fence in Reserve(), this ensures that if
  //        GetRecordInternal() loads a value stored in some execution of
  //        PutRecordInternal(), then the store of |head| in the Reserve() that
  //        preceded it happens-before our final load of |head|.
  //
  //        i.e. if we read a record with sequence number >= |final_head| then
  //        no later store to that record has completed from our perspective
  bool Get(uint32_t* sequence /*inout*/, Record* record /*out*/) const {
    for (;;) {
      uint32_t tail = std::atomic_load_explicit(&header_mmap()->tail,
                                                std::memory_order_acquire);
      uint32_t head = std::atomic_load_explicit(&header_mmap()->head,
                                                std::memory_order_relaxed);

      if (tail - head > record_count())
        continue;  // Concurrent modification; re-try.

      if (*sequence - head > tail - head)
        *sequence = head;  // Out of window, skip forward to first available.

      if (*sequence == tail) return false;  // No new records available.

      Geometry geometry =
          CalculateGeometry(record_count(), record_size(), *sequence, tail);

      // Compute address explicitly in case record_size > sizeof(Record).
      RecordStorage* record_storage = record_mmap_reader(geometry.head_index);

      GetRecordInternal(record_storage, record);

      // NB: It is not sufficient to change this to a load-acquire of |head|.
      std::atomic_thread_fence(std::memory_order_acquire);

      uint32_t final_head = std::atomic_load_explicit(
          &header_mmap()->head, std::memory_order_relaxed);

      if (final_head - head > *sequence - head)
        continue;  // Concurrent modification; re-try.

      // Note: Combining the above 4 comparisons gives:
      // 0 <= final_head - head <= sequence - head < tail - head <= record_count
      //
      // We can also write this as:
      // head <=* final_head <=* sequence <* tail <=* head + record_count
      //
      // where <* orders by difference from head: x <* y if x - head < y - head.
      // This agrees with the order of sequence updates during "put" operations.
      return true;
    }
  }

  // Copies the newest available record with sequence at least |*sequence| to
  // |record|.
  //
  // Returns false if there is no recent enough record available.
  //
  // Updates |*sequence| with the sequence number of the record returned. To get
  // the following record, increment this number by one.
  bool GetNewest(uint32_t* sequence, Record* record) const {
    uint32_t newest_sequence = GetNewestSequence();
    if (*sequence == newest_sequence + 1) return false;
    *sequence = newest_sequence;
    return Get(sequence, record);
  }

  // Returns true if this instance has been created or imported.
  bool is_valid() const { return !!data_.mmap; }

  uint32_t record_count() const { return record_count_internal(); }
  uint32_t record_size() const { return record_size_internal(); }
  static constexpr uint32_t mmap_alignment() { return alignof(Mmap); }

 private:
  struct Header {
    // Record size for reading out of the ring. Writers always write the full
    // length; readers may need to read a prefix of each record.
    std::atomic<uint32_t> record_size;

    // Number of records in the ring.
    std::atomic<uint32_t> record_count;

    // Readable region is [head % record_count, tail % record_count).
    //
    // The region in [tail % record_count, head % record_count) was either never
    // populated or is being updated.
    //
    // These are sequence numbers, not indexes - indexes should be computed
    // with a modulus.
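    //
    // For example (illustrative numbers): with record_count == 8, head == 14
    // and tail == 17, sequences 14, 15 and 16 are readable, at indexes 6, 7
    // and 0 respectively.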
    //
    // To ensure consistency:
    //
    // (1) Writes advance |head| past any updated records before writing to
    //     them, and advance |tail| after they are written.
    // (2) Readers check |tail| before reading data and |head| after,
    //     making sure to discard any data that was written to concurrently.
    std::atomic<uint32_t> head;
    std::atomic<uint32_t> tail;
  };

  // Store using the standard word size.
  using StorageType = long;  // NOLINT

  // Always require 8 byte alignment so that the same record sizes are legal on
  // 32 and 64 bit builds.
  static constexpr size_t kRecordAlignment = 8;
  static_assert(kRecordAlignment % sizeof(StorageType) == 0,
                "Bad record alignment");

  struct RecordStorage {
    // This is accessed with relaxed atomics to prevent data races on the
    // contained data, which would be undefined behavior.
    std::atomic<StorageType> data[sizeof(Record) / sizeof(StorageType)];
  };

  static_assert(sizeof(StorageType) *
                        std::extent<decltype(RecordStorage::data)>() ==
                    sizeof(Record),
                "Record length must be a multiple of sizeof(StorageType)");
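
  // For example, a 24-byte Record yields a 3-element data[] where long is
  // 8 bytes (LP64) and a 6-element data[] where long is 4 bytes (ILP32); in
  // both cases the 8-byte kRecordAlignment keeps the record sizes compatible.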

  struct Geometry {
    // Static geometry.
    uint32_t record_count;
    uint32_t record_size;

    // Copy of atomic sequence counts.
    uint32_t head;
    uint32_t tail;

    // First index of readable region.
    uint32_t head_index;

    // First index of writable region.
    uint32_t tail_index;

    // Number of records in readable region.
    uint32_t count;

    // Number of records in writable region.
    uint32_t space;
  };

  // Mmap area layout.
  //
  // Readers should not index directly into |records| as this is not valid when
  // dynamic record sizes are used; use record_mmap_reader() instead.
  struct Mmap {
    Header header;
    RecordStorage records[];
  };

  static_assert(std::is_standard_layout<Mmap>::value,
                "Mmap must be standard layout");
  static_assert(sizeof(std::atomic<uint32_t>) == sizeof(uint32_t),
                "Lockless atomics contain extra state");
  static_assert(sizeof(std::atomic<StorageType>) == sizeof(StorageType),
                "Lockless atomics contain extra state");

  explicit BroadcastRing(void* mmap) {
    CHECK_EQ(0U, reinterpret_cast<uintptr_t>(mmap) % alignof(Mmap));
    data_.mmap = reinterpret_cast<Mmap*>(mmap);
  }

  // Initializes the mmap area header for a new ring.
  void InitializeHeader(uint32_t record_size, uint32_t record_count) {
    constexpr uint32_t kInitialSequence = -256;  // Force an early wrap.
    std::atomic_store_explicit(&header_mmap()->record_size, record_size,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&header_mmap()->record_count, record_count,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&header_mmap()->head, kInitialSequence,
                               std::memory_order_relaxed);
    std::atomic_store_explicit(&header_mmap()->tail, kInitialSequence,
                               std::memory_order_relaxed);
  }

  // Validates ring geometry.
  //
  // Ring geometry is validated carefully on import and then cached. This allows
  // us to avoid out-of-range accesses even if the parameters in the header are
  // later changed.
  bool ValidateGeometry(size_t mmap_size, uint32_t header_record_size,
                        uint32_t header_record_count) {
    set_record_size(header_record_size);
    set_record_count(header_record_count);

    if (record_size() != header_record_size) return false;
    if (record_count() != header_record_count) return false;
    if (record_count() < Traits::kMinRecordCount) return false;
    if (record_size() < sizeof(Record)) return false;
    if (record_size() % kRecordAlignment != 0) return false;
    if (!IsPowerOfTwo(record_count())) return false;

    size_t memory_size = record_count() * record_size();
    if (memory_size / record_size() != record_count()) return false;
    if (memory_size + sizeof(Header) < memory_size) return false;
    if (memory_size + sizeof(Header) > mmap_size) return false;

    return true;
  }

  // Copies a record into the ring.
  //
  // This is done with relaxed atomics because otherwise it is racy according to
  // the C++ memory model. This is very low overhead once optimized.
  static inline void PutRecordInternal(const Record* in, RecordStorage* out) {
    StorageType data[sizeof(Record) / sizeof(StorageType)];
    memcpy(data, in, sizeof(*in));
    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
      std::atomic_store_explicit(&out->data[i], data[i],
                                 std::memory_order_relaxed);
    }
  }

  // Copies a record out of the ring.
  //
  // This is done with relaxed atomics because otherwise it is racy according to
  // the C++ memory model. This is very low overhead once optimized.
  static inline void GetRecordInternal(RecordStorage* in, Record* out) {
    StorageType data[sizeof(Record) / sizeof(StorageType)];
    for (size_t i = 0; i < std::extent<decltype(data)>(); ++i) {
      data[i] =
          std::atomic_load_explicit(&in->data[i], std::memory_order_relaxed);
    }
    memcpy(out, &data, sizeof(*out));
  }

  // Converts a record's sequence number into a storage index.
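  //
  // The record count is a power of two, so this is just the low bits of the
  // sequence, e.g. SequenceToIndex(13, 8) == 5.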
  static uint32_t SequenceToIndex(uint32_t sequence, uint32_t record_count) {
    return sequence & (record_count - 1);
  }

  // Computes readable & writable ranges from ring parameters.
  static Geometry CalculateGeometry(uint32_t record_count, uint32_t record_size,
                                    uint32_t head, uint32_t tail) {
    Geometry geometry;
    geometry.record_count = record_count;
    geometry.record_size = record_size;
    DCHECK_EQ(0U, geometry.record_size % kRecordAlignment);
    geometry.head = head;
    geometry.tail = tail;
    geometry.head_index = SequenceToIndex(head, record_count);
    geometry.tail_index = SequenceToIndex(tail, record_count);
    geometry.count = geometry.tail - geometry.head;
    DCHECK_LE(geometry.count, record_count);
    geometry.space = geometry.record_count - geometry.count;
    return geometry;
  }

  // Gets the current ring readable & writable regions.
  //
  // This is always safe from the writing thread since it is the only
  // thread allowed to update the header.
  Geometry GetGeometry() const {
    return CalculateGeometry(
        record_count(), record_size(),
        std::atomic_load_explicit(&header_mmap()->head,
                                  std::memory_order_relaxed),
        std::atomic_load_explicit(&header_mmap()->tail,
                                  std::memory_order_relaxed));
  }

  // Makes space for at least |reserve_count| records.
  //
  // There is nothing to prevent overwriting records that have concurrent
  // readers. We do however ensure that this situation can be detected: the
  // fence ensures the |head| update will be the first update seen by readers,
  // and readers check this value after reading and discard data that may have
  // been concurrently modified.
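  //
  // For example (illustrative numbers): with record_count == 8, head == 14
  // and tail == 22 the ring is full (space == 0), so reserving one record
  // advances |head| to 15 and the record with sequence 14 may be overwritten
  // even if a reader is still copying it out.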
  void Reserve(uint32_t reserve_count) {
    Geometry geometry = GetGeometry();
    DCHECK_LE(reserve_count, Traits::kMaxReservedRecords);
    uint32_t needed =
        (geometry.space >= reserve_count ? 0 : reserve_count - geometry.space);

    std::atomic_store_explicit(&header_mmap()->head, geometry.head + needed,
                               std::memory_order_relaxed);

    // NB: It is not sufficient to change this to a store-release of |head|.
    std::atomic_thread_fence(std::memory_order_release);
  }

  // Makes |publish_count| records visible to readers.
  //
  // Space must have been reserved by a previous call to Reserve().
  void Publish(uint32_t publish_count) {
    Geometry geometry = GetGeometry();
    DCHECK_LE(publish_count, geometry.space);
    std::atomic_store_explicit(&header_mmap()->tail,
                               geometry.tail + publish_count,
                               std::memory_order_release);
  }

  // Helpers to compute addresses in mmap area.
  Mmap* mmap() const { return data_.mmap; }
  Header* header_mmap() const { return &data_.mmap->header; }
  RecordStorage* record_mmap_writer(uint32_t index) const {
    DCHECK_EQ(sizeof(Record), record_size());
    return &data_.mmap->records[index];
  }
  RecordStorage* record_mmap_reader(uint32_t index) const {
    if (Traits::kUseStaticRecordSize) {
      return &data_.mmap->records[index];
    } else {
      // Calculate the location of a record in the ring without assuming that
      // sizeof(Record) == record_size.
      return reinterpret_cast<RecordStorage*>(
          reinterpret_cast<char*>(data_.mmap->records) + index * record_size());
    }
  }

  // The following horrifying template gunk enables us to store just the mmap
  // base pointer for compile-time statically sized rings. Dynamically sized
  // rings also store the validated copy of the record size & count.
  //
  // This boils down to: use a compile time constant if available, and otherwise
  // load the value that was validated on import from a member variable.
  template <typename T = Traits>
  typename std::enable_if<T::kUseStaticRecordSize, uint32_t>::type
  record_size_internal() const {
    return sizeof(Record);
  }

  template <typename T = Traits>
  typename std::enable_if<!T::kUseStaticRecordSize, uint32_t>::type
  record_size_internal() const {
    return data_.record_size;
  }

  template <typename T = Traits>
  typename std::enable_if<T::kUseStaticRecordSize, void>::type set_record_size(
      uint32_t /*record_size*/) {}

  template <typename T = Traits>
  typename std::enable_if<!T::kUseStaticRecordSize, void>::type set_record_size(
      uint32_t record_size) {
    data_.record_size = record_size;
  }

  template <typename T = Traits>
  typename std::enable_if<T::kUseStaticRecordCount, uint32_t>::type
  record_count_internal() const {
    return Traits::kStaticRecordCount;
  }

  template <typename T = Traits>
  typename std::enable_if<!T::kUseStaticRecordCount, uint32_t>::type
  record_count_internal() const {
    return data_.record_count;
  }

  template <typename T = Traits>
  typename std::enable_if<T::kUseStaticRecordCount, void>::type
  set_record_count(uint32_t /*record_count*/) const {}

  template <typename T = Traits>
  typename std::enable_if<!T::kUseStaticRecordCount, void>::type
  set_record_count(uint32_t record_count) {
    data_.record_count = record_count;
  }

  // Data we need to store for statically sized rings.
  struct DataStaticSize {
    Mmap* mmap = nullptr;
  };

  // Data we need to store for dynamically sized rings.
  struct DataDynamicSize {
    Mmap* mmap = nullptr;

    // These are cached to make sure misbehaving writers cannot cause
    // out-of-bounds memory accesses by updating the values in the mmap header.
    uint32_t record_size = 0;
    uint32_t record_count = 0;
  };

  using DataStaticOrDynamic =
      typename std::conditional<Traits::kIsStaticSize, DataStaticSize,
                                DataDynamicSize>::type;

  DataStaticOrDynamic data_;
};

}  // namespace dvr
}  // namespace android

#endif  // ANDROID_DVR_BROADCAST_RING_H_