/*
 * Copyright (C) 2016 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <asm-generic/mman.h>
#include <gtest/gtest.h>
#include <atomic>
#include <cstdlib>
#include <sstream>
#include <thread>
#include <fmq/MessageQueue.h>
#include <fmq/EventFlag.h>

enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 1 << 0,
    kFmqNotFull = 1 << 1,
};
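
/*
 * Note: these bits serve as the wait/wake masks used with
 * android::hardware::EventFlag in the blocking tests below. The writer wakes
 * kFmqNotEmpty after a write; the reader wakes kFmqNotFull after a read.
 */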

typedef android::hardware::MessageQueue<uint8_t, android::hardware::kSynchronizedReadWrite>
        MessageQueueSync;
typedef android::hardware::MessageQueue<uint8_t, android::hardware::kUnsynchronizedWrite>
        MessageQueueUnsync;
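
/*
 * MessageQueueSync is the synchronized flavor: writes fail once the queue is
 * full and reads fail once it is empty. MessageQueueUnsync lets the writer
 * keep writing past a full queue, overwriting unread data; the
 * UnsynchronizedWrite tests below expect reads to fail after such an overrun.
 */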

class SynchronizedReadWrites : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    MessageQueueSync* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

class UnsynchronizedWrite : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }

    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueUnsync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
    }

    MessageQueueUnsync* mQueue = nullptr;
    size_t mNumMessagesMax = 0;
};

class BlockingReadWrites : public ::testing::Test {
protected:
    virtual void TearDown() {
        delete mQueue;
    }
    virtual void SetUp() {
        static constexpr size_t kNumElementsInQueue = 2048;
        mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue);
        ASSERT_NE(nullptr, mQueue);
        ASSERT_TRUE(mQueue->isValid());
        mNumMessagesMax = mQueue->getQuantumCount();
        ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
        /*
         * Initialize the EventFlag word to indicate Queue is not full.
         */
        std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
    }

    MessageQueueSync* mQueue;
    std::atomic<uint32_t> mFw;
    size_t mNumMessagesMax = 0;
};

class QueueSizeOdd : public ::testing::Test {
protected:
  virtual void TearDown() {
      delete mQueue;
  }
  virtual void SetUp() {
      static constexpr size_t kNumElementsInQueue = 2049;
      mQueue = new (std::nothrow) MessageQueueSync(kNumElementsInQueue,
                                                   true /* configureEventFlagWord */);
      ASSERT_NE(nullptr, mQueue);
      ASSERT_TRUE(mQueue->isValid());
      mNumMessagesMax = mQueue->getQuantumCount();
      ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
      auto evFlagWordPtr = mQueue->getEventFlagWord();
      ASSERT_NE(nullptr, evFlagWordPtr);
      /*
       * Initialize the EventFlag word to indicate Queue is not full.
       */
      std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
  }

  MessageQueueSync* mQueue;
  size_t mNumMessagesMax = 0;
};
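
/*
 * Note: BlockingReadWrites supplies its own EventFlag word (mFw) to
 * EventFlag::createEventFlag(), while QueueSizeOdd asks the queue to allocate
 * one internally by passing configureEventFlagWord = true and retrieves it via
 * getEventFlagWord(). Both words start as kFmqNotFull because the queues begin
 * empty.
 */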

class BadQueueConfig : public ::testing::Test {
};

/*
 * Utility function to initialize data to be written to the FMQ.
 */
inline void initData(uint8_t* data, size_t count) {
    for (size_t i = 0; i < count; i++) {
        data[i] = i & 0xFF;
    }
}

/*
 * This thread attempts to read and blocks. When wait() returns,
 * it checks whether the kFmqNotEmpty bit is actually set.
 * If the read is successful, it wakes kFmqNotFull.
 */
void ReaderThreadBlocking(
        android::hardware::MessageQueue<uint8_t,
        android::hardware::kSynchronizedReadWrite>* fmq,
        std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    while (true) {
        uint32_t efState = 0;
        android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                              &efState,
                                              5000000000 /* timeoutNanoSeconds */);
        /*
         * Wait should not time out here after 5s.
         */
        ASSERT_NE(android::TIMED_OUT, ret);

        if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
            efGroup->wake(kFmqNotFull);
            break;
        }
    }

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * This thread attempts a blocking read using the readBlocking() API,
 * passing in a pointer to an EventFlag object.
 */
void ReaderThreadBlocking2(
        android::hardware::MessageQueue<uint8_t,
        android::hardware::kSynchronizedReadWrite>* fmq,
        std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);
    bool ret = fmq->readBlocking(data,
                                 dataLen,
                                 static_cast<uint32_t>(kFmqNotFull),
                                 static_cast<uint32_t>(kFmqNotEmpty),
                                 5000000000 /* timeOutNanos */,
                                 efGroup);
    ASSERT_TRUE(ret);
    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
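
/*
 * Note: ReaderThreadBlocking drives the EventFlag wait()/wake() handshake by
 * hand, while ReaderThreadBlocking2 relies on readBlocking() to perform an
 * equivalent wait-read-wake sequence internally. The bit masks passed to
 * readBlocking() mirror the explicit calls above: wake kFmqNotFull after a
 * successful read, wait on kFmqNotEmpty before reading.
 */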


TEST_F(BadQueueConfig, QueueSizeTooLarge) {
    typedef android::hardware::MessageQueue<uint16_t, android::hardware::kSynchronizedReadWrite>
            MessageQueueSync16;
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
    MessageQueueSync16* fmq = new (std::nothrow) MessageQueueSync16(numElementsInQueue);
    ASSERT_NE(nullptr, fmq);
    /*
     * Should fail due to size being too large to fit into size_t.
     */
    ASSERT_FALSE(fmq->isValid());
}

/*
 * Test that basic blocking works. This test uses the non-blocking read()/write()
 * APIs.
 */
TEST_F(BlockingReadWrites, SmallInputTest1) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty.
     */
    std::thread Reader(ReaderThreadBlocking, mQueue, &mFw);
    struct timespec waitTime = {0, 100 * 1000000};
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));

    /*
     * After waiting for some time, write into the FMQ
     * and call wake() on kFmqNotEmpty.
     */
    ASSERT_TRUE(mQueue->write(data, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    ASSERT_EQ(0, nanosleep(&waitTime, NULL));
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking works. This test uses the
 * writeBlocking()/readBlocking() APIs.
 */
TEST_F(BlockingReadWrites, SmallInputTest2) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty. It will
     * call wake() on kFmqNotFull when the read is successful.
     */
    std::thread Reader(ReaderThreadBlocking2, mQueue, &mFw);
    bool ret = mQueue->writeBlocking(data,
                                     dataLen,
                                     static_cast<uint32_t>(kFmqNotFull),
                                     static_cast<uint32_t>(kFmqNotEmpty),
                                     5000000000 /* timeOutNanos */,
                                     efGroup);
    ASSERT_TRUE(ret);
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that basic blocking times out as intended.
 */
TEST_F(BlockingReadWrites, BlockingTimeOutTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /* Block on an EventFlag bit that no one will wake and time out in 1s */
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                          &efState,
                                          1000000000 /* timeoutNanoSeconds */);
    /*
     * Wait should time out in a second.
     */
    EXPECT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}

/*
 * Test that an odd queue size does not cause an unaligned access error
 * when the EventFlag object is used.
 */
TEST_F(QueueSizeOdd, EventFlagTest) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    bool ret = mQueue->writeBlocking(data,
                                     dataLen,
                                     static_cast<uint32_t>(kFmqNotFull),
                                     static_cast<uint32_t>(kFmqNotEmpty),
                                     5000000000 /* timeOutNanos */);
    ASSERT_TRUE(ret);
}
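
/*
 * Note on the test above: with configureEventFlagWord = true the EventFlag
 * word lives in the same shared-memory allocation as the ring buffer, so an
 * odd element count is presumably the case most likely to leave that word
 * misaligned if the implementation did not place it carefully.
 */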

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TEST_F(SynchronizedReadWrites, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);

    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead().
 */
TEST_F(SynchronizedReadWrites, SmallInputTest2) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);

    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));

    ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};

    ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));

    ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginWrite()/commitWrite() and beginRead()/commitRead(), as well as getSlot().
 */
TEST_F(SynchronizedReadWrites, SmallInputTest3) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        *ptr = data[i];
    }

    ASSERT_TRUE(mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};

    ASSERT_TRUE(mQueue->beginRead(dataLen, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), dataLen);

    for (size_t i = 0; i < dataLen; i++) {
        uint8_t* ptr = tx.getSlot(i);
        readData[i] = *ptr;
    }

    ASSERT_TRUE(mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TEST_F(SynchronizedReadWrites, ReadWhenEmpty1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(mQueue->read(readData, dataLen));
}

/*
 * Verify that beginRead() returns a MemTransaction object with null pointers
 * when trying to read from an empty queue.
 */
TEST_F(SynchronizedReadWrites, ReadWhenEmpty2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_LE(dataLen, mNumMessagesMax);

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginRead(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Write the queue until full. Verify that another write is unsuccessful.
 * Verify that availableToWrite() returns 0 as expected.
 */
TEST_F(SynchronizedReadWrites, WriteWhenFull1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);

    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());
    ASSERT_FALSE(mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Write the queue until full. Verify that beginWrite() returns
 * a MemTransaction object with null base pointers.
 */
TEST_F(SynchronizedReadWrites, WriteWhenFull2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);

    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginWrite(1, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and the subsequent read
 * returns the expected data.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest1) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails. Verify that a subsequent read fails and
 * the queue is still empty.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);

    initData(&data[0], dataLen);
    ASSERT_FALSE(mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt returns false. Verify that the attempt did not
 * affect the pre-existing data in the queue.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest3) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_FALSE(mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Verify that beginWrite() returns a MemTransaction with
 * null base pointers when attempting to write data larger
 * than the queue size.
 */
TEST_F(SynchronizedReadWrites, LargeInputTest4) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);

    MessageQueueSync::MemTransaction tx;
    ASSERT_FALSE(mQueue->beginWrite(dataLen, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(nullptr, first.getAddress());
    ASSERT_EQ(nullptr, second.getAddress());
}

/*
 * Verify that multiple reads one after the other return the expected data.
 */
TEST_F(SynchronizedReadWrites, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other happen correctly.
 */
TEST_F(SynchronizedReadWrites, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
    }
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap-around. Read and verify the data.
 */
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround1) {
    size_t numMessages = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Use the beginWrite()/commitWrite()/beginRead()/commitRead() APIs
 * to test that wrap-arounds are handled correctly.
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap-around. Read and verify the data.
 */
TEST_F(SynchronizedReadWrites, ReadWriteWrapAround2) {
    size_t dataLen = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], dataLen));
    ASSERT_TRUE(mQueue->read(&readData[0], dataLen));

    /*
     * The next write and read will have to deal with wrap-arounds.
     */
    MessageQueueSync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(mNumMessagesMax, &tx));

    auto first = tx.getFirstRegion();
    auto second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);

    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, mNumMessagesMax));

    ASSERT_TRUE(mQueue->commitWrite(mNumMessagesMax));

    ASSERT_TRUE(mQueue->beginRead(mNumMessagesMax, &tx));

    first = tx.getFirstRegion();
    second = tx.getSecondRegion();

    ASSERT_EQ(first.getLength() + second.getLength(), mNumMessagesMax);

    ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, mNumMessagesMax));
    ASSERT_TRUE(mQueue->commitRead(mNumMessagesMax));

    ASSERT_EQ(data, readData);
}
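
/*
 * Note on the wrap-around test above: when a transaction crosses the end of
 * the ring buffer, getFirstRegion() and getSecondRegion() describe the two
 * contiguous segments that make up the transaction, and copyTo()/copyFrom()
 * split the copy across them transparently.
 */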

/*
 * Verify that a few bytes of data can be successfully written and read.
 */
TEST_F(UnsynchronizedWrite, SmallInputTest1) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}

/*
 * Verify that read() returns false when trying to read from an empty queue.
 */
TEST_F(UnsynchronizedWrite, ReadWhenEmpty) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 2;
    ASSERT_TRUE(dataLen < mNumMessagesMax);
    uint8_t readData[dataLen];
    ASSERT_FALSE(mQueue->read(readData, dataLen));
}

/*
 * Write the queue until full. Verify that a subsequent write is successful.
 * Verify that availableToWrite() returns 0 as expected, and that the
 * following read fails because the reader has been overrun.
 */
TEST_F(UnsynchronizedWrite, WriteWhenFull1) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);

    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_EQ(0UL, mQueue->availableToWrite());
    ASSERT_TRUE(mQueue->write(&data[0], 1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Write the queue until full. Verify that a subsequent write
 * using beginWrite()/commitWrite() is successful.
 * Verify that the next read fails as expected for the unsynchronized flavor.
 */
TEST_F(UnsynchronizedWrite, WriteWhenFull2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    std::vector<uint8_t> data(mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));

    MessageQueueUnsync::MemTransaction tx;
    ASSERT_TRUE(mQueue->beginWrite(1, &tx));

    ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);

    ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));

    ASSERT_TRUE(mQueue->commitWrite(1));

    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Write a chunk of data equal to the queue size.
 * Verify that the write is successful and the subsequent read
 * returns the expected data.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest1) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}

/*
 * Attempt to write a chunk of data larger than the queue size.
 * Verify that it fails. Verify that a subsequent read fails and
 * the queue is still empty.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest2) {
    ASSERT_EQ(0UL, mQueue->availableToRead());
    const size_t dataLen = 4096;
    ASSERT_GT(dataLen, mNumMessagesMax);
    std::vector<uint8_t> data(dataLen);
    initData(&data[0], dataLen);
    ASSERT_FALSE(mQueue->write(&data[0], dataLen));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_NE(data, readData);
    ASSERT_EQ(0UL, mQueue->availableToRead());
}

/*
 * After the queue is full, try to write more data. Verify that
 * the attempt is successful. Verify that the read fails
 * as expected.
 */
TEST_F(UnsynchronizedWrite, LargeInputTest3) {
    std::vector<uint8_t> data(mNumMessagesMax);
    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->write(&data[0], 1));
    std::vector<uint8_t> readData(mNumMessagesMax);
    ASSERT_FALSE(mQueue->read(&readData[0], mNumMessagesMax));
}

/*
 * Verify that multiple reads one after the other return the expected data.
 */
TEST_F(UnsynchronizedWrite, MultipleRead) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];
    initData(data, dataLen);
    ASSERT_TRUE(mQueue->write(data, dataLen));
    uint8_t readData[dataLen] = {};
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->read(readData + i * chunkSize, chunkSize));
    }
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Verify that multiple writes one after the other happen correctly.
 */
TEST_F(UnsynchronizedWrite, MultipleWrite) {
    const size_t chunkSize = 100;
    const size_t chunkNum = 5;
    const size_t dataLen = chunkSize * chunkNum;
    ASSERT_LE(dataLen, mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);
    for (size_t i = 0; i < chunkNum; i++) {
        ASSERT_TRUE(mQueue->write(data + i * chunkSize, chunkSize));
    }

    uint8_t readData[dataLen] = {};
    ASSERT_TRUE(mQueue->read(readData, dataLen));
    ASSERT_EQ(0, memcmp(readData, data, dataLen));
}

/*
 * Write mNumMessagesMax - 1 messages into the FMQ and read them back.
 * Then write mNumMessagesMax messages into the queue. This will cause a
 * wrap-around. Read and verify the data.
 */
TEST_F(UnsynchronizedWrite, ReadWriteWrapAround) {
    size_t numMessages = mNumMessagesMax - 1;
    std::vector<uint8_t> data(mNumMessagesMax);
    std::vector<uint8_t> readData(mNumMessagesMax);

    initData(&data[0], mNumMessagesMax);
    ASSERT_TRUE(mQueue->write(&data[0], numMessages));
    ASSERT_TRUE(mQueue->read(&readData[0], numMessages));
    ASSERT_TRUE(mQueue->write(&data[0], mNumMessagesMax));
    ASSERT_TRUE(mQueue->read(&readData[0], mNumMessagesMax));
    ASSERT_EQ(data, readData);
}