1 /*
2 * Copyright (C) 2016 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #include <android-base/logging.h>
18 #include <asm-generic/mman.h>
19 #include <fmq/AidlMessageQueue.h>
20 #include <fmq/ConvertMQDescriptors.h>
21 #include <fmq/EventFlag.h>
22 #include <fmq/MessageQueue.h>
23 #include <gtest/gtest-death-test.h>
24 #include <gtest/gtest.h>
25 #include <sys/resource.h>
26 #include <atomic>
27 #include <cstdlib>
28 #include <filesystem>
29 #include <sstream>
30 #include <thread>
31
32 using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
33 using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
34 using android::hardware::kSynchronizedReadWrite;
35 using android::hardware::kUnsynchronizedWrite;
36
// Bits of the shared EventFlag word used by the blocking tests: readers wait
// on kFmqNotEmpty, writers wait on kFmqNotFull.
enum EventFlagBits : uint32_t {
    kFmqNotEmpty = 1 << 0,
    kFmqNotFull = 1 << 1,
};
41
42 typedef android::AidlMessageQueue<uint8_t, SynchronizedReadWrite> AidlMessageQueueSync;
43 typedef android::AidlMessageQueue<uint8_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
44 typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync;
45 typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync;
46 typedef android::AidlMessageQueue<uint16_t, SynchronizedReadWrite> AidlMessageQueueSync16;
47 typedef android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> MessageQueueSync16;
48
49 typedef android::hardware::MessageQueue<uint8_t, kSynchronizedReadWrite> MessageQueueSync8;
50 typedef android::hardware::MQDescriptor<uint8_t, kSynchronizedReadWrite> HidlMQDescSync8;
51 typedef android::AidlMessageQueue<int8_t, SynchronizedReadWrite> AidlMessageQueueSync8;
52 typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, SynchronizedReadWrite>
53 AidlMQDescSync8;
54 typedef android::hardware::MessageQueue<uint8_t, kUnsynchronizedWrite> MessageQueueUnsync8;
55 typedef android::hardware::MQDescriptor<uint8_t, kUnsynchronizedWrite> HidlMQDescUnsync8;
56 typedef android::AidlMessageQueue<int8_t, UnsynchronizedWrite> AidlMessageQueueUnsync8;
57 typedef aidl::android::hardware::common::fmq::MQDescriptor<int8_t, UnsynchronizedWrite>
58 AidlMQDescUnsync8;
59
// How the queue is constructed in SetUp(): SINGLE_FD uses the default
// constructor (one shared-memory region); DOUBLE_FD additionally supplies a
// separate caller-created ashmem region for the ring buffer.
enum class SetupType {
    SINGLE_FD,
    DOUBLE_FD,
};
64
65 template <typename T, SetupType setupType>
66 class TestParamTypes {
67 public:
68 typedef T MQType;
69 static constexpr SetupType Setup = setupType;
70 };
71
72 // Run everything on both the AIDL and HIDL versions with one and two FDs
73 typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync, SetupType::SINGLE_FD>,
74 TestParamTypes<MessageQueueSync, SetupType::SINGLE_FD>,
75 TestParamTypes<AidlMessageQueueSync, SetupType::DOUBLE_FD>,
76 TestParamTypes<MessageQueueSync, SetupType::DOUBLE_FD>>
77 SyncTypes;
78 typedef ::testing::Types<TestParamTypes<AidlMessageQueueUnsync, SetupType::SINGLE_FD>,
79 TestParamTypes<MessageQueueUnsync, SetupType::SINGLE_FD>,
80 TestParamTypes<AidlMessageQueueUnsync, SetupType::DOUBLE_FD>,
81 TestParamTypes<MessageQueueUnsync, SetupType::DOUBLE_FD>>
82 UnsyncTypes;
83 typedef ::testing::Types<TestParamTypes<AidlMessageQueueSync16, SetupType::SINGLE_FD>,
84 TestParamTypes<MessageQueueSync16, SetupType::SINGLE_FD>,
85 TestParamTypes<AidlMessageQueueSync16, SetupType::DOUBLE_FD>,
86 TestParamTypes<MessageQueueSync16, SetupType::DOUBLE_FD>>
87 BadConfigTypes;
88
/*
 * Common base for the typed fixtures below. Declares the reader-thread
 * helpers used by the blocking tests; each runs on a separate thread and
 * synchronizes with the writer through the EventFlag word at |fwAddr|.
 * Definitions follow later in this file.
 */
template <typename T>
class TestBase : public ::testing::Test {
  public:
    // Waits on the EventFlag word directly (wait/wake) until a read succeeds.
    static void ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
    // Blocks inside readBlocking() with a caller-created EventFlag object.
    static void ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr);
};
95
96 TYPED_TEST_CASE(SynchronizedReadWrites, SyncTypes);
97
98 template <typename T>
99 class SynchronizedReadWrites : public TestBase<T> {
100 protected:
TearDown()101 virtual void TearDown() {
102 delete mQueue;
103 }
104
SetUp()105 virtual void SetUp() {
106 static constexpr size_t kNumElementsInQueue = 2048;
107 static constexpr size_t kPayloadSizeBytes = 1;
108 if (T::Setup == SetupType::SINGLE_FD) {
109 mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
110 } else {
111 android::base::unique_fd ringbufferFd(::ashmem_create_region(
112 "SyncReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
113 mQueue = new (std::nothrow)
114 typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
115 kNumElementsInQueue * kPayloadSizeBytes);
116 }
117 ASSERT_NE(nullptr, mQueue);
118 ASSERT_TRUE(mQueue->isValid());
119 mNumMessagesMax = mQueue->getQuantumCount();
120 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
121 }
122
123 typename T::MQType* mQueue = nullptr;
124 size_t mNumMessagesMax = 0;
125 };
126
127 TYPED_TEST_CASE(UnsynchronizedWriteTest, UnsyncTypes);
128
129 template <typename T>
130 class UnsynchronizedWriteTest : public TestBase<T> {
131 protected:
TearDown()132 virtual void TearDown() {
133 delete mQueue;
134 }
135
SetUp()136 virtual void SetUp() {
137 static constexpr size_t kNumElementsInQueue = 2048;
138 static constexpr size_t kPayloadSizeBytes = 1;
139 if (T::Setup == SetupType::SINGLE_FD) {
140 mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
141 } else {
142 android::base::unique_fd ringbufferFd(
143 ::ashmem_create_region("UnsyncWrite", kNumElementsInQueue * kPayloadSizeBytes));
144 mQueue = new (std::nothrow)
145 typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
146 kNumElementsInQueue * kPayloadSizeBytes);
147 }
148 ASSERT_NE(nullptr, mQueue);
149 ASSERT_TRUE(mQueue->isValid());
150 mNumMessagesMax = mQueue->getQuantumCount();
151 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
152 }
153
154 typename T::MQType* mQueue = nullptr;
155 size_t mNumMessagesMax = 0;
156 };
157
158 TYPED_TEST_CASE(BlockingReadWrites, SyncTypes);
159
160 template <typename T>
161 class BlockingReadWrites : public TestBase<T> {
162 protected:
TearDown()163 virtual void TearDown() {
164 delete mQueue;
165 }
SetUp()166 virtual void SetUp() {
167 static constexpr size_t kNumElementsInQueue = 2048;
168 static constexpr size_t kPayloadSizeBytes = 1;
169 if (T::Setup == SetupType::SINGLE_FD) {
170 mQueue = new (std::nothrow) typename T::MQType(kNumElementsInQueue);
171 } else {
172 android::base::unique_fd ringbufferFd(::ashmem_create_region(
173 "SyncBlockingReadWrite", kNumElementsInQueue * kPayloadSizeBytes));
174 mQueue = new (std::nothrow)
175 typename T::MQType(kNumElementsInQueue, false, std::move(ringbufferFd),
176 kNumElementsInQueue * kPayloadSizeBytes);
177 }
178 ASSERT_NE(nullptr, mQueue);
179 ASSERT_TRUE(mQueue->isValid());
180 mNumMessagesMax = mQueue->getQuantumCount();
181 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
182 /*
183 * Initialize the EventFlag word to indicate Queue is not full.
184 */
185 std::atomic_init(&mFw, static_cast<uint32_t>(kFmqNotFull));
186 }
187
188 typename T::MQType* mQueue;
189 std::atomic<uint32_t> mFw;
190 size_t mNumMessagesMax = 0;
191 };
192
193 TYPED_TEST_CASE(QueueSizeOdd, SyncTypes);
194
195 template <typename T>
196 class QueueSizeOdd : public TestBase<T> {
197 protected:
TearDown()198 virtual void TearDown() { delete mQueue; }
SetUp()199 virtual void SetUp() {
200 static constexpr size_t kNumElementsInQueue = 2049;
201 static constexpr size_t kPayloadSizeBytes = 1;
202 if (T::Setup == SetupType::SINGLE_FD) {
203 mQueue = new (std::nothrow)
204 typename T::MQType(kNumElementsInQueue, true /* configureEventFlagWord */);
205 } else {
206 android::base::unique_fd ringbufferFd(
207 ::ashmem_create_region("SyncSizeOdd", kNumElementsInQueue * kPayloadSizeBytes));
208 mQueue = new (std::nothrow) typename T::MQType(
209 kNumElementsInQueue, true /* configureEventFlagWord */, std::move(ringbufferFd),
210 kNumElementsInQueue * kPayloadSizeBytes);
211 }
212 ASSERT_NE(nullptr, mQueue);
213 ASSERT_TRUE(mQueue->isValid());
214 mNumMessagesMax = mQueue->getQuantumCount();
215 ASSERT_EQ(kNumElementsInQueue, mNumMessagesMax);
216 auto evFlagWordPtr = mQueue->getEventFlagWord();
217 ASSERT_NE(nullptr, evFlagWordPtr);
218 /*
219 * Initialize the EventFlag word to indicate Queue is not full.
220 */
221 std::atomic_init(evFlagWordPtr, static_cast<uint32_t>(kFmqNotFull));
222 }
223
224 typename T::MQType* mQueue;
225 size_t mNumMessagesMax = 0;
226 };
227
TYPED_TEST_CASE(BadQueueConfig, BadConfigTypes);

// Fixture for invalid-configuration tests; no per-test setup is needed
// because each test constructs its own (intentionally broken) queue.
template <typename T>
class BadQueueConfig : public TestBase<T> {};
232
// Plain (non-typed) fixtures for tests that apply to only one implementation
// or to the HIDL->AIDL descriptor conversion path.
class AidlOnlyBadQueueConfig : public ::testing::Test {};
class HidlOnlyBadQueueConfig : public ::testing::Test {};
class Hidl2AidlOperation : public ::testing::Test {};
class DoubleFdFailures : public ::testing::Test {};
237
/*
 * Utility function to initialize data to be written to the FMQ.
 * Fills |data| with the low byte of each index: 0, 1, ..., 255, 0, 1, ...
 */
inline void initData(uint8_t* data, size_t count) {
    for (size_t idx = 0; idx != count; ++idx) {
        data[idx] = static_cast<uint8_t>(idx);  // same as idx & 0xFF
    }
}
246
/*
 * This thread will attempt to read and block. When wait returns
 * it checks if the kFmqNotEmpty bit is actually set.
 * If the read is successful, it signals Wake to kFmqNotFull.
 */
template <typename T>
void TestBase<T>::ReaderThreadBlocking(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    while (true) {
        uint32_t efState = 0;
        android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                              &efState,
                                              5000000000 /* timeoutNanoSeconds */);
        /*
         * Wait should not time out here after 5s
         */
        ASSERT_NE(android::TIMED_OUT, ret);

        // wait() may return without the bit we care about; loop until the
        // not-empty bit was observed AND the 64-byte read actually succeeded.
        if ((efState & kFmqNotEmpty) && fmq->read(data, dataLen)) {
            efGroup->wake(kFmqNotFull);
            break;
        }
    }

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
280
/*
 * This thread will attempt to read and block using the readBlocking() API and
 * passes in a pointer to an EventFlag object.
 */
template <typename T>
void TestBase<T>::ReaderThreadBlocking2(typename T::MQType* fmq, std::atomic<uint32_t>* fwAddr) {
    const size_t dataLen = 64;
    uint8_t data[dataLen];
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(fwAddr, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);
    // Waits (up to 5s) for the writer's kFmqNotEmpty notification and, per
    // the companion test, signals kFmqNotFull once the read succeeds.
    bool ret = fmq->readBlocking(data,
                                 dataLen,
                                 static_cast<uint32_t>(kFmqNotFull),
                                 static_cast<uint32_t>(kFmqNotEmpty),
                                 5000000000 /* timeOutNanos */,
                                 efGroup);
    ASSERT_TRUE(ret);
    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
303
TYPED_TEST(BadQueueConfig, QueueSizeTooLarge) {
    // One element more than fits in size_t once multiplied by sizeof(uint16_t).
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint16_t) + 1;
    typename TypeParam::MQType fmq(numElementsInQueue);
    /*
     * Should fail due to size being too large to fit into size_t.
     */
    ASSERT_FALSE(fmq.isValid());
}
312
// Baseline grantor set shared by the HidlOnlyBadQueueConfig tests: three
// identical grantors, each one page, all referencing fd index 0.
// {flags, fdIndex, offset, extent}
static const std::vector<android::hardware::GrantorDescriptor> kGrantors = {
        {0, 0, 0, 4096},
        {0, 0, 0, 4096},
        {0, 0, 0, 4096},
};
319
// Make sure this passes without invalid index/extent for the next two test
// cases
TEST_F(HidlOnlyBadQueueConfig, SanityCheck) {
    std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;

    // One-fd native handle backed by a page of ashmem.
    native_handle_t* handle = native_handle_create(1, 0);
    int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
    ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
    handle->data[0] = ashmemFd;

    // A descriptor built from the unmodified grantors must yield a valid queue.
    android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
                                                                           sizeof(uint16_t));
    android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
    EXPECT_TRUE(fmq.isValid());

    close(ashmemFd);
}
337
TEST_F(HidlOnlyBadQueueConfig, BadFdIndex) {
    std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;
    // The handle below only holds one fd, so index 5 is out of range.
    grantors[0].fdIndex = 5;

    native_handle_t* handle = native_handle_create(1, 0);
    int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
    ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
    handle->data[0] = ashmemFd;

    android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
                                                                           sizeof(uint16_t));
    android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
    /*
     * Should fail due fdIndex being out of range of the native_handle.
     */
    EXPECT_FALSE(fmq.isValid());

    close(ashmemFd);
}
357
TEST_F(HidlOnlyBadQueueConfig, ExtentTooLarge) {
    std::vector<android::hardware::GrantorDescriptor> grantors = kGrantors;
    // Far larger than the 4096-byte region actually backing the fd.
    grantors[0].extent = 0xfffff041;

    native_handle_t* handle = native_handle_create(1, 0);
    int ashmemFd = ashmem_create_region("QueueHidlOnlyBad", 4096);
    ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
    handle->data[0] = ashmemFd;

    android::hardware::MQDescriptor<uint16_t, kSynchronizedReadWrite> desc(grantors, handle,
                                                                           sizeof(uint16_t));
    android::hardware::MessageQueue<uint16_t, kSynchronizedReadWrite> fmq(desc);
    /*
     * Should fail due to extent being too large.
     */
    EXPECT_FALSE(fmq.isValid());

    close(ashmemFd);
}
377
numFds()378 long numFds() {
379 return std::distance(std::filesystem::directory_iterator("/proc/self/fd"),
380 std::filesystem::directory_iterator{});
381 }
382
TEST_F(AidlOnlyBadQueueConfig, LookForLeakedFds) {
    // Create/destroy a large number of queues; if we were leaking FDs we
    // could detect it by looking at the number of FDs opened by this test
    // process before and after.
    constexpr uint32_t kNumQueues = 100;
    const size_t kPageSize = getpagesize();
    // Sized so that construction is guaranteed to fail (too large to map).
    size_t numElementsInQueue = SIZE_MAX / sizeof(uint32_t) - kPageSize - 1;
    long numFdsBefore = numFds();
    // Fixed: loop index is uint32_t to match kNumQueues (was a signed/unsigned
    // comparison).
    for (uint32_t i = 0; i < kNumQueues; i++) {
        android::AidlMessageQueue<uint32_t, SynchronizedReadWrite> fmq(numElementsInQueue);
        ASSERT_FALSE(fmq.isValid());
    }
    long numFdsAfter = numFds();
    EXPECT_LT(numFdsAfter, kNumQueues);
    EXPECT_EQ(numFdsAfter, numFdsBefore);
}
399
/*
 * Create a HIDL queue, convert its descriptor to AIDL, and verify both sides
 * share the same underlying memory: a HIDL write is visible to an AIDL read.
 */
TEST_F(Hidl2AidlOperation, ConvertDescriptorsSync) {
    size_t numElementsInQueue = 64;

    // Create HIDL side and get MQDescriptor
    MessageQueueSync8 fmq(numElementsInQueue);
    ASSERT_TRUE(fmq.isValid());
    const HidlMQDescSync8* hidlDesc = fmq.getDesc();
    ASSERT_NE(nullptr, hidlDesc);

    // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
    AidlMQDescSync8 aidlDesc;
    android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, SynchronizedReadWrite>(*hidlDesc,
                                                                                 &aidlDesc);

    // Other process will create the other side of the queue using the AIDL MQDescriptor
    AidlMessageQueueSync8 aidlFmq(aidlDesc);
    ASSERT_TRUE(aidlFmq.isValid());

    // Make sure a write to the HIDL side, will show up for the AIDL side
    constexpr size_t dataLen = 4;
    uint8_t data[dataLen] = {12, 11, 10, 9};
    fmq.write(data, dataLen);

    int8_t readData[dataLen];
    ASSERT_TRUE(aidlFmq.read(readData, dataLen));

    ASSERT_EQ(data[0], readData[0]);
    ASSERT_EQ(data[1], readData[1]);
    ASSERT_EQ(data[2], readData[2]);
    ASSERT_EQ(data[3], readData[3]);
}
431
/*
 * Same as ConvertDescriptorsSync but for the unsynchronized flavor, which
 * additionally allows multiple readers; both readers must see the write.
 */
TEST_F(Hidl2AidlOperation, ConvertDescriptorsUnsync) {
    size_t numElementsInQueue = 64;

    // Create HIDL side and get MQDescriptor
    MessageQueueUnsync8 fmq(numElementsInQueue);
    ASSERT_TRUE(fmq.isValid());
    const HidlMQDescUnsync8* hidlDesc = fmq.getDesc();
    ASSERT_NE(nullptr, hidlDesc);

    // Create AIDL MQDescriptor to send to another process based off the HIDL MQDescriptor
    AidlMQDescUnsync8 aidlDesc;
    android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(*hidlDesc,
                                                                                &aidlDesc);

    // Other process will create the other side of the queue using the AIDL MQDescriptor
    AidlMessageQueueUnsync8 aidlFmq(aidlDesc);
    ASSERT_TRUE(aidlFmq.isValid());

    // Can have multiple readers with unsync flavor
    AidlMessageQueueUnsync8 aidlFmq2(aidlDesc);
    ASSERT_TRUE(aidlFmq2.isValid());

    // Make sure a write to the HIDL side, will show up for the AIDL side
    constexpr size_t dataLen = 4;
    uint8_t data[dataLen] = {12, 11, 10, 9};
    fmq.write(data, dataLen);

    int8_t readData[dataLen];
    ASSERT_TRUE(aidlFmq.read(readData, dataLen));
    int8_t readData2[dataLen];
    ASSERT_TRUE(aidlFmq2.read(readData2, dataLen));

    ASSERT_EQ(data[0], readData[0]);
    ASSERT_EQ(data[1], readData[1]);
    ASSERT_EQ(data[2], readData[2]);
    ASSERT_EQ(data[3], readData[3]);
    ASSERT_EQ(data[0], readData2[0]);
    ASSERT_EQ(data[1], readData2[1]);
    ASSERT_EQ(data[2], readData2[2]);
    ASSERT_EQ(data[3], readData2[3]);
}
473
/*
 * Verify that HIDL->AIDL descriptor conversion handles grantors that
 * reference a non-zero fd index. Only the handle/grantor bookkeeping is
 * exercised; the fd values are dummies.
 */
TEST_F(Hidl2AidlOperation, ConvertFdIndex1) {
    native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
    if (mqHandle == nullptr) {
        // Allocation failed; nothing to test.
        return;
    }
    mqHandle->data[0] = 12;  // dummy fd values
    mqHandle->data[1] = 5;
    ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
    grantors.resize(3);
    // All three grantors reference the second fd (index 1).
    grantors[0] = {0, 1 /* fdIndex */, 16, 16};
    grantors[1] = {0, 1 /* fdIndex */, 16, 16};
    grantors[2] = {0, 1 /* fdIndex */, 16, 16};

    HidlMQDescUnsync8 hidlDesc(grantors, mqHandle, 10);
    ASSERT_TRUE(hidlDesc.isHandleValid());

    AidlMQDescUnsync8 aidlDesc;
    bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
            hidlDesc, &aidlDesc);
    ASSERT_TRUE(ret);
}
495
/*
 * Verify that a descriptor whose grantors span two distinct fds converts to
 * AIDL with both fds preserved in the resulting handle.
 */
TEST_F(Hidl2AidlOperation, ConvertMultipleFds) {
    native_handle_t* mqHandle = native_handle_create(2 /* numFds */, 0 /* numInts */);
    if (mqHandle == nullptr) {
        // Allocation failed; nothing to test.
        return;
    }
    mqHandle->data[0] = ::ashmem_create_region("ConvertMultipleFds", 8);
    mqHandle->data[1] = ::ashmem_create_region("ConvertMultipleFds2", 8);
    ::android::hardware::hidl_vec<android::hardware::GrantorDescriptor> grantors;
    grantors.resize(3);
    // Grantors reference both fd index 1 and fd index 0.
    grantors[0] = {0, 1 /* fdIndex */, 16, 16};
    grantors[1] = {0, 1 /* fdIndex */, 16, 16};
    grantors[2] = {0, 0 /* fdIndex */, 16, 16};

    HidlMQDescUnsync8 hidlDesc(grantors, mqHandle, 10);
    ASSERT_TRUE(hidlDesc.isHandleValid());

    AidlMQDescUnsync8 aidlDesc;
    bool ret = android::unsafeHidlToAidlMQDescriptor<uint8_t, int8_t, UnsynchronizedWrite>(
            hidlDesc, &aidlDesc);
    ASSERT_TRUE(ret);
    // Both backing fds must survive the conversion.
    EXPECT_EQ(aidlDesc.handle.fds.size(), 2);
}
518
// TODO(b/165674950) Since AIDL does not support unsigned integers, it can only support
// 1/2 the queue size of HIDL. Once support is added to AIDL, this restriction can be
// lifted. Until then, check against SSIZE_MAX instead of SIZE_MAX.
TEST_F(AidlOnlyBadQueueConfig, QueueSizeTooLargeForAidl) {
    // One element past the AIDL (signed) limit.
    size_t numElementsInQueue = SSIZE_MAX / sizeof(uint16_t) + 1;
    AidlMessageQueueSync16 fmq(numElementsInQueue);
    /*
     * Should fail due to size being too large to fit into size_t.
     */
    ASSERT_FALSE(fmq.isValid());
}
530
TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptor) {
    aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
    // quantum is the per-element payload size; a negative value is invalid.
    desc.quantum = -10;
    AidlMessageQueueSync16 fmq(desc);
    /*
     * Should fail due to quantum being negative.
     */
    ASSERT_FALSE(fmq.isValid());
}
540
TEST_F(AidlOnlyBadQueueConfig, NegativeAidlDescriptorGrantor) {
    aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc;
    // Valid quantum/flags so that only the grantor is at fault.
    desc.quantum = 2;
    desc.flags = 0;
    desc.grantors.push_back(
            aidl::android::hardware::common::fmq::GrantorDescriptor{.offset = 12, .extent = -10});
    AidlMessageQueueSync16 fmq(desc);
    /*
     * Should fail due to grantor having negative extent.
     */
    ASSERT_FALSE(fmq.isValid());
}
553
554 /*
555 * Test creating a new queue from a modified MQDescriptor of another queue.
556 * If MQDescriptor.quantum doesn't match the size of the payload(T), the queue
557 * should be invalid.
558 */
TEST_F(AidlOnlyBadQueueConfig,MismatchedPayloadSize)559 TEST_F(AidlOnlyBadQueueConfig, MismatchedPayloadSize) {
560 AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(64);
561 aidl::android::hardware::common::fmq::MQDescriptor<uint16_t, SynchronizedReadWrite> desc =
562 fmq.dupeDesc();
563 // This should work fine with the unmodified MQDescriptor
564 AidlMessageQueueSync16 fmq2 = AidlMessageQueueSync16(desc);
565 ASSERT_TRUE(fmq2.isValid());
566
567 // Simulate a difference in payload size between processes handling the queue
568 desc.quantum = 8;
569 AidlMessageQueueSync16 fmq3 = AidlMessageQueueSync16(desc);
570
571 // Should fail due to the quantum not matching the sizeof(uint16_t)
572 ASSERT_FALSE(fmq3.isValid());
573 }
574
/*
 * Test creating a new queue with an invalid fd. This should fail to create
 * a valid message queue (the implementation logs "mRing is null" — hence the
 * stdio logger below so the message is visible in test output).
 */
TEST_F(DoubleFdFailures, InvalidFd) {
    android::base::SetLogger(android::base::StdioLogger);
    // 3000 is not expected to be an open fd in this process.
    auto queue = AidlMessageQueueSync(64, false, android::base::unique_fd(3000), 64);
    EXPECT_FALSE(queue.isValid());
}
584
/*
 * Test creating a new queue with a buffer fd and bufferSize smaller than the
 * requested queue. This should fail to create a valid message queue.
 */
TEST_F(DoubleFdFailures, InvalidFdSize) {
    constexpr size_t kNumElementsInQueue = 1024;
    constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
    // Region is 8 bytes short of what the ring buffer needs.
    android::base::unique_fd ringbufferFd(
            ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize - 8));
    AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
            kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize - 8);
    EXPECT_FALSE(fmq.isValid());
}
598
/*
 * Test creating a new queue with a buffer fd and bufferSize larger than the
 * requested queue. The message queue should be valid.
 */
TEST_F(DoubleFdFailures, LargerFdSize) {
    constexpr size_t kNumElementsInQueue = 1024;
    constexpr size_t kRequiredDataBufferSize = kNumElementsInQueue * sizeof(uint16_t);
    // Extra space beyond what the ring buffer needs is acceptable.
    android::base::unique_fd ringbufferFd(
            ::ashmem_create_region("SyncReadWrite", kRequiredDataBufferSize + 8));
    AidlMessageQueueSync16 fmq = AidlMessageQueueSync16(
            kNumElementsInQueue, false, std::move(ringbufferFd), kRequiredDataBufferSize + 8);
    EXPECT_TRUE(fmq.isValid());
}
612
/*
 * Test that basic blocking works. This test uses the non-blocking read()/write()
 * APIs with manual EventFlag wait/wake on the reader thread.
 */
TYPED_TEST(BlockingReadWrites, SmallInputTest1) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty.
     */
    std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking, this->mQueue,
                       &this->mFw);
    struct timespec waitTime = {0, 100 * 1000000};  // 100ms
    ASSERT_EQ(0, nanosleep(&waitTime, NULL));

    /*
     * After waiting for some time write into the FMQ
     * and call Wake on kFmqNotEmpty.
     */
    ASSERT_TRUE(this->mQueue->write(data, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    ASSERT_EQ(0, nanosleep(&waitTime, NULL));
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
649
/*
 * Test that basic blocking works. This test uses the
 * writeBlocking()/readBlocking() APIs.
 */
TYPED_TEST(BlockingReadWrites, SmallInputTest2) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Start a thread that will try to read and block on kFmqNotEmpty. It will
     * call wake() on kFmqNotFull when the read is successful.
     */
    std::thread Reader(BlockingReadWrites<TypeParam>::ReaderThreadBlocking2, this->mQueue,
                       &this->mFw);
    // Blocks until the reader has drained enough space (up to 5s).
    bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
                                           static_cast<uint32_t>(kFmqNotEmpty),
                                           5000000000 /* timeOutNanos */, efGroup);
    ASSERT_TRUE(ret);
    Reader.join();

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
679
/*
 * Test that basic blocking times out as intended.
 */
TYPED_TEST(BlockingReadWrites, BlockingTimeOutTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);

    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /* Block on an EventFlag bit that no one will wake and time out in 1s */
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty,
                                          &efState,
                                          1000000000 /* timeoutNanoSeconds */);
    /*
     * Wait should time out in a second.
     */
    EXPECT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
703
/*
 * Test EventFlag wait on a waked flag with a short timeout.
 */
TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithWakeTest) {
    // Stand-alone EventFlag word; the fixture's queue is not used here.
    std::atomic<uint32_t> eventFlagWord;
    std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status =
            android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    // Pre-set the bit so the subsequent wait can return immediately.
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
    ASSERT_EQ(android::NO_ERROR, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
726
/*
 * Test on an EventFlag with no wakeup, short timeout.
 */
TYPED_TEST(BlockingReadWrites, ShortEventFlagWaitWithoutWakeTest) {
    // Stand-alone EventFlag word; the fixture's queue is not used here.
    std::atomic<uint32_t> eventFlagWord;
    std::atomic_init(&eventFlagWord, static_cast<uint32_t>(kFmqNotFull));
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status =
            android::hardware::EventFlag::createEventFlag(&eventFlagWord, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    // No one sets kFmqNotEmpty, so a 1ns wait must time out.
    uint32_t efState = 0;
    android::status_t ret = efGroup->wait(kFmqNotEmpty, &efState, 1 /* ns */, true /* retry */);
    ASSERT_EQ(android::TIMED_OUT, ret);

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
746
/*
 * Test FMQ write and read with event flag wait: data is written and the flag
 * waked BEFORE readBlocking() is called, so even a 1ns timeout must succeed.
 */
TYPED_TEST(BlockingReadWrites, FmqWriteAndReadWithShortEventFlagWaitTest) {
    android::hardware::EventFlag* efGroup = nullptr;
    android::status_t status = android::hardware::EventFlag::createEventFlag(&this->mFw, &efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
    ASSERT_NE(nullptr, efGroup);

    /*
     * Write into the FMQ and call Wake on kFmqNotEmpty before reading.
     */
    const size_t dataLen = 16;
    uint8_t dataW[dataLen] = {0};
    uint8_t dataR[dataLen] = {0};
    ASSERT_TRUE(this->mQueue->write(dataW, dataLen));
    status = efGroup->wake(kFmqNotEmpty);
    ASSERT_EQ(android::NO_ERROR, status);

    ASSERT_TRUE(this->mQueue->readBlocking(dataR, dataLen, static_cast<uint32_t>(kFmqNotEmpty),
                                           static_cast<uint32_t>(kFmqNotFull), 1 /* timeOutNanos */,
                                           efGroup));
    ASSERT_EQ(0, memcmp(dataW, dataR, dataLen));

    status = android::hardware::EventFlag::deleteEventFlag(&efGroup);
    ASSERT_EQ(android::NO_ERROR, status);
}
775
/*
 * Test that odd queue sizes do not cause unaligned error
 * on access to EventFlag object.
 */
TYPED_TEST(QueueSizeOdd, EventFlagTest) {
    const size_t dataLen = 64;
    uint8_t data[dataLen] = {0};

    // Uses the queue-internal EventFlag word configured by the fixture; the
    // queue starts empty so the write should succeed without blocking.
    bool ret = this->mQueue->writeBlocking(data, dataLen, static_cast<uint32_t>(kFmqNotFull),
                                           static_cast<uint32_t>(kFmqNotEmpty),
                                           5000000000 /* timeOutNanos */);
    ASSERT_TRUE(ret);
}
789
790 /*
791 * Verify that a few bytes of data can be successfully written and read.
792 */
TYPED_TEST(SynchronizedReadWrites,SmallInputTest1)793 TYPED_TEST(SynchronizedReadWrites, SmallInputTest1) {
794 const size_t dataLen = 16;
795 ASSERT_LE(dataLen, this->mNumMessagesMax);
796 uint8_t data[dataLen];
797
798 initData(data, dataLen);
799
800 ASSERT_TRUE(this->mQueue->write(data, dataLen));
801 uint8_t readData[dataLen] = {};
802 ASSERT_TRUE(this->mQueue->read(readData, dataLen));
803 ASSERT_EQ(0, memcmp(data, readData, dataLen));
804 }
805
/*
 * Verify that a few bytes of data can be successfully written and read using
 * beginRead/beginWrite/CommitRead/CommitWrite
 */
TYPED_TEST(SynchronizedReadWrites, SmallInputTest2) {
    const size_t dataLen = 16;
    ASSERT_LE(dataLen, this->mNumMessagesMax);
    uint8_t data[dataLen];

    initData(data, dataLen);

    // Reserve space in the ring buffer, copy in, then commit the write.
    typename TypeParam::MQType::MemTransaction tx;
    ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));

    ASSERT_TRUE(tx.copyTo(data, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(this->mQueue->commitWrite(dataLen));

    uint8_t readData[dataLen] = {};

    // Same pattern on the read side: begin, copy out, commit.
    ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));

    ASSERT_TRUE(tx.copyFrom(readData, 0 /* startIdx */, dataLen));

    ASSERT_TRUE(this->mQueue->commitRead(dataLen));

    ASSERT_EQ(0, memcmp(data, readData, dataLen));
}
834
835 /*
836 * Verify that a few bytes of data can be successfully written and read using
837 * beginRead/beginWrite/CommitRead/CommitWrite as well as getSlot().
838 */
TYPED_TEST(SynchronizedReadWrites,SmallInputTest3)839 TYPED_TEST(SynchronizedReadWrites, SmallInputTest3) {
840 const size_t dataLen = 16;
841 ASSERT_LE(dataLen, this->mNumMessagesMax);
842 uint8_t data[dataLen];
843
844 initData(data, dataLen);
845 typename TypeParam::MQType::MemTransaction tx;
846 ASSERT_TRUE(this->mQueue->beginWrite(dataLen, &tx));
847
848 auto first = tx.getFirstRegion();
849 auto second = tx.getSecondRegion();
850
851 ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
852 for (size_t i = 0; i < dataLen; i++) {
853 uint8_t* ptr = tx.getSlot(i);
854 *ptr = data[i];
855 }
856
857 ASSERT_TRUE(this->mQueue->commitWrite(dataLen));
858
859 uint8_t readData[dataLen] = {};
860
861 ASSERT_TRUE(this->mQueue->beginRead(dataLen, &tx));
862
863 first = tx.getFirstRegion();
864 second = tx.getSecondRegion();
865
866 ASSERT_EQ(first.getLength() + second.getLength(), dataLen);
867
868 for (size_t i = 0; i < dataLen; i++) {
869 uint8_t* ptr = tx.getSlot(i);
870 readData[i] = *ptr;
871 }
872
873 ASSERT_TRUE(this->mQueue->commitRead(dataLen));
874
875 ASSERT_EQ(0, memcmp(data, readData, dataLen));
876 }
877
878 /*
879 * Verify that read() returns false when trying to read from an empty queue.
880 */
TYPED_TEST(SynchronizedReadWrites,ReadWhenEmpty1)881 TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty1) {
882 ASSERT_EQ(0UL, this->mQueue->availableToRead());
883 const size_t dataLen = 2;
884 ASSERT_LE(dataLen, this->mNumMessagesMax);
885 uint8_t readData[dataLen];
886 ASSERT_FALSE(this->mQueue->read(readData, dataLen));
887 }
888
889 /*
890 * Verify that beginRead() returns a MemTransaction object with null pointers when trying
891 * to read from an empty queue.
892 */
TYPED_TEST(SynchronizedReadWrites,ReadWhenEmpty2)893 TYPED_TEST(SynchronizedReadWrites, ReadWhenEmpty2) {
894 ASSERT_EQ(0UL, this->mQueue->availableToRead());
895 const size_t dataLen = 2;
896 ASSERT_LE(dataLen, this->mNumMessagesMax);
897
898 typename TypeParam::MQType::MemTransaction tx;
899 ASSERT_FALSE(this->mQueue->beginRead(dataLen, &tx));
900
901 auto first = tx.getFirstRegion();
902 auto second = tx.getSecondRegion();
903
904 ASSERT_EQ(nullptr, first.getAddress());
905 ASSERT_EQ(nullptr, second.getAddress());
906 }
907
908 /*
909 * Write the queue until full. Verify that another write is unsuccessful.
910 * Verify that availableToWrite() returns 0 as expected.
911 */
TYPED_TEST(SynchronizedReadWrites,WriteWhenFull1)912 TYPED_TEST(SynchronizedReadWrites, WriteWhenFull1) {
913 ASSERT_EQ(0UL, this->mQueue->availableToRead());
914 std::vector<uint8_t> data(this->mNumMessagesMax);
915
916 initData(&data[0], this->mNumMessagesMax);
917 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
918 ASSERT_EQ(0UL, this->mQueue->availableToWrite());
919 ASSERT_FALSE(this->mQueue->write(&data[0], 1));
920
921 std::vector<uint8_t> readData(this->mNumMessagesMax);
922 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
923 ASSERT_EQ(data, readData);
924 }
925
926 /*
927 * Write the queue until full. Verify that beginWrite() returns
928 * a MemTransaction object with null base pointers.
929 */
TYPED_TEST(SynchronizedReadWrites,WriteWhenFull2)930 TYPED_TEST(SynchronizedReadWrites, WriteWhenFull2) {
931 ASSERT_EQ(0UL, this->mQueue->availableToRead());
932 std::vector<uint8_t> data(this->mNumMessagesMax);
933
934 initData(&data[0], this->mNumMessagesMax);
935 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
936 ASSERT_EQ(0UL, this->mQueue->availableToWrite());
937
938 typename TypeParam::MQType::MemTransaction tx;
939 ASSERT_FALSE(this->mQueue->beginWrite(1, &tx));
940
941 auto first = tx.getFirstRegion();
942 auto second = tx.getSecondRegion();
943
944 ASSERT_EQ(nullptr, first.getAddress());
945 ASSERT_EQ(nullptr, second.getAddress());
946 }
947
948 /*
949 * Write a chunk of data equal to the queue size.
950 * Verify that the write is successful and the subsequent read
951 * returns the expected data.
952 */
TYPED_TEST(SynchronizedReadWrites,LargeInputTest1)953 TYPED_TEST(SynchronizedReadWrites, LargeInputTest1) {
954 std::vector<uint8_t> data(this->mNumMessagesMax);
955 initData(&data[0], this->mNumMessagesMax);
956 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
957 std::vector<uint8_t> readData(this->mNumMessagesMax);
958 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
959 ASSERT_EQ(data, readData);
960 }
961
962 /*
963 * Attempt to write a chunk of data larger than the queue size.
964 * Verify that it fails. Verify that a subsequent read fails and
965 * the queue is still empty.
966 */
TYPED_TEST(SynchronizedReadWrites,LargeInputTest2)967 TYPED_TEST(SynchronizedReadWrites, LargeInputTest2) {
968 ASSERT_EQ(0UL, this->mQueue->availableToRead());
969 const size_t dataLen = 4096;
970 ASSERT_GT(dataLen, this->mNumMessagesMax);
971 std::vector<uint8_t> data(dataLen);
972
973 initData(&data[0], dataLen);
974 ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
975 std::vector<uint8_t> readData(this->mNumMessagesMax);
976 ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
977 ASSERT_NE(data, readData);
978 ASSERT_EQ(0UL, this->mQueue->availableToRead());
979 }
980
981 /*
982 * After the queue is full, try to write more data. Verify that
983 * the attempt returns false. Verify that the attempt did not
984 * affect the pre-existing data in the queue.
985 */
TYPED_TEST(SynchronizedReadWrites,LargeInputTest3)986 TYPED_TEST(SynchronizedReadWrites, LargeInputTest3) {
987 std::vector<uint8_t> data(this->mNumMessagesMax);
988 initData(&data[0], this->mNumMessagesMax);
989 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
990 ASSERT_FALSE(this->mQueue->write(&data[0], 1));
991 std::vector<uint8_t> readData(this->mNumMessagesMax);
992 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
993 ASSERT_EQ(data, readData);
994 }
995
996 /*
997 * Verify that beginWrite() returns a MemTransaction with
998 * null base pointers when attempting to write data larger
999 * than the queue size.
1000 */
TYPED_TEST(SynchronizedReadWrites,LargeInputTest4)1001 TYPED_TEST(SynchronizedReadWrites, LargeInputTest4) {
1002 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1003 const size_t dataLen = 4096;
1004 ASSERT_GT(dataLen, this->mNumMessagesMax);
1005
1006 typename TypeParam::MQType::MemTransaction tx;
1007 ASSERT_FALSE(this->mQueue->beginWrite(dataLen, &tx));
1008
1009 auto first = tx.getFirstRegion();
1010 auto second = tx.getSecondRegion();
1011
1012 ASSERT_EQ(nullptr, first.getAddress());
1013 ASSERT_EQ(nullptr, second.getAddress());
1014 }
1015
1016 /*
1017 * Verify that multiple reads one after the other return expected data.
1018 */
TYPED_TEST(SynchronizedReadWrites,MultipleRead)1019 TYPED_TEST(SynchronizedReadWrites, MultipleRead) {
1020 const size_t chunkSize = 100;
1021 const size_t chunkNum = 5;
1022 const size_t dataLen = chunkSize * chunkNum;
1023 ASSERT_LE(dataLen, this->mNumMessagesMax);
1024 uint8_t data[dataLen];
1025
1026 initData(data, dataLen);
1027 ASSERT_TRUE(this->mQueue->write(data, dataLen));
1028 uint8_t readData[dataLen] = {};
1029 for (size_t i = 0; i < chunkNum; i++) {
1030 ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
1031 }
1032 ASSERT_EQ(0, memcmp(readData, data, dataLen));
1033 }
1034
1035 /*
1036 * Verify that multiple writes one after the other happens correctly.
1037 */
TYPED_TEST(SynchronizedReadWrites,MultipleWrite)1038 TYPED_TEST(SynchronizedReadWrites, MultipleWrite) {
1039 const int chunkSize = 100;
1040 const int chunkNum = 5;
1041 const size_t dataLen = chunkSize * chunkNum;
1042 ASSERT_LE(dataLen, this->mNumMessagesMax);
1043 uint8_t data[dataLen];
1044
1045 initData(data, dataLen);
1046 for (unsigned int i = 0; i < chunkNum; i++) {
1047 ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
1048 }
1049 uint8_t readData[dataLen] = {};
1050 ASSERT_TRUE(this->mQueue->read(readData, dataLen));
1051 ASSERT_EQ(0, memcmp(readData, data, dataLen));
1052 }
1053
1054 /*
1055 * Write enough messages into the FMQ to fill half of it
1056 * and read back the same.
1057 * Write this->mNumMessagesMax messages into the queue. This will cause a
1058 * wrap around. Read and verify the data.
1059 */
TYPED_TEST(SynchronizedReadWrites,ReadWriteWrapAround1)1060 TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround1) {
1061 size_t numMessages = this->mNumMessagesMax - 1;
1062 std::vector<uint8_t> data(this->mNumMessagesMax);
1063 std::vector<uint8_t> readData(this->mNumMessagesMax);
1064 initData(&data[0], this->mNumMessagesMax);
1065 ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
1066 ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
1067 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1068 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1069 ASSERT_EQ(data, readData);
1070 }
1071
1072 /*
1073 * Use beginRead/CommitRead/beginWrite/commitWrite APIs
1074 * to test wrap arounds are handled correctly.
1075 * Write enough messages into the FMQ to fill half of it
1076 * and read back the same.
1077 * Write mNumMessagesMax messages into the queue. This will cause a
1078 * wrap around. Read and verify the data.
1079 */
TYPED_TEST(SynchronizedReadWrites,ReadWriteWrapAround2)1080 TYPED_TEST(SynchronizedReadWrites, ReadWriteWrapAround2) {
1081 size_t dataLen = this->mNumMessagesMax - 1;
1082 std::vector<uint8_t> data(this->mNumMessagesMax);
1083 std::vector<uint8_t> readData(this->mNumMessagesMax);
1084 initData(&data[0], this->mNumMessagesMax);
1085 ASSERT_TRUE(this->mQueue->write(&data[0], dataLen));
1086 ASSERT_TRUE(this->mQueue->read(&readData[0], dataLen));
1087
1088 /*
1089 * The next write and read will have to deal with with wrap arounds.
1090 */
1091 typename TypeParam::MQType::MemTransaction tx;
1092 ASSERT_TRUE(this->mQueue->beginWrite(this->mNumMessagesMax, &tx));
1093
1094 auto first = tx.getFirstRegion();
1095 auto second = tx.getSecondRegion();
1096
1097 ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);
1098
1099 ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */, this->mNumMessagesMax));
1100
1101 ASSERT_TRUE(this->mQueue->commitWrite(this->mNumMessagesMax));
1102
1103 ASSERT_TRUE(this->mQueue->beginRead(this->mNumMessagesMax, &tx));
1104
1105 first = tx.getFirstRegion();
1106 second = tx.getSecondRegion();
1107
1108 ASSERT_EQ(first.getLength() + second.getLength(), this->mNumMessagesMax);
1109
1110 ASSERT_TRUE(tx.copyFrom(&readData[0], 0 /* startIdx */, this->mNumMessagesMax));
1111 ASSERT_TRUE(this->mQueue->commitRead(this->mNumMessagesMax));
1112
1113 ASSERT_EQ(data, readData);
1114 }
1115
1116 /*
1117 * Verify that a few bytes of data can be successfully written and read.
1118 */
TYPED_TEST(UnsynchronizedWriteTest,SmallInputTest1)1119 TYPED_TEST(UnsynchronizedWriteTest, SmallInputTest1) {
1120 const size_t dataLen = 16;
1121 ASSERT_LE(dataLen, this->mNumMessagesMax);
1122 uint8_t data[dataLen];
1123
1124 initData(data, dataLen);
1125 ASSERT_TRUE(this->mQueue->write(data, dataLen));
1126 uint8_t readData[dataLen] = {};
1127 ASSERT_TRUE(this->mQueue->read(readData, dataLen));
1128 ASSERT_EQ(0, memcmp(data, readData, dataLen));
1129 }
1130
1131 /*
1132 * Verify that read() returns false when trying to read from an empty queue.
1133 */
TYPED_TEST(UnsynchronizedWriteTest,ReadWhenEmpty)1134 TYPED_TEST(UnsynchronizedWriteTest, ReadWhenEmpty) {
1135 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1136 const size_t dataLen = 2;
1137 ASSERT_TRUE(dataLen < this->mNumMessagesMax);
1138 uint8_t readData[dataLen];
1139 ASSERT_FALSE(this->mQueue->read(readData, dataLen));
1140 }
1141
1142 /*
1143 * Write the queue when full. Verify that a subsequent writes is succesful.
1144 * Verify that availableToWrite() returns 0 as expected.
1145 */
TYPED_TEST(UnsynchronizedWriteTest,WriteWhenFull1)1146 TYPED_TEST(UnsynchronizedWriteTest, WriteWhenFull1) {
1147 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1148 std::vector<uint8_t> data(this->mNumMessagesMax);
1149
1150 initData(&data[0], this->mNumMessagesMax);
1151 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1152 ASSERT_EQ(0UL, this->mQueue->availableToWrite());
1153 ASSERT_TRUE(this->mQueue->write(&data[0], 1));
1154
1155 std::vector<uint8_t> readData(this->mNumMessagesMax);
1156 ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1157 }
1158
1159 /*
1160 * Write the queue when full. Verify that a subsequent writes
1161 * using beginRead()/commitRead() is succesful.
1162 * Verify that the next read fails as expected for unsynchronized flavor.
1163 */
TYPED_TEST(UnsynchronizedWriteTest,WriteWhenFull2)1164 TYPED_TEST(UnsynchronizedWriteTest, WriteWhenFull2) {
1165 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1166 std::vector<uint8_t> data(this->mNumMessagesMax);
1167 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1168
1169 typename TypeParam::MQType::MemTransaction tx;
1170 ASSERT_TRUE(this->mQueue->beginWrite(1, &tx));
1171
1172 ASSERT_EQ(tx.getFirstRegion().getLength(), 1U);
1173
1174 ASSERT_TRUE(tx.copyTo(&data[0], 0 /* startIdx */));
1175
1176 ASSERT_TRUE(this->mQueue->commitWrite(1));
1177
1178 std::vector<uint8_t> readData(this->mNumMessagesMax);
1179 ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1180 }
1181
1182 /*
1183 * Write a chunk of data equal to the queue size.
1184 * Verify that the write is successful and the subsequent read
1185 * returns the expected data.
1186 */
TYPED_TEST(UnsynchronizedWriteTest,LargeInputTest1)1187 TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest1) {
1188 std::vector<uint8_t> data(this->mNumMessagesMax);
1189 initData(&data[0], this->mNumMessagesMax);
1190 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1191 std::vector<uint8_t> readData(this->mNumMessagesMax);
1192 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1193 ASSERT_EQ(data, readData);
1194 }
1195
1196 /*
1197 * Attempt to write a chunk of data larger than the queue size.
1198 * Verify that it fails. Verify that a subsequent read fails and
1199 * the queue is still empty.
1200 */
TYPED_TEST(UnsynchronizedWriteTest,LargeInputTest2)1201 TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest2) {
1202 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1203 const size_t dataLen = 4096;
1204 ASSERT_GT(dataLen, this->mNumMessagesMax);
1205 std::vector<uint8_t> data(dataLen);
1206 initData(&data[0], dataLen);
1207 ASSERT_FALSE(this->mQueue->write(&data[0], dataLen));
1208 std::vector<uint8_t> readData(this->mNumMessagesMax);
1209 ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1210 ASSERT_NE(data, readData);
1211 ASSERT_EQ(0UL, this->mQueue->availableToRead());
1212 }
1213
1214 /*
1215 * After the queue is full, try to write more data. Verify that
1216 * the attempt is succesful. Verify that the read fails
1217 * as expected.
1218 */
TYPED_TEST(UnsynchronizedWriteTest,LargeInputTest3)1219 TYPED_TEST(UnsynchronizedWriteTest, LargeInputTest3) {
1220 std::vector<uint8_t> data(this->mNumMessagesMax);
1221 initData(&data[0], this->mNumMessagesMax);
1222 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1223 ASSERT_TRUE(this->mQueue->write(&data[0], 1));
1224 std::vector<uint8_t> readData(this->mNumMessagesMax);
1225 ASSERT_FALSE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1226 }
1227
1228 /*
1229 * Verify that multiple reads one after the other return expected data.
1230 */
TYPED_TEST(UnsynchronizedWriteTest,MultipleRead)1231 TYPED_TEST(UnsynchronizedWriteTest, MultipleRead) {
1232 const size_t chunkSize = 100;
1233 const size_t chunkNum = 5;
1234 const size_t dataLen = chunkSize * chunkNum;
1235 ASSERT_LE(dataLen, this->mNumMessagesMax);
1236 uint8_t data[dataLen];
1237 initData(data, dataLen);
1238 ASSERT_TRUE(this->mQueue->write(data, dataLen));
1239 uint8_t readData[dataLen] = {};
1240 for (size_t i = 0; i < chunkNum; i++) {
1241 ASSERT_TRUE(this->mQueue->read(readData + i * chunkSize, chunkSize));
1242 }
1243 ASSERT_EQ(0, memcmp(readData, data, dataLen));
1244 }
1245
1246 /*
1247 * Verify that multiple writes one after the other happens correctly.
1248 */
TYPED_TEST(UnsynchronizedWriteTest,MultipleWrite)1249 TYPED_TEST(UnsynchronizedWriteTest, MultipleWrite) {
1250 const size_t chunkSize = 100;
1251 const size_t chunkNum = 5;
1252 const size_t dataLen = chunkSize * chunkNum;
1253 ASSERT_LE(dataLen, this->mNumMessagesMax);
1254 uint8_t data[dataLen];
1255
1256 initData(data, dataLen);
1257 for (size_t i = 0; i < chunkNum; i++) {
1258 ASSERT_TRUE(this->mQueue->write(data + i * chunkSize, chunkSize));
1259 }
1260
1261 uint8_t readData[dataLen] = {};
1262 ASSERT_TRUE(this->mQueue->read(readData, dataLen));
1263 ASSERT_EQ(0, memcmp(readData, data, dataLen));
1264 }
1265
1266 /*
1267 * Write enough messages into the FMQ to fill half of it
1268 * and read back the same.
1269 * Write mNumMessagesMax messages into the queue. This will cause a
1270 * wrap around. Read and verify the data.
1271 */
TYPED_TEST(UnsynchronizedWriteTest,ReadWriteWrapAround)1272 TYPED_TEST(UnsynchronizedWriteTest, ReadWriteWrapAround) {
1273 size_t numMessages = this->mNumMessagesMax - 1;
1274 std::vector<uint8_t> data(this->mNumMessagesMax);
1275 std::vector<uint8_t> readData(this->mNumMessagesMax);
1276
1277 initData(&data[0], this->mNumMessagesMax);
1278 ASSERT_TRUE(this->mQueue->write(&data[0], numMessages));
1279 ASSERT_TRUE(this->mQueue->read(&readData[0], numMessages));
1280 ASSERT_TRUE(this->mQueue->write(&data[0], this->mNumMessagesMax));
1281 ASSERT_TRUE(this->mQueue->read(&readData[0], this->mNumMessagesMax));
1282 ASSERT_EQ(data, readData);
1283 }
1284
1285 /*
1286 * Ensure that the template specialization of MessageQueueBase to element types
1287 * other than MQErased exposes its static knowledge of element size.
1288 */
TEST(MessageQueueErasedTest,MQErasedCompiles)1289 TEST(MessageQueueErasedTest, MQErasedCompiles) {
1290 auto txn = AidlMessageQueueSync::MemRegion();
1291 txn.getLengthInBytes();
1292 }
1293
1294 extern "C" uint8_t fmq_rust_test(void);
1295
1296 /*
1297 * Test using the FMQ from Rust.
1298 */
TEST(RustInteropTest,Simple)1299 TEST(RustInteropTest, Simple) {
1300 ASSERT_EQ(fmq_rust_test(), 1);
1301 }
1302