/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stddef.h>
#include <stdint.h>
#include <iostream>
#include <limits>
#include <thread>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <android-base/unique_fd.h>
#include <cutils/ashmem.h>
#include <fmq/AidlMessageQueue.h>
#include <fmq/ConvertMQDescriptors.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>

#include "fuzzer/FuzzedDataProvider.h"

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

typedef int32_t payload_t;

// Blocking reads and writes will wait for at most 10 ms
static constexpr int kBlockingTimeoutNs = 10000000;

/*
 * MessageQueueBase.h contains asserts when memory allocation fails. So we need
 * to set a reasonable limit if we want to avoid those asserts.
 */
static constexpr size_t kAlignment = 8;
static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1;

/*
 * The read counter can be found in the shared memory 16 bytes before the start
 * of the ring buffer.
 */
static constexpr int kReadCounterOffsetBytes = 16;
/*
 * The write counter can be found in the shared memory 8 bytes before the start
 * of the ring buffer.
 */
static constexpr int kWriteCounterOffsetBytes = 8;

static constexpr int kMaxNumSyncReaders = 1;
static constexpr int kMaxNumUnsyncReaders = 5;
static constexpr int kMaxDataPerReader = 1000;

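// Convenience aliases for the AIDL and HIDL queue flavors and their descriptor types.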
typedef android::AidlMessageQueue<payload_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<payload_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<payload_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<payload_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, SynchronizedReadWrite>
        AidlMQDescSync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, UnsynchronizedWrite>
        AidlMQDescUnsync;
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;

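// Returns a pointer to the read or write counter that the queue keeps in shared
// memory just before the ring buffer (see the offset constants above).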
static inline uint64_t* getCounterPtr(payload_t* start, int byteOffset) {
    return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - byteOffset);
}

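// Non-blocking reader: opens the queue from the descriptor and performs
// fuzzer-driven beginRead/commitRead transactions. When the queue was not
// created with a user supplied fd, it may also scribble a fuzzed value over
// the write counter in shared memory.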
template <typename Queue, typename Desc>
void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) {
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    payload_t* ring = nullptr;
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
        if (!readMq.beginRead(numElements, &tx)) {
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (ring == nullptr) {
                ring = firstStart;
            }
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* writeCounter = getCounterPtr(ring, kWriteCounterOffsetBytes);
                *writeCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        (void)std::to_string(*firstStart);

        readMq.commitRead(numElements);
    }
}

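// Blocking reader: repeatedly calls readBlocking() with fuzzer-chosen element
// counts until its data is exhausted or all writers have finished.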
template <typename Queue, typename Desc>
void readerBlocking(const Desc& desc, std::vector<uint8_t>& readerData,
                    std::atomic<size_t>& readersNotFinished,
                    std::atomic<size_t>& writersNotFinished) {
    android::base::ScopeGuard guard([&readersNotFinished]() { readersNotFinished--; });
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    do {
        size_t count = fdp.remaining_bytes()
                               ? fdp.ConsumeIntegralInRange<size_t>(1, readMq.getQuantumCount())
                               : 1;
        std::vector<payload_t> data;
        data.resize(count);
        readMq.readBlocking(data.data(), count, kBlockingTimeoutNs);
    } while (fdp.remaining_bytes() > sizeof(size_t) && writersNotFinished > 0);
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void readerBlocking<AidlMessageQueueUnsync, AidlMQDescUnsync>(const AidlMQDescUnsync&,
                                                              std::vector<uint8_t>&,
                                                              std::atomic<size_t>&,
                                                              std::atomic<size_t>&) {}
template <>
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
                                                      std::atomic<size_t>&, std::atomic<size_t>&) {}

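// Non-blocking writer: performs one-element beginWrite/commitWrite transactions
// driven by the fuzzer input. When the queue was not created with a user
// supplied fd, it may also scribble a fuzzed value over the read counter in
// shared memory.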
template <typename Queue>
void writer(Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
    payload_t* ring = nullptr;
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = 1;
        if (!writeMq.beginWrite(numElements, &tx)) {
            // need to consume something so we don't end up looping forever
            fdp.ConsumeIntegral<uint8_t>();
            continue;
        }

        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();
        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (ring == nullptr) {
                ring = firstStart;
            }
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* readCounter = getCounterPtr(ring, kReadCounterOffsetBytes);
                *readCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        *firstStart = fdp.ConsumeIntegral<payload_t>();

        writeMq.commitWrite(numElements);
    }
}

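// Blocking writer: fills a fuzzer-chosen number of elements and calls
// writeBlocking() until its data is exhausted or all readers have finished.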
template <typename Queue>
void writerBlocking(Queue& writeMq, FuzzedDataProvider& fdp,
                    std::atomic<size_t>& writersNotFinished,
                    std::atomic<size_t>& readersNotFinished) {
    android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
    while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
        size_t count = fdp.ConsumeIntegralInRange<size_t>(1, writeMq.getQuantumCount());
        std::vector<payload_t> data;
        for (int i = 0; i < count; i++) {
            data.push_back(fdp.ConsumeIntegral<payload_t>());
        }
        writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
    }
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void writerBlocking<AidlMessageQueueUnsync>(AidlMessageQueueUnsync&, FuzzedDataProvider&,
                                            std::atomic<size_t>&, std::atomic<size_t>&) {}
template <>
void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider&,
                                        std::atomic<size_t>&, std::atomic<size_t>&) {}

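// Creates an AIDL message queue (optionally backed by a caller-provided ashmem
// region), spawns the reader threads, runs the writer on the current thread,
// and then joins the readers.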
template <typename Queue, typename Desc>
void fuzzAidlWithReaders(std::vector<uint8_t>& writerData,
                         std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    android::base::unique_fd dataFd;
    size_t bufferSize = 0;
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    bool userFd = fdp.ConsumeBool();
    if (userFd) {
        // run test with our own data region
        bufferSize = numElements * sizeof(payload_t);
        dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
    }
    Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
    if (!writeMq.isValid()) {
        LOG(ERROR) << "AIDL write mq invalid";
        return;
    }
    const auto desc = writeMq.dupeDesc();
    CHECK(desc.handle.fds[0].get() != -1);

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (int i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));

        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking<Queue>(writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer<Queue>(writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

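// Same as fuzzAidlWithReaders, but exercises the HIDL MessageQueue flavor and
// its descriptor.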
template <typename Queue, typename Desc>
void fuzzHidlWithReaders(std::vector<uint8_t>& writerData,
                         std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    android::base::unique_fd dataFd;
    size_t bufferSize = 0;
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    bool userFd = fdp.ConsumeBool();
    if (userFd) {
        // run test with our own data region
        bufferSize = numElements * sizeof(payload_t);
        dataFd.reset(::ashmem_create_region("SyncReadWrite", bufferSize));
    }
    Queue writeMq(numElements, evFlag, std::move(dataFd), bufferSize);
    if (!writeMq.isValid()) {
        LOG(ERROR) << "HIDL write mq invalid";
        return;
    }
    const auto desc = writeMq.getDesc();
    CHECK(desc->isHandleValid());

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (int i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(*desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(*desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking<Queue>(writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer<Queue>(writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

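// Fuzzer entry point: splits the input into per-reader data and writer data,
// then exercises both the HIDL and AIDL queues in either synchronized or
// unsynchronized mode (blocking calls are only used with synchronized queues).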
extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1 || size > 50000) {
        return 0;
    }
    FuzzedDataProvider fdp(data, size);

    bool fuzzSync = fdp.ConsumeBool();
    std::vector<std::vector<uint8_t>> readerData;
    uint8_t numReaders = fuzzSync ? fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumSyncReaders)
                                  : fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumUnsyncReaders);
    for (int i = 0; i < numReaders; i++) {
        readerData.emplace_back(fdp.ConsumeBytes<uint8_t>(kMaxDataPerReader));
    }
    bool fuzzBlocking = fdp.ConsumeBool();
    std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
    if (fuzzSync) {
        fuzzHidlWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
        fuzzAidlWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData,
                                                                  fuzzBlocking);
    } else {
        fuzzHidlWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
        fuzzAidlWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData,
                                                                      false);
    }

    return 0;
}