/*
 * Copyright (C) 2021 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include <stddef.h>
#include <stdint.h>
#include <sys/mman.h>

#include <atomic>
#include <iostream>
#include <limits>
#include <memory>
#include <optional>
#include <string>
#include <thread>
#include <vector>

#include <android-base/logging.h>
#include <android-base/scopeguard.h>
#include <cutils/ashmem.h>
#include <cutils/native_handle.h>
#include <fmq/AidlMessageQueue.h>
#include <fmq/ConvertMQDescriptors.h>
#include <fmq/EventFlag.h>
#include <fmq/MessageQueue.h>

#include "fuzzer/FuzzedDataProvider.h"

using aidl::android::hardware::common::fmq::SynchronizedReadWrite;
using aidl::android::hardware::common::fmq::UnsynchronizedWrite;
using android::hardware::kSynchronizedReadWrite;
using android::hardware::kUnsynchronizedWrite;

typedef int32_t payload_t;

// Timeout (in ns) that readers/writers wait during blocking calls: 100,000 ns = 100 us.
static constexpr int kBlockingTimeoutNs = 100000;

/*
 * MessageQueueBase.h contains asserts that fire when memory allocation fails,
 * so we need to cap the queue size at a reasonable limit to avoid tripping
 * them.
 */
static constexpr size_t kAlignment = 8;
static constexpr size_t kMaxNumElements = PAGE_SIZE * 10 / sizeof(payload_t) - kAlignment + 1;
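
/*
 * A worked example of the cap above, assuming 4 KiB pages and the 4-byte
 * payload_t: kMaxNumElements = 4096 * 10 / 4 - 8 + 1 = 10233 elements,
 * i.e. roughly 40 KiB of payload data. Devices with larger pages scale
 * this limit proportionally.
 */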
/*
 * Limit the custom grantor case to one page of memory.
 * If we want to increase this, we need to make sure that every grantor's
 * offset plus extent is less than the size of the page-aligned ashmem region
 * that is created.
 */
static constexpr size_t kMaxCustomGrantorMemoryBytes = PAGE_SIZE;

/*
 * The read counter can be found in the shared memory 16 bytes before the start
 * of the ring buffer.
 */
static constexpr int kReadCounterOffsetBytes = 16;
/*
 * The write counter can be found in the shared memory 8 bytes before the start
 * of the ring buffer.
 */
static constexpr int kWriteCounterOffsetBytes = 8;
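
/*
 * A sketch of the layout implied by the two offsets above and by the grantor
 * positions (READPTRPOS/WRITEPTRPOS/DATAPTRPOS) used below; exact grantor
 * offsets come from the descriptor, but the counters always sit at these
 * positions relative to the ring buffer:
 *
 *   ... | read counter (8 B) | write counter (8 B) | ring buffer ...
 *       ^ ring - 16          ^ ring - 8            ^ ring
 */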

static constexpr int kMaxNumSyncReaders = 1;
static constexpr int kMaxNumUnsyncReaders = 5;
static constexpr int kMaxDataPerReader = 1000;

typedef android::AidlMessageQueue<payload_t, SynchronizedReadWrite> AidlMessageQueueSync;
typedef android::AidlMessageQueue<payload_t, UnsynchronizedWrite> AidlMessageQueueUnsync;
typedef android::hardware::MessageQueue<payload_t, kSynchronizedReadWrite> MessageQueueSync;
typedef android::hardware::MessageQueue<payload_t, kUnsynchronizedWrite> MessageQueueUnsync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, SynchronizedReadWrite>
        AidlMQDescSync;
typedef aidl::android::hardware::common::fmq::MQDescriptor<payload_t, UnsynchronizedWrite>
        AidlMQDescUnsync;
typedef android::hardware::MQDescriptorSync<payload_t> MQDescSync;
typedef android::hardware::MQDescriptorUnsync<payload_t> MQDescUnsync;

// AIDL and HIDL have different ways of accessing the grantors
template <typename Desc>
uint64_t* getCounterPtr(payload_t* start, const Desc& desc, int grantorIndx);
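
/*
 * For reference, the difference the specializations below paper over:
 *   HIDL: desc.grantors()[i].offset  // grantors() is an accessor method
 *   AIDL: desc.grantors[i].offset    // grantors is a plain field
 */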

uint64_t* createCounterPtr(payload_t* start, uint32_t offset, uint32_t data_offset) {
    // start is the address of the beginning of the FMQ data section in memory
    // offset is the overall offset of the counter in the FMQ memory
    // data_offset is the overall offset of the data section in the FMQ memory
    // start - data_offset = beginning address of the FMQ memory
    return reinterpret_cast<uint64_t*>(reinterpret_cast<uint8_t*>(start) - data_offset + offset);
}
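
/*
 * A worked example, assuming the default (non-custom) layout described by
 * kWriteCounterOffsetBytes above: for the write counter, data_offset - offset
 * is 8, so the returned pointer is start - 8, i.e. 8 bytes before the ring
 * buffer.
 */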

uint64_t* getCounterPtr(payload_t* start, const MQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const MQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors()[grantorIndx].offset;
    uint32_t data_offset = desc.grantors()[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescSync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

uint64_t* getCounterPtr(payload_t* start, const AidlMQDescUnsync& desc, int grantorIndx) {
    uint32_t offset = desc.grantors[grantorIndx].offset;
    uint32_t data_offset = desc.grantors[android::hardware::details::DATAPTRPOS].offset;
    return createCounterPtr(start, offset, data_offset);
}

template <typename Queue, typename Desc>
void reader(const Desc& desc, std::vector<uint8_t> readerData, bool userFd) {
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    payload_t* ring = reinterpret_cast<payload_t*>(readMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = fdp.ConsumeIntegralInRange<size_t>(0, kMaxNumElements);
        if (!readMq.beginRead(numElements, &tx)) {
            continue;
        }
        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();

        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* writeCounter =
                        getCounterPtr(ring, desc, android::hardware::details::WRITEPTRPOS);
                *writeCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        // Dereference the transaction pointer so sanitizers can flag an
        // out-of-bounds or unmapped read; the value itself is discarded.
        (void)std::to_string(*firstStart);

        readMq.commitRead(numElements);
    }
}

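/*
 * A note on how the blocking reader/writer pair below avoids hanging the
 * fuzzer: each thread decrements its "NotFinished" counter through a
 * ScopeGuard when it returns, and each loop only continues while the opposite
 * side is still running. Combined with the kBlockingTimeoutNs bound on every
 * blocking call, neither side can wait forever on a peer that already exited.
 */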
template <typename Queue, typename Desc>
void readerBlocking(const Desc& desc, std::vector<uint8_t>& readerData,
                    std::atomic<size_t>& readersNotFinished,
                    std::atomic<size_t>& writersNotFinished) {
    android::base::ScopeGuard guard([&readersNotFinished]() { readersNotFinished--; });
    Queue readMq(desc);
    if (!readMq.isValid()) {
        LOG(ERROR) << "read mq invalid";
        return;
    }
    FuzzedDataProvider fdp(&readerData[0], readerData.size());
    do {
        size_t count = fdp.remaining_bytes()
                               ? fdp.ConsumeIntegralInRange<size_t>(0, readMq.getQuantumCount() + 1)
                               : 1;
        std::vector<payload_t> data;
        data.resize(count);
        readMq.readBlocking(data.data(), count, kBlockingTimeoutNs);
    } while (fdp.remaining_bytes() > sizeof(size_t) && writersNotFinished > 0);
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void readerBlocking<AidlMessageQueueUnsync, AidlMQDescUnsync>(const AidlMQDescUnsync&,
                                                              std::vector<uint8_t>&,
                                                              std::atomic<size_t>&,
                                                              std::atomic<size_t>&) {}
template <>
void readerBlocking<MessageQueueUnsync, MQDescUnsync>(const MQDescUnsync&, std::vector<uint8_t>&,
                                                      std::atomic<size_t>&, std::atomic<size_t>&) {}

template <typename Queue, typename Desc>
void writer(const Desc& desc, Queue& writeMq, FuzzedDataProvider& fdp, bool userFd) {
    payload_t* ring = reinterpret_cast<payload_t*>(writeMq.getRingBufferPtr());
    while (fdp.remaining_bytes()) {
        typename Queue::MemTransaction tx;
        size_t numElements = 1;
        if (!writeMq.beginWrite(numElements, &tx)) {
            // need to consume something so we don't end up looping forever
            fdp.ConsumeIntegral<uint8_t>();
            continue;
        }

        const auto& region = tx.getFirstRegion();
        payload_t* firstStart = region.getAddress();
        // the ring buffer is only next to the read/write counters when there is
        // no user supplied fd
        if (!userFd) {
            if (fdp.ConsumeIntegral<uint8_t>() == 1) {
                uint64_t* readCounter =
                        getCounterPtr(ring, desc, android::hardware::details::READPTRPOS);
                *readCounter = fdp.ConsumeIntegral<uint64_t>();
            }
        }
        *firstStart = fdp.ConsumeIntegral<payload_t>();

        writeMq.commitWrite(numElements);
    }
}

template <typename Queue>
void writerBlocking(Queue& writeMq, FuzzedDataProvider& fdp,
                    std::atomic<size_t>& writersNotFinished,
                    std::atomic<size_t>& readersNotFinished) {
    android::base::ScopeGuard guard([&writersNotFinished]() { writersNotFinished--; });
    while (fdp.remaining_bytes() > sizeof(size_t) && readersNotFinished > 0) {
        size_t count = fdp.ConsumeIntegralInRange<size_t>(0, writeMq.getQuantumCount() + 1);
        std::vector<payload_t> data;
        for (size_t i = 0; i < count; i++) {
            data.push_back(fdp.ConsumeIntegral<payload_t>());
        }
        writeMq.writeBlocking(data.data(), count, kBlockingTimeoutNs);
    }
}

// Can't use blocking calls with Unsync queues (there is a static_assert)
template <>
void writerBlocking<AidlMessageQueueUnsync>(AidlMessageQueueUnsync&, FuzzedDataProvider&,
                                            std::atomic<size_t>&, std::atomic<size_t>&) {}
template <>
void writerBlocking<MessageQueueUnsync>(MessageQueueUnsync&, FuzzedDataProvider&,
                                        std::atomic<size_t>&, std::atomic<size_t>&) {}

// getDesc either dupes the descriptor from an existing queue or, when no
// queue is passed in, fabricates a custom descriptor from fuzzer data.
template <typename Queue, typename Desc>
inline std::optional<Desc> getDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp);

template <typename Queue, typename Desc>
inline std::optional<Desc> getAidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        // get the existing descriptor from the queue
        Desc desc = queue->dupeDesc();
        if (desc.handle.fds[0].get() == -1) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(desc));
        }
    } else {
        // create a custom descriptor
        std::vector<aidl::android::hardware::common::fmq::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegralInRange<int32_t>(-2, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<int32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<int64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // the ashmem region is PAGE_SIZE bytes; we need to make sure all of
            // the pointers and the data region fit inside
            if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt;
        }

        android::base::unique_fd fd(
                ashmem_create_region("AidlCustomGrantors", kMaxCustomGrantorMemoryBytes));
        ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
        aidl::android::hardware::common::NativeHandle handle;
        handle.fds.emplace_back(fd.get());

        return std::make_optional<Desc>(
                {grantors, std::move(handle), sizeof(payload_t), fdp.ConsumeBool()});
    }
}

template <>
inline std::optional<AidlMQDescSync> getDesc(std::unique_ptr<AidlMessageQueueSync>& queue,
                                             FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueSync, AidlMQDescSync>(queue, fdp);
}

template <>
inline std::optional<AidlMQDescUnsync> getDesc(std::unique_ptr<AidlMessageQueueUnsync>& queue,
                                               FuzzedDataProvider& fdp) {
    return getAidlDesc<AidlMessageQueueUnsync, AidlMQDescUnsync>(queue, fdp);
}

template <typename Queue, typename Desc>
inline std::optional<Desc> getHidlDesc(std::unique_ptr<Queue>& queue, FuzzedDataProvider& fdp) {
    if (queue) {
        auto desc = queue->getDesc();
        if (!desc->isHandleValid()) {
            return std::nullopt;
        } else {
            return std::make_optional(std::move(*desc));
        }
    } else {
        // create a custom descriptor
        std::vector<android::hardware::GrantorDescriptor> grantors;
        size_t numGrantors = fdp.ConsumeIntegralInRange<size_t>(0, 4);
        for (size_t i = 0; i < numGrantors; i++) {
            grantors.push_back({fdp.ConsumeIntegral<uint32_t>() /* flags */,
                                fdp.ConsumeIntegralInRange<uint32_t>(0, 2) /* fdIndex */,
                                fdp.ConsumeIntegralInRange<uint32_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* offset */,
                                fdp.ConsumeIntegralInRange<uint64_t>(
                                        0, kMaxCustomGrantorMemoryBytes) /* extent */});
            // the ashmem region is PAGE_SIZE bytes; we need to make sure all of
            // the pointers and the data region fit inside
            if (grantors.back().offset + grantors.back().extent > PAGE_SIZE) return std::nullopt;
        }

        native_handle_t* handle = native_handle_create(1, 0);
        int ashmemFd = ashmem_create_region("HidlCustomGrantors", kMaxCustomGrantorMemoryBytes);
        ashmem_set_prot_region(ashmemFd, PROT_READ | PROT_WRITE);
        handle->data[0] = ashmemFd;

        return std::make_optional<Desc>(grantors, handle, sizeof(payload_t));
    }
}

template <>
inline std::optional<MQDescSync> getDesc(std::unique_ptr<MessageQueueSync>& queue,
                                         FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueSync, MQDescSync>(queue, fdp);
}

template <>
inline std::optional<MQDescUnsync> getDesc(std::unique_ptr<MessageQueueUnsync>& queue,
                                           FuzzedDataProvider& fdp) {
    return getHidlDesc<MessageQueueUnsync, MQDescUnsync>(queue, fdp);
}

template <typename Queue, typename Desc>
void fuzzWithReaders(std::vector<uint8_t>& writerData,
                     std::vector<std::vector<uint8_t>>& readerData, bool blocking) {
    FuzzedDataProvider fdp(&writerData[0], writerData.size());
    bool evFlag = blocking || fdp.ConsumeBool();
    size_t numElements = fdp.ConsumeIntegralInRange<size_t>(1, kMaxNumElements);
    size_t bufferSize = numElements * sizeof(payload_t);
    bool userFd = fdp.ConsumeBool();
    bool manualGrantors = fdp.ConsumeBool();
    std::unique_ptr<Queue> writeMq = nullptr;
    if (manualGrantors) {
        std::optional<Desc> customDesc(getDesc<Queue, Desc>(writeMq, fdp));
        if (customDesc) {
            writeMq = std::make_unique<Queue>(*customDesc);
        }
    } else {
        android::base::unique_fd dataFd;
        if (userFd) {
            // run the test with our own data region
            dataFd.reset(::ashmem_create_region("CustomData", bufferSize));
        }
        writeMq = std::make_unique<Queue>(numElements, evFlag, std::move(dataFd), bufferSize);
    }

    if (writeMq == nullptr || !writeMq->isValid()) {
        return;
    }
    // the queue is valid at this point, so fetching its descriptor must succeed
    const std::optional<Desc> desc(getDesc<Queue, Desc>(writeMq, fdp));
    CHECK(desc != std::nullopt);

    std::atomic<size_t> readersNotFinished = readerData.size();
    std::atomic<size_t> writersNotFinished = 1;
    std::vector<std::thread> readers;
    for (size_t i = 0; i < readerData.size(); i++) {
        if (blocking) {
            readers.emplace_back(readerBlocking<Queue, Desc>, std::ref(*desc),
                                 std::ref(readerData[i]), std::ref(readersNotFinished),
                                 std::ref(writersNotFinished));
        } else {
            readers.emplace_back(reader<Queue, Desc>, std::ref(*desc), std::ref(readerData[i]),
                                 userFd);
        }
    }

    if (blocking) {
        writerBlocking<Queue>(*writeMq, fdp, writersNotFinished, readersNotFinished);
    } else {
        writer<Queue>(*desc, *writeMq, fdp, userFd);
    }

    for (auto& reader : readers) {
        reader.join();
    }
}

extern "C" int LLVMFuzzerTestOneInput(const uint8_t* data, size_t size) {
    if (size < 1 || size > 50000) {
        return 0;
    }
    FuzzedDataProvider fdp(data, size);

    bool fuzzSync = fdp.ConsumeBool();
    std::vector<std::vector<uint8_t>> readerData;
    uint8_t numReaders = fuzzSync ? fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumSyncReaders)
                                  : fdp.ConsumeIntegralInRange<uint8_t>(0, kMaxNumUnsyncReaders);
    for (int i = 0; i < numReaders; i++) {
        readerData.emplace_back(fdp.ConsumeBytes<uint8_t>(kMaxDataPerReader));
    }
    bool fuzzBlocking = fdp.ConsumeBool();
    std::vector<uint8_t> writerData = fdp.ConsumeRemainingBytes<uint8_t>();
    if (fuzzSync) {
        fuzzWithReaders<MessageQueueSync, MQDescSync>(writerData, readerData, fuzzBlocking);
        fuzzWithReaders<AidlMessageQueueSync, AidlMQDescSync>(writerData, readerData, fuzzBlocking);
    } else {
        fuzzWithReaders<MessageQueueUnsync, MQDescUnsync>(writerData, readerData, false);
        fuzzWithReaders<AidlMessageQueueUnsync, AidlMQDescUnsync>(writerData, readerData, false);
    }

    return 0;
}