1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <endian.h>
21 #include <errno.h>
22 #include <fcntl.h>
23 #include <inttypes.h>
24 #include <pthread.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/mman.h>
29 #include <sys/resource.h>
30 #include <sys/stat.h>
31 #include <sys/types.h>
32 #include <unistd.h>
33 #include <algorithm>
34
35 #include <binder/Binder.h>
36 #include <binder/BpBinder.h>
37 #include <binder/Functional.h>
38 #include <binder/IPCThreadState.h>
39 #include <binder/Parcel.h>
40 #include <binder/ProcessState.h>
41 #include <binder/Stability.h>
42 #include <binder/Status.h>
43 #include <binder/TextOutput.h>
44
45 #ifndef BINDER_DISABLE_BLOB
46 #include <cutils/ashmem.h>
47 #endif
48 #include <utils/String16.h>
49 #include <utils/String8.h>
50
51 #include "OS.h"
52 #include "RpcState.h"
53 #include "Static.h"
54 #include "Utils.h"
55
56 // A lot of code in this file uses definitions from the
57 // Linux kernel header for Binder <linux/android/binder.h>
58 // which is included indirectly via "binder_module.h".
59 // Non-Linux OSes do not have that header, so libbinder should be
60 // built for those targets without kernel binder support, i.e.,
61 // without BINDER_WITH_KERNEL_IPC. For this reason, all code in this
62 // file that depends on kernel binder, including the header itself,
63 // is conditional on BINDER_WITH_KERNEL_IPC.
64 #ifdef BINDER_WITH_KERNEL_IPC
65 #include <linux/sched.h>
66 #include "binder_module.h"
67 #else // BINDER_WITH_KERNEL_IPC
68 // Needed by {read,write}Pointer
69 typedef uintptr_t binder_uintptr_t;
70 #endif // BINDER_WITH_KERNEL_IPC
71
72 #ifdef __BIONIC__
73 #include <android/fdsan.h>
74 #endif
75
76 #define LOG_REFS(...)
77 // #define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
78 #define LOG_ALLOC(...)
79 // #define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
80
81 // ---------------------------------------------------------------------------
82
83 // This macro should never be used at runtime, as a too large value
84 // of s could cause an integer overflow. Instead, you should always
85 // use the wrapper function pad_size()
86 #define PAD_SIZE_UNSAFE(s) (((s) + 3) & ~3UL)
87
pad_size(size_t s)88 static size_t pad_size(size_t s) {
89 if (s > (std::numeric_limits<size_t>::max() - 3)) {
90 LOG_ALWAYS_FATAL("pad size too big %zu", s);
91 }
92 return PAD_SIZE_UNSAFE(s);
93 }
94
95 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
96 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
97
98 namespace android {
99
100 using namespace android::binder::impl;
101 using binder::borrowed_fd;
102 using binder::unique_fd;
103
// many things compile this into prebuilts on the stack
#ifdef __LP64__
static_assert(sizeof(Parcel) == 120);
#else
static_assert(sizeof(Parcel) == 60);
#endif

// Process-wide accounting of live Parcel objects and their buffer bytes,
// exposed via getGlobalAllocCount()/getGlobalAllocSize().
static std::atomic<size_t> gParcelGlobalAllocCount;
static std::atomic<size_t> gParcelGlobalAllocSize;

// Maximum number of file descriptors per Parcel.
constexpr size_t kMaxFds = 1024;

// Maximum size of a blob to transfer in-place.
[[maybe_unused]] static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
119
120 #if defined(__BIONIC__)
FdTag(int fd,const void * old_addr,const void * new_addr)121 static void FdTag(int fd, const void* old_addr, const void* new_addr) {
122 if (android_fdsan_exchange_owner_tag) {
123 uint64_t old_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
124 reinterpret_cast<uint64_t>(old_addr));
125 uint64_t new_tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
126 reinterpret_cast<uint64_t>(new_addr));
127 android_fdsan_exchange_owner_tag(fd, old_tag, new_tag);
128 }
129 }
FdTagClose(int fd,const void * addr)130 static void FdTagClose(int fd, const void* addr) {
131 if (android_fdsan_close_with_tag) {
132 uint64_t tag = android_fdsan_create_owner_tag(ANDROID_FDSAN_OWNER_TYPE_PARCEL,
133 reinterpret_cast<uint64_t>(addr));
134 android_fdsan_close_with_tag(fd, tag);
135 } else {
136 close(fd);
137 }
138 }
139 #else
FdTag(int fd,const void * old_addr,const void * new_addr)140 static void FdTag(int fd, const void* old_addr, const void* new_addr) {
141 (void)fd;
142 (void)old_addr;
143 (void)new_addr;
144 }
// Without fdsan there is no ownership tag to verify; just close the fd.
static void FdTagClose(int fd, const void* addr) {
    static_cast<void>(addr);
    close(fd);
}
149 #endif
150
// Discriminator written ahead of a blob payload: the data either follows
// inline in the parcel, or lives in an ashmem region (immutable or mutable).
enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};
156
157 #ifdef BINDER_WITH_KERNEL_IPC
// Takes a strong reference on the binder contained in a flattened object (or
// tags an owned fd for fdsan), on behalf of the Parcel identified by |who|.
static void acquire_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who, bool tagFds) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            // Local binder: the cookie field carries the IBinder pointer.
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote binder: resolve the kernel handle to a proxy first.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // A non-zero cookie marks an fd owned by this parcel.
            if (tagFds && obj.cookie != 0) { // owned
                FdTag(obj.handle, nullptr, who);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x to acquire", obj.hdr.type);
}
185
// Inverse of acquire_object(): drops the strong reference (or closes the
// owned fd) held on behalf of the Parcel identified by |who|.
static void release_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // note: this path is not used when mOwner, so the tag is also released
            // in 'closeFileDescriptors'
            if (obj.cookie != 0) { // owned
                FdTagClose(obj.handle, who);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x to release", obj.hdr.type);
}
215 #endif // BINDER_WITH_KERNEL_IPC
216
toRawFd(const std::variant<unique_fd,borrowed_fd> & v)217 static int toRawFd(const std::variant<unique_fd, borrowed_fd>& v) {
218 return std::visit([](const auto& fd) { return fd.get(); }, v);
219 }
220
// RPC-format parcels are always bound to a live session; a null session is a
// programming error, so fail hard here.
Parcel::RpcFields::RpcFields(const sp<RpcSession>& session) : mSession(session) {
    LOG_ALWAYS_FATAL_IF(mSession == nullptr);
}
224
finishFlattenBinder(const sp<IBinder> & binder)225 status_t Parcel::finishFlattenBinder(const sp<IBinder>& binder)
226 {
227 internal::Stability::tryMarkCompilationUnit(binder.get());
228 int16_t rep = internal::Stability::getRepr(binder.get());
229 return writeInt32(rep);
230 }
231
finishUnflattenBinder(const sp<IBinder> & binder,sp<IBinder> * out) const232 status_t Parcel::finishUnflattenBinder(
233 const sp<IBinder>& binder, sp<IBinder>* out) const
234 {
235 int32_t stability;
236 status_t status = readInt32(&stability);
237 if (status != OK) return status;
238
239 status = internal::Stability::setRepr(binder.get(), static_cast<int16_t>(stability),
240 true /*log*/);
241 if (status != OK) return status;
242
243 *out = binder;
244 return OK;
245 }
246
247 #ifdef BINDER_WITH_KERNEL_IPC
schedPolicyMask(int policy,int priority)248 static constexpr inline int schedPolicyMask(int policy, int priority) {
249 return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
250 }
251 #endif // BINDER_WITH_KERNEL_IPC
252
// Serializes |binder| into this parcel. RPC parcels register the binder with
// the session and write a (type, address) pair; kernel parcels write a
// flat_binder_object. Both formats append the stability word last.
status_t Parcel::flattenBinder(const sp<IBinder>& binder) {
    BBinder* local = nullptr;
    if (binder) local = binder->localBinder();
    // Record that this local binder has passed through a parcel at least once.
    if (local) local->setParceled();

    if (const auto* rpcFields = maybeRpcFields()) {
        if (binder) {
            status_t status = writeInt32(RpcFields::TYPE_BINDER); // non-null
            if (status != OK) return status;
            uint64_t address;
            // TODO(b/167966510): need to undo this if the Parcel is not sent
            status = rpcFields->mSession->state()->onBinderLeaving(rpcFields->mSession, binder,
                                                                   &address);
            if (status != OK) return status;
            status = writeUint64(address);
            if (status != OK) return status;
        } else {
            status_t status = writeInt32(RpcFields::TYPE_BINDER_NULL); // null
            if (status != OK) return status;
        }
        return finishFlattenBinder(binder);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;

    // Default scheduling bits unless the local binder overrides them below.
    int schedBits = 0;
    if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
        schedBits = schedPolicyMask(SCHED_NORMAL, 19);
    }

    if (binder != nullptr) {
        if (!local) {
            // Proxy binder: flatten as a kernel handle.
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            } else {
                if (proxy->isRpcBinder()) {
                    ALOGE("Sending a socket binder over kernel binder is prohibited");
                    return INVALID_OPERATION;
                }
            }
            const int32_t handle = proxy ? proxy->getPrivateAccessor().binderHandle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.flags = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local binder: flatten its weak-ref and object pointers directly.
#if __linux__
            int policy = local->getMinSchedulerPolicy();
            int priority = local->getMinSchedulerPriority();
#else
            int policy = 0;
            int priority = 0;
#endif

            if (policy != 0 || priority != 0) {
                // override value, since it is set explicitly
                schedBits = schedPolicyMask(policy, priority);
            }
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            if (local->isInheritRt()) {
                obj.flags |= FLAT_BINDER_FLAG_INHERIT_RT;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // Null binder: a zeroed-out BINDER_TYPE_BINDER entry.
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.flags = 0;
        obj.binder = 0;
        obj.cookie = 0;
    }

    obj.flags |= schedBits;

    status_t status = writeObject(obj, false);
    if (status != OK) return status;

    return finishFlattenBinder(binder);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
343
// Reads a binder written by flattenBinder() from the current read position.
status_t Parcel::unflattenBinder(sp<IBinder>* out) const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        int32_t isPresent;
        status_t status = readInt32(&isPresent);
        if (status != OK) return status;

        sp<IBinder> binder;

        if (isPresent & 1) {
            // Non-null: resolve the session-scoped address back to a binder,
            // then return any excess refs the remote sent with it.
            uint64_t addr;
            if (status_t status = readUint64(&addr); status != OK) return status;
            if (status_t status =
                        rpcFields->mSession->state()->onBinderEntering(rpcFields->mSession, addr,
                                                                       &binder);
                status != OK)
                return status;
            if (status_t status =
                        rpcFields->mSession->state()->flushExcessBinderRefs(rpcFields->mSession,
                                                                            addr, binder);
                status != OK)
                return status;
        }

        return finishUnflattenBinder(binder, out);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    const flat_binder_object* flat = readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER: {
                // Local object: the cookie is the IBinder pointer itself.
                sp<IBinder> binder =
                        sp<IBinder>::fromExisting(reinterpret_cast<IBinder*>(flat->cookie));
                return finishUnflattenBinder(binder, out);
            }
            case BINDER_TYPE_HANDLE: {
                // Remote object: look up (or create) the proxy for the handle.
                sp<IBinder> binder =
                        ProcessState::self()->getStrongProxyForHandle(flat->handle);
                return finishUnflattenBinder(binder, out);
            }
        }
    }
    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
394
395 // ---------------------------------------------------------------------------
396
// Constructs an empty parcel; all bookkeeping is set up in initState().
Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}
402
// Releases the data buffer and any held objects (binder refs, owned fds).
Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}
408
getGlobalAllocSize()409 size_t Parcel::getGlobalAllocSize() {
410 return gParcelGlobalAllocSize.load();
411 }
412
getGlobalAllocCount()413 size_t Parcel::getGlobalAllocCount() {
414 return gParcelGlobalAllocCount.load();
415 }
416
// Raw pointer to the parcel's data buffer (may be null when empty).
const uint8_t* Parcel::data() const
{
    return mData;
}
421
dataSize() const422 size_t Parcel::dataSize() const
423 {
424 return (mDataSize > mDataPos ? mDataSize : mDataPos);
425 }
426
// Size of the written data only, ignoring the current cursor position
// (contrast with dataSize()).
size_t Parcel::dataBufferSize() const {
    return mDataSize;
}
430
dataAvail() const431 size_t Parcel::dataAvail() const
432 {
433 size_t result = dataSize() - dataPosition();
434 if (result > INT32_MAX) {
435 LOG_ALWAYS_FATAL("result too big: %zu", result);
436 }
437 return result;
438 }
439
// Current read/write cursor offset within the data buffer.
size_t Parcel::dataPosition() const
{
    return mDataPos;
}
444
// Allocated capacity of the data buffer (>= dataSize()).
size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}
449
setDataSize(size_t size)450 status_t Parcel::setDataSize(size_t size)
451 {
452 if (size > INT32_MAX) {
453 // don't accept size_t values which may have come from an
454 // inadvertent conversion from a negative int.
455 return BAD_VALUE;
456 }
457
458 status_t err;
459 err = continueWrite(size);
460 if (err == NO_ERROR) {
461 mDataSize = size;
462 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
463 }
464 return err;
465 }
466
// Moves the read/write cursor to |pos|. Positions beyond INT32_MAX are fatal
// (they usually indicate a negative int converted to size_t).
void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    if (const auto* kernelFields = maybeKernelFields()) {
        // Moving the cursor invalidates the cached object-scan state used by
        // readObject()'s lookups.
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
    }
}
481
setDataCapacity(size_t size)482 status_t Parcel::setDataCapacity(size_t size)
483 {
484 if (size > INT32_MAX) {
485 // don't accept size_t values which may have come from an
486 // inadvertent conversion from a negative int.
487 return BAD_VALUE;
488 }
489
490 if (size > mDataCapacity) return continueWrite(size);
491 return NO_ERROR;
492 }
493
setData(const uint8_t * buffer,size_t len)494 status_t Parcel::setData(const uint8_t* buffer, size_t len)
495 {
496 if (len > INT32_MAX) {
497 // don't accept size_t values which may have come from an
498 // inadvertent conversion from a negative int.
499 return BAD_VALUE;
500 }
501
502 status_t err = restartWrite(len);
503 if (err == NO_ERROR) {
504 memcpy(const_cast<uint8_t*>(data()), buffer, len);
505 mDataSize = len;
506 if (auto* kernelFields = maybeKernelFields()) {
507 kernelFields->mFdsKnown = false;
508 }
509 }
510 return err;
511 }
512
// Copies |len| bytes starting at |offset| from |parcel| to this parcel's
// current write position, duplicating any binder objects or file descriptors
// whose flattened representation falls entirely within the copied range.
// Both parcels must use the same format (kernel vs RPC) and, for RPC, the
// same session.
status_t Parcel::appendFrom(const Parcel* parcel, size_t offset, size_t len) {
    if (isForRpc() != parcel->isForRpc()) {
        ALOGE("Cannot append Parcel from one context to another. They may be different formats, "
              "and objects are specific to a context.");
        return BAD_TYPE;
    }
    if (isForRpc() && maybeRpcFields()->mSession != parcel->maybeRpcFields()->mSession) {
        ALOGE("Cannot append Parcels from different sessions");
        return BAD_TYPE;
    }

    status_t err;
    const uint8_t* data = parcel->mData;
    // Narrowing to int is safe: positions are capped at INT32_MAX
    // (see setDataPosition).
    int startPos = mDataPos;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    if ((mDataPos + len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        auto* otherKernelFields = parcel->maybeKernelFields();
        LOG_ALWAYS_FATAL_IF(otherKernelFields == nullptr);

        const binder_size_t* objects = otherKernelFields->mObjects;
        size_t size = otherKernelFields->mObjectsSize;
        // Count objects in range: only objects lying entirely inside
        // [offset, offset+len) are carried over.
        int firstIndex = -1, lastIndex = -2;
        for (int i = 0; i < (int)size; i++) {
            size_t off = objects[i];
            if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
                if (firstIndex == -1) {
                    firstIndex = i;
                }
                lastIndex = i;
            }
        }
        int numObjects = lastIndex - firstIndex + 1;
        if (numObjects > 0) {
            const sp<ProcessState> proc(ProcessState::self());
            // grow objects (3/2 growth factor, with overflow checks at each
            // step of the computation)
            if (kernelFields->mObjectsCapacity < kernelFields->mObjectsSize + numObjects) {
                if ((size_t)numObjects > SIZE_MAX - kernelFields->mObjectsSize)
                    return NO_MEMORY; // overflow
                if (kernelFields->mObjectsSize + numObjects > SIZE_MAX / 3)
                    return NO_MEMORY; // overflow
                size_t newSize = ((kernelFields->mObjectsSize + numObjects) * 3) / 2;
                if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
                binder_size_t* objects = (binder_size_t*)realloc(kernelFields->mObjects,
                                                                 newSize * sizeof(binder_size_t));
                if (objects == (binder_size_t*)nullptr) {
                    return NO_MEMORY;
                }
                kernelFields->mObjects = objects;
                kernelFields->mObjectsCapacity = newSize;
            }

            // append and acquire objects
            int idx = kernelFields->mObjectsSize;
            for (int i = firstIndex; i <= lastIndex; i++) {
                // Rebase the object offset from the source range onto our
                // write position.
                size_t off = objects[i] - offset + startPos;
                kernelFields->mObjects[idx++] = off;
                kernelFields->mObjectsSize++;

                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(mData + off);

                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // If this is a file descriptor, we need to dup it so the
                    // new Parcel now owns its own fd, and can declare that we
                    // officially know we have fds.
                    // NOTE(review): the fcntl result is not checked; on
                    // failure -1 would be stored as the handle — confirm
                    // downstream consumers tolerate this.
                    flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                    flat->cookie = 1;
                    kernelFields->mHasFds = kernelFields->mFdsKnown = true;
                    if (!mAllowFds) {
                        // Sticky error: the loop keeps running so every copied
                        // object is still acquired; FDS_NOT_ALLOWED is
                        // returned at the end.
                        err = FDS_NOT_ALLOWED;
                    }
                }

                acquire_object(proc, *flat, this, true /*tagFds*/);
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else {
        auto* rpcFields = maybeRpcFields();
        LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);
        auto* otherRpcFields = parcel->maybeRpcFields();
        if (otherRpcFields == nullptr) {
            return BAD_TYPE;
        }
        if (rpcFields->mSession != otherRpcFields->mSession) {
            return BAD_TYPE;
        }

        // The loop below seeks around in the copied region to patch fd
        // indices; restore the cursor no matter how we exit.
        const size_t savedDataPos = mDataPos;
        auto scopeGuard = make_scope_guard([&]() { mDataPos = savedDataPos; });

        rpcFields->mObjectPositions.reserve(otherRpcFields->mObjectPositions.size());
        if (otherRpcFields->mFds != nullptr) {
            if (rpcFields->mFds == nullptr) {
                rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
            }
            rpcFields->mFds->reserve(otherRpcFields->mFds->size());
        }
        for (size_t i = 0; i < otherRpcFields->mObjectPositions.size(); i++) {
            const binder_size_t objPos = otherRpcFields->mObjectPositions[i];
            if (offset <= objPos && objPos < offset + len) {
                size_t newDataPos = objPos - offset + startPos;
                rpcFields->mObjectPositions.push_back(newDataPos);

                mDataPos = newDataPos;
                int32_t objectType;
                if (status_t status = readInt32(&objectType); status != OK) {
                    return status;
                }
                if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    continue;
                }

                if (!mAllowFds) {
                    return FDS_NOT_ALLOWED;
                }

                // Read FD, duplicate, and add to list.
                int32_t fdIndex;
                if (status_t status = readInt32(&fdIndex); status != OK) {
                    return status;
                }
                int oldFd = toRawFd(otherRpcFields->mFds->at(fdIndex));
                // To match kernel binder behavior, we always dup, even if the
                // FD was unowned in the source parcel.
                // NOTE(review): on dup failure this logs a warning but still
                // stores a -1 fd in the list — confirm that is intended.
                int newFd = -1;
                if (status_t status = binder::os::dupFileDescriptor(oldFd, &newFd); status != OK) {
                    ALOGW("Failed to duplicate file descriptor %d: %s", oldFd,
                          statusToString(status).c_str());
                }
                rpcFields->mFds->emplace_back(unique_fd(newFd));
                // Fixup the index in the data.
                mDataPos = newDataPos + 4;
                if (status_t status = writeInt32(rpcFields->mFds->size() - 1); status != OK) {
                    return status;
                }
            }
        }
    }

    return err;
}
692
compareData(const Parcel & other) const693 int Parcel::compareData(const Parcel& other) const {
694 size_t size = dataSize();
695 if (size != other.dataSize()) {
696 return size < other.dataSize() ? -1 : 1;
697 }
698 return memcmp(data(), other.data(), size);
699 }
700
// memcmp of |len| bytes at |thisOffset| in this parcel against |otherOffset|
// in |other|; the comparison result goes to *result. Returns BAD_VALUE when
// either window overflows or extends past its parcel's data.
status_t Parcel::compareDataInRange(size_t thisOffset, const Parcel& other, size_t otherOffset,
                                    size_t len, int* result) const {
    if (len > INT32_MAX || thisOffset > INT32_MAX || otherOffset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    // Overflow-checked bounds test for this parcel's window.
    size_t thisLimit;
    if (__builtin_add_overflow(thisOffset, len, &thisLimit) || thisLimit > mDataSize) {
        return BAD_VALUE;
    }
    // Same for the other parcel's window.
    size_t otherLimit;
    if (__builtin_add_overflow(otherOffset, len, &otherLimit) || otherLimit > other.mDataSize) {
        return BAD_VALUE;
    }
    *result = memcmp(data() + thisOffset, other.data() + otherOffset, len);
    return NO_ERROR;
}
719
// Whether file descriptors may currently be written into this parcel.
bool Parcel::allowFds() const
{
    return mAllowFds;
}
724
pushAllowFds(bool allowFds)725 bool Parcel::pushAllowFds(bool allowFds)
726 {
727 const bool origValue = mAllowFds;
728 if (!allowFds) {
729 mAllowFds = false;
730 }
731 return origValue;
732 }
733
// Restores the fd-allowed policy saved by an earlier pushAllowFds().
void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}
738
// Returns whether this parcel currently contains any file descriptors.
bool Parcel::hasFileDescriptors() const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC parcels keep their fds in an explicit side list.
        return rpcFields->mFds != nullptr && !rpcFields->mFds->empty();
    }
    auto* kernelFields = maybeKernelFields();
    // Lazily (re)scan the flattened objects when the cached answer is stale
    // (e.g. after setData()).
    if (!kernelFields->mFdsKnown) {
        scanForFds();
    }
    return kernelFields->mHasFds;
}
750
hasBinders(bool * result) const751 status_t Parcel::hasBinders(bool* result) const {
752 status_t status = hasBindersInRange(0, dataSize(), result);
753 ALOGE_IF(status != NO_ERROR, "Error %d calling hasBindersInRange()", status);
754 return status;
755 }
756
// Debug helper: re-reads every local (BINDER_TYPE_BINDER) object in the
// parcel and returns the resulting strong references. The read cursor is
// saved and restored around the scan. Kernel-format parcels only.
std::vector<sp<IBinder>> Parcel::debugReadAllStrongBinders() const {
    std::vector<sp<IBinder>> ret;

#ifdef BINDER_WITH_KERNEL_IPC
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return ret;
    }

    size_t initPosition = dataPosition();
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        const flat_binder_object* flat =
                reinterpret_cast<const flat_binder_object*>(mData + offset);
        if (flat->hdr.type != BINDER_TYPE_BINDER) continue;

        // Re-read through the normal path so stability is applied too.
        setDataPosition(offset);

        sp<IBinder> binder = readStrongBinder();
        if (binder != nullptr) ret.push_back(binder);
    }

    setDataPosition(initPosition);
#endif // BINDER_WITH_KERNEL_IPC

    return ret;
}
784
// Debug helper: collects the raw fd value of every file descriptor stored in
// the parcel (both kernel and RPC formats). The read cursor is saved and
// restored around the kernel-format scan.
std::vector<int> Parcel::debugReadAllFileDescriptors() const {
    std::vector<int> ret;

    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t initPosition = dataPosition();
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            binder_size_t offset = kernelFields->mObjects[i];
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + offset);
            if (flat->hdr.type != BINDER_TYPE_FD) continue;

            setDataPosition(offset);

            // The object was identified as an fd above, so a failed read here
            // indicates internal corruption.
            int fd = readFileDescriptor();
            LOG_ALWAYS_FATAL_IF(fd == -1);
            ret.push_back(fd);
        }
        setDataPosition(initPosition);
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
#endif
    } else if (const auto* rpcFields = maybeRpcFields(); rpcFields && rpcFields->mFds) {
        // RPC parcels already hold their fds in a side list.
        for (const auto& fd : *rpcFields->mFds) {
            ret.push_back(toRawFd(fd));
        }
    }

    return ret;
}
816
// Sets *result to true if any flattened binder (local or remote handle) lies
// entirely within [offset, offset+len). Only supported for kernel-format
// parcels; RPC parcels return INVALID_OPERATION.
status_t Parcel::hasBindersInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    // Overflow-checked bounds test for the requested window.
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // Past the window: with sorted offsets nothing later can
                // match, otherwise later entries might still fall inside.
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_BINDER || flat->hdr.type == BINDER_TYPE_HANDLE) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (maybeRpcFields()) {
        return INVALID_OPERATION;
    }
    return NO_ERROR;
}
857
// Sets *result to true if any file descriptor object lies entirely within
// [offset, offset+len). Supports both kernel and RPC parcel formats.
status_t Parcel::hasFileDescriptorsInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    // Overflow-checked bounds test for the requested window.
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // Past the window: with sorted offsets nothing later can
                // match, otherwise later entries might still fall inside.
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        // RPC format: check the recorded object positions for fd markers.
        for (uint32_t pos : rpcFields->mObjectPositions) {
            if (offset <= pos && pos < limit) {
                const auto* type = reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                if (*type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    *result = true;
                    break;
                }
            }
        }
    }
    return NO_ERROR;
}
906
// Marks the parcel as carrying sensitive data: its buffer will be zeroed on
// deallocation rather than simply freed.
void Parcel::markSensitive() const
{
    mDeallocZero = true;
}
911
markForBinder(const sp<IBinder> & binder)912 void Parcel::markForBinder(const sp<IBinder>& binder) {
913 LOG_ALWAYS_FATAL_IF(mData != nullptr, "format must be set before data is written");
914
915 if (binder && binder->remoteBinder() && binder->remoteBinder()->isRpcBinder()) {
916 markForRpc(binder->remoteBinder()->getPrivateAccessor().rpcSession());
917 }
918 }
919
// Switches this parcel to the RPC wire format, bound to |session|. Must be
// called before any data is written (unless reusing an IPC-owned buffer).
void Parcel::markForRpc(const sp<RpcSession>& session) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr && mOwner == nullptr,
                        "format must be set before data is written OR on IPC data");

    mVariantFields.emplace<RpcFields>(session);
}
926
// True when this parcel uses the RPC (socket) wire format rather than the
// kernel binder format.
bool Parcel::isForRpc() const {
    return std::holds_alternative<RpcFields>(mVariantFields);
}
930
updateWorkSourceRequestHeaderPosition() const931 void Parcel::updateWorkSourceRequestHeaderPosition() const {
932 auto* kernelFields = maybeKernelFields();
933 if (kernelFields == nullptr) {
934 return;
935 }
936
937 // Only update the request headers once. We only want to point
938 // to the first headers read/written.
939 if (!kernelFields->mRequestHeaderPresent) {
940 kernelFields->mWorkSourceRequestHeaderPosition = dataPosition();
941 kernelFields->mRequestHeaderPresent = true;
942 }
943 }
944
#ifdef BINDER_WITH_KERNEL_IPC

#if defined(__ANDROID__)

// Interface-token header word identifying which image variant (vendor,
// recovery, or system) built this code.
#if defined(__ANDROID_VNDK__)
constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
#elif defined(__ANDROID_RECOVERY__)
constexpr int32_t kHeader = B_PACK_CHARS('R', 'E', 'C', 'O');
#else
constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
#endif

#else // ANDROID not defined

// If kernel binder is used in new environments, we need to make sure it's separated
// out and has a separate header.
constexpr int32_t kHeader = B_PACK_CHARS('U', 'N', 'K', 'N');
#endif

#endif // BINDER_WITH_KERNEL_IPC
965
966 // Write RPC headers. (previously just the interface token)
// Convenience overload: forwards the String16's characters and length.
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    return writeInterfaceToken(interface.c_str(), interface.size());
}
971
// Writes the RPC header (strict-mode policy, work-source uid, build-variant
// header) for kernel-format parcels, then the interface name for all formats.
status_t Parcel::writeInterfaceToken(const char16_t* str, size_t len) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        const IPCThreadState* threadState = IPCThreadState::self();
        // NOTE(review): the writeInt32 results below are ignored — presumably
        // a failed header write surfaces later as a malformed parcel; confirm.
        writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
        // Remember where the work-source uid lives so it can be patched later
        // (see replaceCallingWorkSourceUid).
        updateWorkSourceRequestHeaderPosition();
        writeInt32(threadState->shouldPropagateWorkSource() ? threadState->getCallingWorkSourceUid()
                                                            : IPCThreadState::kUnsetWorkSource);
        writeInt32(kHeader);
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // currently the interface identification token is just its name as a string
    return writeString16(str, len);
}
991
replaceCallingWorkSourceUid(uid_t uid)992 bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
993 {
994 auto* kernelFields = maybeKernelFields();
995 if (kernelFields == nullptr) {
996 return false;
997 }
998 if (!kernelFields->mRequestHeaderPresent) {
999 return false;
1000 }
1001
1002 const size_t initialPosition = dataPosition();
1003 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
1004 status_t err = writeInt32(uid);
1005 setDataPosition(initialPosition);
1006 return err == NO_ERROR;
1007 }
1008
readCallingWorkSourceUid() const1009 uid_t Parcel::readCallingWorkSourceUid() const
1010 {
1011 auto* kernelFields = maybeKernelFields();
1012 if (kernelFields == nullptr) {
1013 return false;
1014 }
1015 if (!kernelFields->mRequestHeaderPresent) {
1016 return IPCThreadState::kUnsetWorkSource;
1017 }
1018
1019 const size_t initialPosition = dataPosition();
1020 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
1021 uid_t uid = readInt32();
1022 setDataPosition(initialPosition);
1023 return uid;
1024 }
1025
// Verifies the parcel's interface token against |binder|'s descriptor.
// |binder| must be non-null.
bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

// Convenience overload: forwards to the (ptr, len, threadState) variant.
bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    return enforceInterface(interface.c_str(), interface.size(), threadState);
}
1036
// Consumes the headers written by writeInterfaceToken() — strict-mode
// policy, work-source uid, and the libbinder-copy header — then verifies
// that the interface name stored in the parcel matches |interface| (length
// |len| in char16_t units). The header words are read in the same order
// they were written; do not reorder. Returns false on a header or
// interface mismatch, except under mServiceFuzzing where mismatches are
// tolerated to preserve fuzz coverage.
bool Parcel::enforceInterface(const char16_t* interface,
                              size_t len,
                              IPCThreadState* threadState) const
{
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        // StrictModePolicy.
        int32_t strictPolicy = readInt32();
        if (threadState == nullptr) {
            threadState = IPCThreadState::self();
        }
        if ((threadState->getLastTransactionBinderFlags() & IBinder::FLAG_ONEWAY) != 0) {
            // For one-way calls, the callee is running entirely
            // disconnected from the caller, so disable StrictMode entirely.
            // Not only does disk/network usage not impact the caller, but
            // there's no way to communicate back violations anyway.
            threadState->setStrictModePolicy(0);
        } else {
            threadState->setStrictModePolicy(strictPolicy);
        }
        // WorkSource.
        updateWorkSourceRequestHeaderPosition();
        int32_t workSource = readInt32();
        threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
        // vendor header
        int32_t header = readInt32();

        // fuzzers skip this check, because it is for protecting the underlying ABI, but
        // we don't want it to reduce our coverage
        if (header != kHeader && !mServiceFuzzing) {
            ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader,
                  header);
            return false;
        }
#else // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)threadState;
        (void)kernelFields;
        return false;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // Interface descriptor.
    size_t parcel_interface_len;
    const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
    if (len == parcel_interface_len &&
        (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
        return true;
    } else {
        if (mServiceFuzzing) {
            // ignore. Theoretically, this could cause a few false positives, because
            // people could assume things about getInterfaceDescriptor if they pass
            // this point, but it would be extremely fragile. It's more important that
            // we fuzz with the above things read from the Parcel.
            return true;
        } else {
            ALOGW("**** enforceInterface() expected '%s' but read '%s'",
                  String8(interface, len).c_str(),
                  String8(parcel_interface, parcel_interface_len).c_str());
            return false;
        }
    }
}
1100
// Enables/disables the "all data consumed" check performed by
// enforceNoDataAvail().
void Parcel::setEnforceNoDataAvail(bool enforceNoDataAvail) {
    mEnforceNoDataAvail = enforceNoDataAvail;
}

// Marks this parcel as fuzzer input, relaxing ABI checks such as the
// header/interface comparisons in enforceInterface(). One-way: there is
// intentionally no way to clear the flag.
void Parcel::setServiceFuzzing() {
    mServiceFuzzing = true;
}

bool Parcel::isServiceFuzzing() const {
    return mServiceFuzzing;
}
1112
enforceNoDataAvail() const1113 binder::Status Parcel::enforceNoDataAvail() const {
1114 if (!mEnforceNoDataAvail) {
1115 return binder::Status::ok();
1116 }
1117
1118 const auto n = dataAvail();
1119 if (n == 0) {
1120 return binder::Status::ok();
1121 }
1122 return binder::Status::
1123 fromExceptionCode(binder::Status::Exception::EX_BAD_PARCELABLE,
1124 String8::format("Parcel data not fully consumed, unread size: %zu",
1125 n));
1126 }
1127
// Number of object entries recorded for this parcel (mObjectsSize of the
// kernel fields); always 0 for RPC parcels.
size_t Parcel::objectsCount() const
{
    if (const auto* kernelFields = maybeKernelFields()) {
        return kernelFields->mObjectsSize;
    }
    return 0;
}

// Returns the parcel's sticky error status (NO_ERROR when healthy).
status_t Parcel::errorCheck() const
{
    return mError;
}

// Overwrites the sticky error status reported by errorCheck().
void Parcel::setError(status_t err)
{
    mError = err;
}
1145
// Advances the write cursor past |len| bytes just written in place,
// growing the logical data size if the cursor moved beyond it.
status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}
1164
write(const void * data,size_t len)1165 status_t Parcel::write(const void* data, size_t len)
1166 {
1167 if (len > INT32_MAX) {
1168 // don't accept size_t values which may have come from an
1169 // inadvertent conversion from a negative int.
1170 return BAD_VALUE;
1171 }
1172
1173 void* const d = writeInplace(len);
1174 if (d) {
1175 memcpy(d, data, len);
1176 return NO_ERROR;
1177 }
1178 return mError;
1179 }
1180
// Reserves |len| bytes at the current write position (rounded up to the
// parcel's 4-byte padding) and returns a pointer to them for the caller to
// fill, growing the backing buffer if needed. Returns nullptr on overflow,
// validation failure, or allocation failure. Trailing padding bytes are
// zeroed via the byte-order-specific mask below.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        if (status_t status = validateReadData(mDataPos + padded); status != OK) {
            return nullptr; // drops status
        }

        // Need to pad at end?
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            // Zero the 1-3 pad bytes in the final word so uninitialized
            // heap data never leaks into the transmitted parcel.
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    // Not enough capacity: grow, then retry the in-place write above.
    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}
1230
// Converts a UTF-8 std::string to UTF-16 and writes it with the standard
// String16 wire format: an int32 length (in char16_t units, excluding the
// terminator) followed by the NUL-terminated UTF-16 payload.
status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
    const uint8_t* strData = (uint8_t*)str.data();
    const size_t strLen= str.length();
    const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
    // Negative means the UTF-8 input was invalid; also reject lengths that
    // would not fit in the int32 length prefix.
    if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t err = writeInt32(utf16Len);
    if (err) {
        return err;
    }

    // Allocate enough bytes to hold our converted string and its terminating NULL.
    void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
    if (!dst) {
        return NO_MEMORY;
    }

    utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);

    return NO_ERROR;
}
1254
1255
// The overloads below all forward to the templated writeData(), which
// encodes nullable wrappers (std::optional / std::unique_ptr) and vector
// payloads uniformly.
status_t Parcel::writeUtf8AsUtf16(const std::optional<std::string>& str) { return writeData(str); }
status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) { return writeData(str); }

status_t Parcel::writeString16(const std::optional<String16>& str) { return writeData(str); }
status_t Parcel::writeString16(const std::unique_ptr<String16>& str) { return writeData(str); }

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<uint8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val){ return writeData(val); }
status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::optional<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::optional<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::optional<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::vector<float>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::optional<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::vector<double>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::optional<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::vector<bool>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::optional<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::vector<char16_t>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::optional<std::vector<char16_t>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) { return writeData(val); }

status_t Parcel::writeString16Vector(const std::vector<String16>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::optional<std::vector<std::optional<String16>>>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::optional<std::vector<std::optional<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) { return writeData(val); }

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<unique_fd>& val) {
    return writeData(val);
}
status_t Parcel::writeUniqueFileDescriptorVector(const std::optional<std::vector<unique_fd>>& val) {
    return writeData(val);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::optional<std::vector<sp<IBinder>>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val) { return writeData(val); }

status_t Parcel::writeParcelable(const Parcelable& parcelable) { return writeData(parcelable); }
1313
// Read-side mirrors of the writeData() forwarders above; each forwards to
// the templated readData(), which decodes the matching wire format.
status_t Parcel::readUtf8FromUtf16(std::optional<std::string>* str) const { return readData(str); }
status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const { return readData(str); }

status_t Parcel::readString16(std::optional<String16>* pArg) const { return readData(pArg); }
status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const { return readData(pArg); }

status_t Parcel::readByteVector(std::vector<int8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::vector<uint8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::optional<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::optional<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::optional<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::optional<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::vector<float>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::optional<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::vector<double>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::vector<bool>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::optional<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::vector<char16_t>* val) const { return readData(val); }

status_t Parcel::readString16Vector(
        std::optional<std::vector<std::optional<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(std::vector<String16>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::optional<std::vector<std::optional<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const { return readData(val); }

status_t Parcel::readUniqueFileDescriptorVector(std::optional<std::vector<unique_fd>>* val) const {
    return readData(val);
}
status_t Parcel::readUniqueFileDescriptorVector(std::vector<unique_fd>* val) const {
    return readData(val);
}

status_t Parcel::readStrongBinderVector(std::optional<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const { return readData(val); }

status_t Parcel::readParcelable(Parcelable* parcelable) const { return readData(parcelable); }
1371
// 32-bit scalar writers; writeAligned() handles the parcel's alignment
// requirements.
status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}
1381
writeInt32Array(size_t len,const int32_t * val)1382 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
1383 if (len > INT32_MAX) {
1384 // don't accept size_t values which may have come from an
1385 // inadvertent conversion from a negative int.
1386 return BAD_VALUE;
1387 }
1388
1389 if (!val) {
1390 return writeInt32(-1);
1391 }
1392 status_t ret = writeInt32(static_cast<uint32_t>(len));
1393 if (ret == NO_ERROR) {
1394 ret = write(val, len * sizeof(*val));
1395 }
1396 return ret;
1397 }
writeByteArray(size_t len,const uint8_t * val)1398 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
1399 if (len > INT32_MAX) {
1400 // don't accept size_t values which may have come from an
1401 // inadvertent conversion from a negative int.
1402 return BAD_VALUE;
1403 }
1404
1405 if (!val) {
1406 return writeInt32(-1);
1407 }
1408 status_t ret = writeInt32(static_cast<uint32_t>(len));
1409 if (ret == NO_ERROR) {
1410 ret = write(val, len * sizeof(*val));
1411 }
1412 return ret;
1413 }
1414
// Small scalars (bool, char16_t, int8_t) are widened to int32 on the wire;
// the remaining scalars go through writeAligned() directly.
status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are written as binder_uintptr_t so 32- and 64-bit processes
// agree on the field width.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}
1449
#if defined(__mips__) && defined(__mips_hard_float)

// On mips hard-float builds the double is written through an integer
// union so the same bit pattern goes through writeAligned() without
// touching the FPU calling convention.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif
1470
// Writes a C string including its NUL terminator as raw bytes.
// NOTE(review): |str| must be non-null — strlen(nullptr) is undefined
// behavior; callers are responsible for the check.
status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

// Convenience overload: forwards to the (ptr, len) variant.
status_t Parcel::writeString8(const String8& str)
{
    return writeString8(str.c_str(), str.size());
}
1480
// Writes an 8-bit string as an int32 length (excluding the terminator)
// followed by the bytes and a NUL terminator. A null |str| is encoded as
// length -1 with no payload.
status_t Parcel::writeString8(const char* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    // NOTE: Keep this logic in sync with android_os_Parcel.cpp
    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
        if (data) {
            memcpy(data, str, len);
            // Explicit NUL terminator after the payload.
            *reinterpret_cast<char*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}
1498
// Convenience overload: forwards to the (ptr, len) variant.
status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.c_str(), str.size());
}

// Writes a UTF-16 string as an int32 length (in char16_t units, excluding
// the terminator) followed by the bytes and a char16_t NUL terminator.
// A null |str| is encoded as length -1 with no payload.
status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    // NOTE: Keep this logic in sync with android_os_Parcel.cpp
    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        // From here on, len counts bytes rather than characters.
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}
1522
// Writes a strong binder reference; flattenBinder() performs the actual
// encoding of the binder object into the parcel.
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flattenBinder(val);
}
1527
1528
writeRawNullableParcelable(const Parcelable * parcelable)1529 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1530 if (!parcelable) {
1531 return writeInt32(0);
1532 }
1533
1534 return writeParcelable(*parcelable);
1535 }
1536
#ifndef BINDER_DISABLE_NATIVE_HANDLE
// Writes a native_handle as its numFds/numInts counts, followed by a dup
// of each fd, followed by the raw int payload. Returns BAD_TYPE when the
// handle is null or its version field does not match this struct layout.
status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    // The fds live at the front of handle->data; each is duplicated so the
    // parcel owns its own copies.
    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    // The ints follow the fds in the same data array.
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}
#endif
1561
// Writes a file descriptor into the parcel. For RPC parcels the fd is
// stashed in the session's fd table (owned or borrowed per
// |takeOwnership|) and referenced by an index written into the data; for
// kernel parcels a BINDER_TYPE_FD flat_binder_object is written, with
// |takeOwnership| recorded in the cookie. Fails with FDS_NOT_ALLOWED when
// this parcel or the RPC session forbids fds.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) {
    if (auto* rpcFields = maybeRpcFields()) {
        // Wrap the fd first so that, when taking ownership, it is closed
        // even on the error paths below.
        std::variant<unique_fd, borrowed_fd> fdVariant;
        if (takeOwnership) {
            fdVariant = unique_fd(fd);
        } else {
            fdVariant = borrowed_fd(fd);
        }
        if (!mAllowFds) {
            ALOGE("FDs are not allowed in this parcel. Both the service and the client must set "
                  "the FileDescriptorTransportMode and agree on the support.");
            return FDS_NOT_ALLOWED;
        }
        switch (rpcFields->mSession->getFileDescriptorTransportMode()) {
            case RpcSession::FileDescriptorTransportMode::NONE: {
                ALOGE("FDs are not allowed in this RpcSession. Both the service and the client "
                      "must set "
                      "the FileDescriptorTransportMode and agree on the support.");
                return FDS_NOT_ALLOWED;
            }
            case RpcSession::FileDescriptorTransportMode::UNIX:
            case RpcSession::FileDescriptorTransportMode::TRUSTY: {
                if (rpcFields->mFds == nullptr) {
                    rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
                }
                // Positions are stored as 32-bit offsets on the wire.
                size_t dataPos = mDataPos;
                if (dataPos > UINT32_MAX) {
                    return NO_MEMORY;
                }
                if (status_t err = writeInt32(RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR); err != OK) {
                    return err;
                }
                // The payload is the fd's index in the session fd table.
                if (status_t err = writeInt32(rpcFields->mFds->size()); err != OK) {
                    return err;
                }
                rpcFields->mObjectPositions.push_back(dataPos);
                rpcFields->mFds->push_back(std::move(fdVariant));
                return OK;
            }
        }
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
#else // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)fd;
    (void)takeOwnership;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1619
writeDupFileDescriptor(int fd)1620 status_t Parcel::writeDupFileDescriptor(int fd)
1621 {
1622 int dupFd;
1623 if (status_t err = binder::os::dupFileDescriptor(fd, &dupFd); err != OK) {
1624 return err;
1625 }
1626 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1627 if (err != OK) {
1628 close(dupFd);
1629 }
1630 return err;
1631 }
1632
writeParcelFileDescriptor(int fd,bool takeOwnership)1633 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1634 {
1635 writeInt32(0);
1636 return writeFileDescriptor(fd, takeOwnership);
1637 }
1638
writeDupParcelFileDescriptor(int fd)1639 status_t Parcel::writeDupParcelFileDescriptor(int fd)
1640 {
1641 int dupFd;
1642 if (status_t err = binder::os::dupFileDescriptor(fd, &dupFd); err != OK) {
1643 return err;
1644 }
1645 status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1646 if (err != OK) {
1647 close(dupFd);
1648 }
1649 return err;
1650 }
1651
// Writes a duplicate of the fd held by `fd` into the parcel; the parcel owns
// the duplicate and the caller's unique_fd is left untouched.
status_t Parcel::writeUniqueFileDescriptor(const unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}
1655
// Writes a blob of `len` bytes. When fds are disallowed or the blob is small
// (<= BLOB_INPLACE_LIMIT), the bytes are stored inline in the parcel;
// otherwise an ashmem region is created, mapped, and its fd is written
// instead. On success *outBlob holds the writable mapping for the caller to
// fill in (and later release).
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)mutableCopy;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        // fd of -1 marks an in-place (non-ashmem) blob.
        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Drop write permission for the receiver; our existing
                // mapping stays writable so the caller can fill the blob.
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        // Success: the blob keeps fd and mapping; skip cleanup.
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
            // Error path: undo the mapping before closing the fd.
            if (::munmap(ptr, len) == -1) {
                ALOGW("munmap() failed: %s", strerror(errno));
            }
        }
        ::close(fd);
        return status;
#endif
}
1719
writeDupImmutableBlobFileDescriptor(int fd)1720 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1721 {
1722 // Must match up with what's done in writeBlob.
1723 if (!mAllowFds) return FDS_NOT_ALLOWED;
1724 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1725 if (status) return status;
1726 return writeDupFileDescriptor(fd);
1727 }
1728
write(const FlattenableHelperInterface & val)1729 status_t Parcel::write(const FlattenableHelperInterface& val)
1730 {
1731 status_t err;
1732
1733 // size if needed
1734 const size_t len = val.getFlattenedSize();
1735 const size_t fd_count = val.getFdCount();
1736
1737 if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
1738 // don't accept size_t values which may have come from an
1739 // inadvertent conversion from a negative int.
1740 return BAD_VALUE;
1741 }
1742
1743 err = this->writeInt32(len);
1744 if (err) return err;
1745
1746 err = this->writeInt32(fd_count);
1747 if (err) return err;
1748
1749 // payload
1750 void* const buf = this->writeInplace(len);
1751 if (buf == nullptr)
1752 return BAD_VALUE;
1753
1754 int* fds = nullptr;
1755 if (fd_count) {
1756 fds = new (std::nothrow) int[fd_count];
1757 if (fds == nullptr) {
1758 ALOGE("write: failed to allocate requested %zu fds", fd_count);
1759 return BAD_VALUE;
1760 }
1761 }
1762
1763 err = val.flatten(buf, len, fds, fd_count);
1764 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1765 err = this->writeDupFileDescriptor( fds[i] );
1766 }
1767
1768 if (fd_count) {
1769 delete [] fds;
1770 }
1771
1772 return err;
1773 }
1774
// Writes a kernel flat_binder_object at the current position. When the
// object carries meta-data (nullMetaData, or a non-zero binder field), its
// offset is also recorded in mObjects so the kernel driver can translate it.
// Only valid for kernel (non-RPC) parcels.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr, "Can't write flat_binder_object to RPC Parcel");

#ifdef BINDER_WITH_KERNEL_IPC
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = kernelFields->mObjectsSize < kernelFields->mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        // Refuse to overwrite a region that already holds object data.
        if (status_t status = validateReadData(mDataPos + sizeof(val)); status != OK) {
            return status;
        }

        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            kernelFields->mHasFds = kernelFields->mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            kernelFields->mObjects[kernelFields->mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, true /*tagFds*/);
            kernelFields->mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (mOwner) {
        // continueWrite does have the logic to convert this from an
        // owned to an unowned Parcel. However, this is pretty inefficient,
        // and it's really strange to need to do so, so prefer to avoid
        // these paths than try to support them.
        ALOGE("writing objects not supported on owned Parcels");
        return PERMISSION_DENIED;
    }

    // Grow whichever of the two buffers (data, object offsets) is full, then
    // retry the write above.
    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the offset table by ~1.5x, guarding each step against
        // size_t overflow.
        if (kernelFields->mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
        if ((kernelFields->mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((kernelFields->mObjectsSize + 2) * 3) / 2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
        binder_size_t* objects =
                (binder_size_t*)realloc(kernelFields->mObjects, newSize * sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        kernelFields->mObjects = objects;
        kernelFields->mObjectsCapacity = newSize;
    }

    goto restart_write;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)val;
    (void)nullMetaData;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1843
writeNoException()1844 status_t Parcel::writeNoException()
1845 {
1846 binder::Status status;
1847 return status.writeToParcel(this);
1848 }
1849
// Checks that a plain-data access of the range [mDataPos, upperBound) does
// not overlap any recorded binder object; returns PERMISSION_DENIED if it
// would. Requires mObjects to be sorted; sorts it lazily on first use.
// Only meaningful for kernel parcels (RPC parcels always pass).
status_t Parcel::validateReadData(size_t upperBound) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // Can't validate RPC Parcel reads because the location of binder
        // objects is unknown.
        return OK;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Don't allow non-object reads on object data
    if (kernelFields->mObjectsSorted || kernelFields->mObjectsSize <= 1) {
data_sorted:
        // Expect to check only against the next object
        if (kernelFields->mNextObjectHint < kernelFields->mObjectsSize &&
            upperBound > kernelFields->mObjects[kernelFields->mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = kernelFields->mNextObjectHint;
            do {
                if (mDataPos < kernelFields->mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    if (!mServiceFuzzing) {
                        ALOGE("Attempt to read or write from protected data in Parcel %p. pos: "
                              "%zu, nextObject: %zu, object offset: %llu, object size: %zu",
                              this, mDataPos, nextObject, kernelFields->mObjects[nextObject],
                              sizeof(flat_binder_object));
                    }
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < kernelFields->mObjectsSize &&
                     upperBound > kernelFields->mObjects[nextObject]);
            kernelFields->mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = kernelFields->mObjects + kernelFields->mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > kernelFields->mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    kernelFields->mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = kernelFields->mObjects + 1;
         iter0 < kernelFields->mObjects + kernelFields->mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= kernelFields->mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    // After sorting, restart the hint from the beginning.
    kernelFields->mNextObjectHint = 0;
    kernelFields->mObjectsSorted = true;
    goto data_sorted;
#else  // BINDER_WITH_KERNEL_IPC
    (void)upperBound;
    return NO_ERROR;
#endif // BINDER_WITH_KERNEL_IPC
}
1922
// Copies `len` bytes from the current read position into outData and
// advances the position by the 4-byte-padded length.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // First two comparisons guard against overflow of mDataPos+pad_size(len);
    // the last rejects overflow inside pad_size itself.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Disallow reading raw bytes over embedded binder objects.
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
                return err;
            }
        }
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}
1950
// Returns a pointer to `len` bytes inside the parcel's buffer at the current
// position (no copy), advancing the position by the padded length.
// Returns nullptr when out of range or when the span overlaps object data.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    // Same overflow guards as read(): position+padded length must not wrap
    // and must stay within mDataSize; pad_size itself must not overflow.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
                return nullptr;
            }
        }

        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}
1979
// Reads the size header of an "out" vector and sanity-checks that
// elmSize * size would not require an unreasonable allocation.
// *size < 0 is returned as OK (a null vector the client must handle).
status_t Parcel::readOutVectorSizeWithCheck(size_t elmSize, int32_t* size) const {
    if (status_t status = readInt32(size); status != OK) return status;
    if (*size < 0) return OK; // may be null, client to handle

    LOG_ALWAYS_FATAL_IF(elmSize > INT32_MAX, "Cannot have element as big as %zu", elmSize);

    // approximation, can't know max element size (e.g. if it makes heap
    // allocations)
    static_assert(sizeof(int) == sizeof(int32_t), "Android is LP64");
    int32_t allocationSize;
    // Overflow-checked elmSize * *size.
    if (__builtin_smul_overflow(elmSize, *size, &allocationSize)) return NO_MEMORY;

    // High limit of 1MB since something this big could never be returned. Could
    // probably scope this down, but might impact very specific usecases.
    constexpr int32_t kMaxAllocationSize = 1 * 1000 * 1000;

    if (allocationSize >= kMaxAllocationSize) {
        return NO_MEMORY;
    }

    return OK;
}
2002
// Reads a trivially-copyable T whose size is already 4-byte aligned.
// On success advances mDataPos; if object-overlap validation fails the
// position is still advanced so subsequent reads stay in sync.
// On NOT_ENOUGH_DATA, *pArg is left unmodified.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            status_t err = validateReadData(mDataPos + sizeof(T));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += sizeof(T);
                return err;
            }
        }

        memcpy(pArg, mData + mDataPos, sizeof(T));
        mDataPos += sizeof(T);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}
2026
2027 template<class T>
readAligned() const2028 T Parcel::readAligned() const {
2029 T result;
2030 if (readAligned(&result) != NO_ERROR) {
2031 result = 0;
2032 }
2033
2034 return result;
2035 }
2036
// Writes a trivially-copyable T whose size is already 4-byte aligned,
// growing the buffer and retrying if there is not enough capacity.
template<class T>
status_t Parcel::writeAligned(T val) {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        // Refuse to overwrite a region holding binder object data.
        if (status_t status = validateReadData(mDataPos + sizeof(val)); status != OK) {
            return status;
        }

        memcpy(mData + mDataPos, &val, sizeof(val));
        return finishWrite(sizeof(val));
    }

    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}
2056
// Reads a 32-bit signed int; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readInt32(int32_t *pArg) const
{
    return readAligned(pArg);
}
2061
// Reads a 32-bit signed int, returning 0 on failure.
int32_t Parcel::readInt32() const
{
    return readAligned<int32_t>();
}
2066
// Reads a 32-bit unsigned int; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readUint32(uint32_t *pArg) const
{
    return readAligned(pArg);
}
2071
// Reads a 32-bit unsigned int, returning 0 on failure.
uint32_t Parcel::readUint32() const
{
    return readAligned<uint32_t>();
}
2076
// Reads a 64-bit signed int; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readInt64(int64_t *pArg) const
{
    return readAligned(pArg);
}
2081
2082
// Reads a 64-bit signed int, returning 0 on failure.
int64_t Parcel::readInt64() const
{
    return readAligned<int64_t>();
}
2087
// Reads a 64-bit unsigned int; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readUint64(uint64_t *pArg) const
{
    return readAligned(pArg);
}
2092
// Reads a 64-bit unsigned int, returning 0 on failure.
uint64_t Parcel::readUint64() const
{
    return readAligned<uint64_t>();
}
2097
readPointer(uintptr_t * pArg) const2098 status_t Parcel::readPointer(uintptr_t *pArg) const
2099 {
2100 status_t ret;
2101 binder_uintptr_t ptr;
2102 ret = readAligned(&ptr);
2103 if (!ret)
2104 *pArg = ptr;
2105 return ret;
2106 }
2107
// Reads a pointer-sized value (wire type binder_uintptr_t), 0 on failure.
uintptr_t Parcel::readPointer() const
{
    return readAligned<binder_uintptr_t>();
}
2112
2113
// Reads a float; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readFloat(float *pArg) const
{
    return readAligned(pArg);
}
2118
2119
// Reads a float, returning 0 on failure.
float Parcel::readFloat() const
{
    return readAligned<float>();
}
2124
2125 #if defined(__mips__) && defined(__mips_hard_float)
2126
// MIPS hard-float variant: reads the 8 bytes as an integer through a union
// rather than directly as a double (see the __mips_hard_float guard above).
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}
2139
// MIPS hard-float variant: reads the bits as unsigned long long and
// reinterprets them as a double via a union.
double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}
2149
2150 #else
2151
// Reads a double; *pArg is unmodified on NOT_ENOUGH_DATA.
status_t Parcel::readDouble(double *pArg) const
{
    return readAligned(pArg);
}
2156
// Reads a double, returning 0 on failure.
double Parcel::readDouble() const
{
    return readAligned<double>();
}
2161
2162 #endif
2163
readBool(bool * pArg) const2164 status_t Parcel::readBool(bool *pArg) const
2165 {
2166 int32_t tmp = 0;
2167 status_t ret = readInt32(&tmp);
2168 *pArg = (tmp != 0);
2169 return ret;
2170 }
2171
// Reads a bool (32-bit int on the wire); a failed read yields 0, i.e. false.
bool Parcel::readBool() const
{
    return readInt32() != 0;
}
2176
readChar(char16_t * pArg) const2177 status_t Parcel::readChar(char16_t *pArg) const
2178 {
2179 int32_t tmp = 0;
2180 status_t ret = readInt32(&tmp);
2181 *pArg = char16_t(tmp);
2182 return ret;
2183 }
2184
// Reads a char16_t (32-bit int on the wire); a failed read yields 0.
char16_t Parcel::readChar() const
{
    return char16_t(readInt32());
}
2189
readByte(int8_t * pArg) const2190 status_t Parcel::readByte(int8_t *pArg) const
2191 {
2192 int32_t tmp = 0;
2193 status_t ret = readInt32(&tmp);
2194 *pArg = int8_t(tmp);
2195 return ret;
2196 }
2197
// Reads an int8_t (32-bit int on the wire); a failed read yields 0.
int8_t Parcel::readByte() const
{
    return int8_t(readInt32());
}
2202
// Reads a UTF-16 string from the parcel and converts it to UTF-8 in *str.
// Returns UNEXPECTED_NULL for a null string, BAD_VALUE if the UTF-16 data
// cannot be measured for conversion.
status_t Parcel::readUtf8FromUtf16(std::string* str) const {
    size_t utf16Size = 0;
    const char16_t* src = readString16Inplace(&utf16Size);
    if (!src) {
        return UNEXPECTED_NULL;
    }

    // Save ourselves the trouble, we're done.
    if (utf16Size == 0u) {
        str->clear();
        return NO_ERROR;
    }

    // Allow for closing '\0'
    // (utf16_to_utf8_length returning a negative value makes utf8Size < 1.)
    ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
    if (utf8Size < 1) {
        return BAD_VALUE;
    }
    // Note that while it is probably safe to assume string::resize keeps a
    // spare byte around for the trailing null, we still pass the size including the trailing null
    str->resize(utf8Size);
    utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
    // Drop the trailing NUL written by the converter.
    str->resize(utf8Size - 1);
    return NO_ERROR;
}
2228
// Returns a pointer into the parcel's buffer to a NUL-terminated C string at
// the current position, or nullptr if no terminator lies within the
// remaining data. Advances the position past the string (incl. its NUL).
const char* Parcel::readCString() const
{
    if (mDataPos < mDataSize) {
        const size_t avail = mDataSize-mDataPos;
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            // readInplace re-checks bounds and advances mDataPos.
            return static_cast<const char*>(readInplace(len + 1));
        }
    }
    return nullptr;
}
2243
readString8() const2244 String8 Parcel::readString8() const
2245 {
2246 size_t len;
2247 const char* str = readString8Inplace(&len);
2248 if (str) return String8(str, len);
2249
2250 if (!mServiceFuzzing) {
2251 ALOGE("Reading a NULL string not supported here.");
2252 }
2253
2254 return String8();
2255 }
2256
readString8(String8 * pArg) const2257 status_t Parcel::readString8(String8* pArg) const
2258 {
2259 size_t len;
2260 const char* str = readString8Inplace(&len);
2261 if (str) {
2262 pArg->setTo(str, len);
2263 return 0;
2264 } else {
2265 *pArg = String8();
2266 return UNEXPECTED_NULL;
2267 }
2268 }
2269
// Reads a length-prefixed 8-bit string in place: returns a pointer into the
// parcel's buffer and sets *outLen, or returns nullptr (with *outLen = 0)
// for a negative length, out-of-range data, or a missing NUL terminator.
const char* Parcel::readString8Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char* str = (const char*)readInplace(size+1);
        if (str != nullptr) {
            if (str[size] == '\0') {
                return str;
            }
            // Missing terminator: report to the security event log
            // (issue 172655291) and fail the read.
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2287
readString16() const2288 String16 Parcel::readString16() const
2289 {
2290 size_t len;
2291 const char16_t* str = readString16Inplace(&len);
2292 if (str) return String16(str, len);
2293
2294 if (!mServiceFuzzing) {
2295 ALOGE("Reading a NULL string not supported here.");
2296 }
2297
2298 return String16();
2299 }
2300
2301
readString16(String16 * pArg) const2302 status_t Parcel::readString16(String16* pArg) const
2303 {
2304 size_t len;
2305 const char16_t* str = readString16Inplace(&len);
2306 if (str) {
2307 pArg->setTo(str, len);
2308 return 0;
2309 } else {
2310 *pArg = String16();
2311 return UNEXPECTED_NULL;
2312 }
2313 }
2314
// Reads a length-prefixed UTF-16 string in place: returns a pointer into the
// parcel's buffer and sets *outLen (in char16_t units), or returns nullptr
// (with *outLen = 0) for a negative length, out-of-range data, or a missing
// NUL terminator.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != nullptr) {
            if (str[size] == u'\0') {
                return str;
            }
            // Missing terminator: report to the security event log
            // (issue 172655291) and fail the read.
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2332
readStrongBinder(sp<IBinder> * val) const2333 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2334 {
2335 status_t status = readNullableStrongBinder(val);
2336 if (status == OK && !val->get()) {
2337 if (!mServiceFuzzing) {
2338 ALOGW("Expecting binder but got null!");
2339 }
2340 status = UNEXPECTED_NULL;
2341 }
2342 return status;
2343 }
2344
// Reads a (possibly null) strong binder; a null binder is not an error here
// (contrast readStrongBinder, which rejects null).
status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
{
    return unflattenBinder(val);
}
2349
// Reads a strong binder, returning nullptr on error or a null object.
sp<IBinder> Parcel::readStrongBinder() const
{
    sp<IBinder> val;
    // Note that a lot of code in Android reads binders by hand with this
    // method, and that code has historically been ok with getting nullptr
    // back (while ignoring error codes).
    readNullableStrongBinder(&val);
    return val;
}
2359
readExceptionCode() const2360 int32_t Parcel::readExceptionCode() const
2361 {
2362 binder::Status status;
2363 status.readFromParcel(*this);
2364 return status.exceptionCode();
2365 }
2366
2367 #ifndef BINDER_DISABLE_NATIVE_HANDLE
// Reads a native_handle (fd count, int count, the fds, then the ints).
// Each fd is duplicated (O_CLOEXEC) so the returned handle owns its fds.
// Returns nullptr on any failure, with all already-duplicated fds closed.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return nullptr;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return nullptr;

    // native_handle_create validates the counts and allocates the struct.
    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return nullptr;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        // The parcel's fd is not owned by us; keep a CLOEXEC duplicate.
        h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
        if (h->data[i] < 0) {
            // Close the fds duplicated so far, then drop the handle.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return nullptr;
        }
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        // native_handle_close closes all the fds stored in the handle.
        native_handle_close(h);
        native_handle_delete(h);
        h = nullptr;
    }
    return h;
}
2400 #endif
2401
// Reads a file descriptor from the parcel and returns the raw fd, which
// remains owned by the parcel (callers must dup to keep it). Returns a
// negative status (BAD_TYPE/BAD_VALUE/INVALID_OPERATION) on failure.
int Parcel::readFileDescriptor() const {
    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC path: the current position must be a recorded object position.
        if (!std::binary_search(rpcFields->mObjectPositions.begin(),
                                rpcFields->mObjectPositions.end(), mDataPos)) {
            if (!mServiceFuzzing) {
                ALOGW("Attempt to read file descriptor from Parcel %p at offset %zu that is not in "
                      "the object list",
                      this, mDataPos);
            }
            return BAD_TYPE;
        }

        int32_t objectType = readInt32();
        if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            return BAD_TYPE;
        }

        // The wire value is an index into the parcel's fd table.
        int32_t fdIndex = readInt32();
        if (rpcFields->mFds == nullptr || fdIndex < 0 ||
            static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
            ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                  fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
            return BAD_VALUE;
        }
        return toRawFd(rpcFields->mFds->at(fdIndex));
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Kernel path: the fd lives in a BINDER_TYPE_FD flat_binder_object.
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->hdr.type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
2442
// Reads a ParcelFileDescriptor: a flag saying whether a communication fd
// follows, then the data fd. If a comm fd is present, a DETACHED message is
// written to it (in Java-side byte order) to mark the descriptor detached.
// Returns the raw data fd (owned by the parcel) or BAD_TYPE on failure.
int Parcel::readParcelFileDescriptor() const {
    int32_t hasComm = readInt32();
    int fd = readFileDescriptor();
    if (hasComm != 0) {
        // detach (owned by the binder driver)
        int comm = readFileDescriptor();

        // warning: this must be kept in sync with:
        // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
        enum ParcelFileDescriptorStatus {
            DETACHED = 2,
        };

#if BYTE_ORDER == BIG_ENDIAN
        const int32_t message = ParcelFileDescriptorStatus::DETACHED;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        // Byte-swap so the value on the wire matches the big-endian form
        // expected by the Java side.
        const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
#endif

        ssize_t written = TEMP_FAILURE_RETRY(
            ::write(comm, &message, sizeof(message)));

        if (written != sizeof(message)) {
            ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
                  written, strerror(errno));
            return BAD_TYPE;
        }
    }
    return fd;
}
2474
readUniqueFileDescriptor(unique_fd * val) const2475 status_t Parcel::readUniqueFileDescriptor(unique_fd* val) const {
2476 int got = readFileDescriptor();
2477
2478 if (got == BAD_TYPE) {
2479 return BAD_TYPE;
2480 }
2481
2482 int dupFd;
2483 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2484 return BAD_VALUE;
2485 }
2486
2487 val->reset(dupFd);
2488
2489 if (val->get() < 0) {
2490 return BAD_VALUE;
2491 }
2492
2493 return OK;
2494 }
2495
readUniqueParcelFileDescriptor(unique_fd * val) const2496 status_t Parcel::readUniqueParcelFileDescriptor(unique_fd* val) const {
2497 int got = readParcelFileDescriptor();
2498
2499 if (got == BAD_TYPE) {
2500 return BAD_TYPE;
2501 }
2502
2503 int dupFd;
2504 if (status_t err = binder::os::dupFileDescriptor(got, &dupFd); err != OK) {
2505 return BAD_VALUE;
2506 }
2507
2508 val->reset(dupFd);
2509
2510 if (val->get() < 0) {
2511 return BAD_VALUE;
2512 }
2513
2514 return OK;
2515 }
2516
// Reads a blob of `len` bytes written by writeBlob: either in-place bytes,
// or an ashmem fd that is validated and mmap'ed. On success *outBlob holds
// the data (and, for ashmem, the mapping to release later).
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
#ifdef BINDER_DISABLE_BLOB
    (void)len;
    (void)outBlob;
    return INVALID_OPERATION;
#else
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        // fd of -1 marks an in-place (non-ashmem) blob.
        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    if (!ashmem_valid(fd)) {
        ALOGE("invalid fd");
        return BAD_VALUE;
    }
    // The region must be at least as large as the claimed blob length.
    int size = ashmem_get_size_region(fd);
    if (size < 0 || size_t(size) < len) {
        ALOGE("request size %zu does not match fd size %d", len, size);
        return BAD_VALUE;
    }
    void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
            MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
#endif
}
2559
// Deserializes a Flattenable written by write(): reads the length and fd
// count, takes the payload in place, duplicates each embedded fd (O_CLOEXEC),
// and hands everything to val.unflatten().
status_t Parcel::read(FlattenableHelperInterface& val) const
{
    // size
    const size_t len = this->readInt32();
    const size_t fd_count = this->readInt32();

    if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // payload
    void const* const buf = this->readInplace(pad_size(len));
    if (buf == nullptr)
        return BAD_VALUE;

    int* fds = nullptr;
    if (fd_count) {
        fds = new (std::nothrow) int[fd_count];
        if (fds == nullptr) {
            ALOGE("read: failed to allocate requested %zu fds", fd_count);
            return BAD_VALUE;
        }
    }

    status_t err = NO_ERROR;
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        // Parcel keeps ownership of the read fd; keep a CLOEXEC duplicate.
        int fd = this->readFileDescriptor();
        if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
            err = BAD_VALUE;
            ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
                  i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
            // Close all the file descriptors that were dup-ed.
            for (size_t j=0; j<i ;j++) {
                close(fds[j]);
            }
        }
    }

    if (err == NO_ERROR) {
        err = val.unflatten(buf, len, fds, fd_count);
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
2610
2611 #ifdef BINDER_WITH_KERNEL_IPC
// Reads a flat_binder_object at the current position, advancing past it.
// Unless nullMetaData is set and the object is null (binder and cookie both
// zero), the object's offset must appear in mObjects; mNextObjectHint caches
// the expected next index to make sequential reads cheap. Returns nullptr
// on bounds failure or when the offset is not a recorded object.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return nullptr;
    }

    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = kernelFields->mObjects;
        const size_t N = kernelFields->mObjectsSize;
        size_t opos = kernelFields->mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        if (!mServiceFuzzing) {
            ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object "
                  "list",
                  this, DPOS);
        }
    }
    return nullptr;
}
2680 #endif // BINDER_WITH_KERNEL_IPC
2681
// Closes the fds of every kernel object at index >= newObjectsSize (walked
// from the end of mObjects down). For RPC parcels, drops the whole fd table
// instead. Note this only closes fds; it does not shrink mObjectsSize.
void Parcel::closeFileDescriptors(size_t newObjectsSize) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t i = kernelFields->mObjectsSize;
        if (i > 0) {
            // ALOGI("Closing file descriptors for %zu objects...", i);
        }
        while (i > newObjectsSize) {
            i--;
            const flat_binder_object* flat =
                    reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                // ALOGI("Closing fd: %ld", flat->handle);
                // FDs from the kernel are always owned
                FdTagClose(flat->handle, this);
            }
        }
#else  // BINDER_WITH_KERNEL_IPC
        (void)newObjectsSize;
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)kernelFields;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mFds.reset();
    }
}
2708
ipcData() const2709 uintptr_t Parcel::ipcData() const
2710 {
2711 return reinterpret_cast<uintptr_t>(mData);
2712 }
2713
ipcDataSize() const2714 size_t Parcel::ipcDataSize() const
2715 {
2716 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2717 }
2718
ipcObjects() const2719 uintptr_t Parcel::ipcObjects() const
2720 {
2721 if (const auto* kernelFields = maybeKernelFields()) {
2722 return reinterpret_cast<uintptr_t>(kernelFields->mObjects);
2723 }
2724 return 0;
2725 }
2726
ipcObjectsCount() const2727 size_t Parcel::ipcObjectsCount() const
2728 {
2729 if (const auto* kernelFields = maybeKernelFields()) {
2730 return kernelFields->mObjectsSize;
2731 }
2732 return 0;
2733 }
2734
// No-op release function: used when a Parcel must not free the buffer it
// merely views (the real owner keeps ownership).
static void do_nothing_release_func(const uint8_t* /*data*/, size_t /*dataSize*/,
                                    const binder_size_t* /*objects*/, size_t /*objectsCount*/) {}
// Release function matching a buffer allocated with `new uint8_t[...]`.
static void delete_data_release_func(const uint8_t* data, size_t /*dataSize*/,
                                     const binder_size_t* /*objects*/, size_t /*objectsCount*/) {
    delete[] data;
}
2749
void Parcel::makeDangerousViewOf(Parcel* p) {
    // Turn this Parcel into a view of `p`'s payload. For RPC parcels the view
    // borrows p's buffer and FDs (no-op release func), so `p` must outlive
    // this Parcel — hence "dangerous". For kernel parcels the payload is
    // copied and the FDs duplicated instead.
    if (p->isForRpc()) {
        // warning: this must match the logic in rpcSetDataReference
        auto* rf = p->maybeRpcFields();
        LOG_ALWAYS_FATAL_IF(rf == nullptr);
        std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>> fds;
        if (rf->mFds) {
            fds.reserve(rf->mFds->size());
            for (const auto& fd : *rf->mFds) {
                // Borrow rather than own: p keeps ownership of its FDs.
                fds.push_back(binder::borrowed_fd(toRawFd(fd)));
            }
        }
        status_t result =
                rpcSetDataReference(rf->mSession, p->mData, p->mDataSize,
                                    rf->mObjectPositions.data(), rf->mObjectPositions.size(),
                                    std::move(fds), do_nothing_release_func);
        LOG_ALWAYS_FATAL_IF(result != OK, "Failed: %s", statusToString(result).c_str());
    } else {
#ifdef BINDER_WITH_KERNEL_IPC
        // warning: this must match the logic in ipcSetDataReference
        auto* kf = p->maybeKernelFields();
        LOG_ALWAYS_FATAL_IF(kf == nullptr);

        // Ownership of FDs is passed to the Parcel from kernel binder. This should be refactored
        // to move this ownership out of Parcel and into release_func. However, today, Parcel
        // always assumes it can own and close FDs. So, for purposes of testing consistency,
        // create new FDs that this copy can own.

        uint8_t* newData = new uint8_t[p->mDataSize]; // deleted by delete_data_release_func
        memcpy(newData, p->mData, p->mDataSize);
        for (size_t i = 0; i < kf->mObjectsSize; i++) {
            flat_binder_object* flat =
                    reinterpret_cast<flat_binder_object*>(newData + kf->mObjects[i]);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                // Duplicate each embedded FD so the copy owns its own descriptor.
                // NOTE(review): fcntl failure (-1) is not checked here — confirm upstream intent.
                flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
            }
        }

        ipcSetDataReference(newData, p->mDataSize, kf->mObjects, kf->mObjectsSize,
                            delete_data_release_func);
#endif // BINDER_WITH_KERNEL_IPC
    }
}
2793
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
                                 size_t objectsCount, release_func relFunc) {
    // Adopt an externally-owned kernel-binder payload and object table.
    // `relFunc` is stored in mOwner and invoked later to release the memory;
    // this code uses 'mOwner == nullptr' to understand whether it owns memory.
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    freeData();

    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr); // guaranteed by freeData.

    // must match makeDangerousViewOf
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    kernelFields->mObjects = const_cast<binder_size_t*>(objects);
    kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsCount;
    mOwner = relFunc;

#ifdef BINDER_WITH_KERNEL_IPC
    // Validate the object table: offsets must be non-decreasing and
    // non-overlapping, and each entry must be a type libbinder supports.
    // On any violation the table is cleared (mObjectsSize = 0) rather than
    // trusting possibly-hostile data.
    binder_size_t minOffset = 0;
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        if (offset < minOffset) {
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            kernelFields->mObjectsSize = 0;
            break;
        }
        const flat_binder_object* flat
            = reinterpret_cast<const flat_binder_object*>(mData + offset);
        uint32_t type = flat->hdr.type;
        if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
              type == BINDER_TYPE_FD)) {
            // We should never receive other types (eg BINDER_TYPE_FDA) as long as we don't support
            // them in libbinder. If we do receive them, it probably means a kernel bug; try to
            // recover gracefully by clearing out the objects.
            android_errorWriteLog(0x534e4554, "135930648");
            android_errorWriteLog(0x534e4554, "203847542");
            ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
                  __func__, type, (uint64_t)offset);

            // WARNING: callers of ipcSetDataReference need to make sure they
            // don't rely on mObjectsSize in their release_func.
            kernelFields->mObjectsSize = 0;
            break;
        }
        if (type == BINDER_TYPE_FD) {
            // FDs from the kernel are always owned
            FdTag(flat->handle, nullptr, this);
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    // Recompute the cached "has FDs" flag over the adopted payload.
    scanForFds();
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL_IF(objectsCount != 0,
                        "Non-zero objects count passed to Parcel with kernel driver disabled");
#endif // BINDER_WITH_KERNEL_IPC
}
2851
status_t Parcel::rpcSetDataReference(
        const sp<RpcSession>& session, const uint8_t* data, size_t dataSize,
        const uint32_t* objectTable, size_t objectTableSize,
        std::vector<std::variant<unique_fd, borrowed_fd>>&& ancillaryFds, release_func relFunc) {
    // Adopt an externally-owned RPC payload. On any validation failure the
    // buffer is released immediately via relFunc and BAD_VALUE is returned.
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    LOG_ALWAYS_FATAL_IF(session == nullptr);

    // Requires exactly one ancillary FD per object-table entry.
    if (objectTableSize != ancillaryFds.size()) {
        ALOGE("objectTableSize=%zu ancillaryFds.size=%zu", objectTableSize, ancillaryFds.size());
        relFunc(data, dataSize, nullptr, 0);
        return BAD_VALUE;
    }
    for (size_t i = 0; i < objectTableSize; i++) {
        uint32_t minObjectEnd;
        // Overflow-safe check that each object header fits inside the payload.
        if (__builtin_add_overflow(objectTable[i], sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd >= dataSize) {
            ALOGE("received out of range object position: %" PRIu32 " (parcel size is %zu)",
                  objectTable[i], dataSize);
            relFunc(data, dataSize, nullptr, 0);
            return BAD_VALUE;
        }
    }

    freeData();
    markForRpc(session);

    auto* rpcFields = maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr); // guaranteed by markForRpc.

    // must match makeDangerousViewOf
    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    mOwner = relFunc;

    // Copy the object positions and take over the ancillary FDs.
    rpcFields->mObjectPositions.reserve(objectTableSize);
    for (size_t i = 0; i < objectTableSize; i++) {
        rpcFields->mObjectPositions.push_back(objectTable[i]);
    }
    if (!ancillaryFds.empty()) {
        rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
        *rpcFields->mFds = std::move(ancillaryFds);
    }

    return OK;
}
2899
print(std::ostream & to,uint32_t) const2900 void Parcel::print(std::ostream& to, uint32_t /*flags*/) const {
2901 to << "Parcel(";
2902
2903 if (errorCheck() != NO_ERROR) {
2904 const status_t err = errorCheck();
2905 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2906 } else if (dataSize() > 0) {
2907 const uint8_t* DATA = data();
2908 to << "\t" << HexDump(DATA, dataSize());
2909 #ifdef BINDER_WITH_KERNEL_IPC
2910 if (const auto* kernelFields = maybeKernelFields()) {
2911 const binder_size_t* OBJS = kernelFields->mObjects;
2912 const size_t N = objectsCount();
2913 for (size_t i = 0; i < N; i++) {
2914 const flat_binder_object* flat =
2915 reinterpret_cast<const flat_binder_object*>(DATA + OBJS[i]);
2916 to << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2917 << TypeCode(flat->hdr.type & 0x7f7f7f00) << " = " << flat->binder;
2918 }
2919 }
2920 #endif // BINDER_WITH_KERNEL_IPC
2921 } else {
2922 to << "NULL";
2923 }
2924
2925 to << ")";
2926 }
2927
releaseObjects()2928 void Parcel::releaseObjects()
2929 {
2930 auto* kernelFields = maybeKernelFields();
2931 if (kernelFields == nullptr) {
2932 return;
2933 }
2934
2935 #ifdef BINDER_WITH_KERNEL_IPC
2936 size_t i = kernelFields->mObjectsSize;
2937 if (i == 0) {
2938 return;
2939 }
2940 sp<ProcessState> proc(ProcessState::self());
2941 uint8_t* const data = mData;
2942 binder_size_t* const objects = kernelFields->mObjects;
2943 while (i > 0) {
2944 i--;
2945 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2946 release_object(proc, *flat, this);
2947 }
2948 #endif // BINDER_WITH_KERNEL_IPC
2949 }
2950
reacquireObjects(size_t objectsSize)2951 void Parcel::reacquireObjects(size_t objectsSize) {
2952 auto* kernelFields = maybeKernelFields();
2953 if (kernelFields == nullptr) {
2954 return;
2955 }
2956
2957 #ifdef BINDER_WITH_KERNEL_IPC
2958 LOG_ALWAYS_FATAL_IF(objectsSize > kernelFields->mObjectsSize,
2959 "Object size %zu out of range of %zu", objectsSize,
2960 kernelFields->mObjectsSize);
2961 size_t i = objectsSize;
2962 if (i == 0) {
2963 return;
2964 }
2965 const sp<ProcessState> proc(ProcessState::self());
2966 uint8_t* const data = mData;
2967 binder_size_t* const objects = kernelFields->mObjects;
2968 while (i > 0) {
2969 i--;
2970 const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
2971 acquire_object(proc, *flat, this, false /*tagFds*/); // they are already tagged
2972 }
2973 #else
2974 (void) objectsSize;
2975 #endif // BINDER_WITH_KERNEL_IPC
2976 }
2977
freeData()2978 void Parcel::freeData()
2979 {
2980 freeDataNoInit();
2981 initState();
2982 }
2983
void Parcel::freeDataNoInit()
{
    // Release the payload without resetting member state; freeData() wraps
    // this and then calls initState() for callers that reuse the Parcel.
    if (mOwner) {
        // Foreign-owned buffer: hand it back through the stored release_func.
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        auto* kernelFields = maybeKernelFields();
        // Close FDs before freeing, otherwise they will leak for kernel binder.
        closeFileDescriptors(/*newObjectsSize=*/0);
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
    } else {
        // Owned buffer: drop references on contained objects, optionally
        // scrub the memory, then free both the payload and the object table.
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            // Keep the process-wide allocation accounting in sync.
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            if (mDeallocZero) {
                // Zero the payload before free when scrubbing is requested.
                zeroMemory(mData, mDataSize);
            }
            free(mData);
        }
        auto* kernelFields = maybeKernelFields();
        if (kernelFields && kernelFields->mObjects) free(kernelFields->mObjects);
    }
}
3010
growData(size_t len)3011 status_t Parcel::growData(size_t len)
3012 {
3013 if (len > INT32_MAX) {
3014 // don't accept size_t values which may have come from an
3015 // inadvertent conversion from a negative int.
3016 return BAD_VALUE;
3017 }
3018
3019 if (mDataPos > mDataSize) {
3020 // b/370831157 - this case used to abort. We also don't expect mDataPos < mDataSize, but
3021 // this would only waste a bit of memory, so it's okay.
3022 ALOGE("growData only expected at the end of a Parcel. pos: %zu, size: %zu, capacity: %zu",
3023 mDataPos, len, mDataCapacity);
3024 return BAD_VALUE;
3025 }
3026
3027 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
3028 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
3029 size_t newSize = ((mDataSize+len)*3)/2;
3030 return (newSize <= mDataSize)
3031 ? (status_t) NO_MEMORY
3032 : continueWrite(std::max(newSize, (size_t) 128));
3033 }
3034
reallocZeroFree(uint8_t * data,size_t oldCapacity,size_t newCapacity,bool zero)3035 static uint8_t* reallocZeroFree(uint8_t* data, size_t oldCapacity, size_t newCapacity, bool zero) {
3036 if (!zero) {
3037 return (uint8_t*)realloc(data, newCapacity);
3038 }
3039 uint8_t* newData = (uint8_t*)malloc(newCapacity);
3040 if (!newData) {
3041 return nullptr;
3042 }
3043
3044 memcpy(newData, data, std::min(oldCapacity, newCapacity));
3045 zeroMemory(data, oldCapacity);
3046 free(data);
3047 return newData;
3048 }
3049
status_t Parcel::restartWrite(size_t desired)
{
    // Discard all contents and begin a fresh write with `desired` capacity.
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // We don't own the current buffer; release it, then allocate our own.
        freeData();
        return continueWrite(desired);
    }

    releaseObjects();

    uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
    if (!data && desired > mDataCapacity) {
        LOG_ALWAYS_FATAL("out of memory");
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    // A null result with desired <= capacity means realloc-to-smaller failed;
    // in that case keep the old (larger) buffer.
    if (data || desired == 0) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        // Keep the process-wide allocation accounting in sync with the resize.
        if (mDataCapacity > desired) {
            gParcelGlobalAllocSize -= (mDataCapacity - desired);
        } else {
            gParcelGlobalAllocSize += (desired - mDataCapacity);
        }

        if (!mData) {
            gParcelGlobalAllocCount++;
        }
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    // Reset the per-variant object bookkeeping.
    if (auto* kernelFields = maybeKernelFields()) {
        free(kernelFields->mObjects);
        kernelFields->mObjects = nullptr;
        kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = 0;
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
        kernelFields->mHasFds = false;
        kernelFields->mFdsKnown = true;
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mObjectPositions.clear();
        rpcFields->mFds.reset();
    }
    mAllowFds = true;

    return NO_ERROR;
}
3107
status_t Parcel::continueWrite(size_t desired)
{
    // Resize the backing storage to `desired` bytes, preserving as much of
    // the existing payload and object table as still fits. Three cases:
    // foreign-owned data (mOwner set), owned data, and no data yet.
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    auto* kernelFields = maybeKernelFields();
    auto* rpcFields = maybeRpcFields();

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize =
            kernelFields ? kernelFields->mObjectsSize : rpcFields->mObjectPositions.size();
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            if (kernelFields) {
#ifdef BINDER_WITH_KERNEL_IPC
                validateReadData(mDataSize); // hack to sort the objects
                // Keep only objects that end at or before `desired`.
                while (objectsSize > 0) {
                    if (kernelFields->mObjects[objectsSize - 1] + sizeof(flat_binder_object) <=
                        desired)
                        break;
                    objectsSize--;
                }
#endif // BINDER_WITH_KERNEL_IPC
            } else {
                while (objectsSize > 0) {
                    // Object size varies by type.
                    uint32_t pos = rpcFields->mObjectPositions[objectsSize - 1];
                    size_t size = sizeof(RpcFields::ObjectType);
                    uint32_t minObjectEnd;
                    // Overflow-safe bounds check before reading the type tag.
                    if (__builtin_add_overflow(pos, sizeof(RpcFields::ObjectType), &minObjectEnd) ||
                        minObjectEnd > mDataSize) {
                        return BAD_VALUE;
                    }
                    const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                    switch (type) {
                        case RpcFields::TYPE_BINDER_NULL:
                            break;
                        case RpcFields::TYPE_BINDER:
                            size += sizeof(uint64_t); // address
                            break;
                        case RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR:
                            size += sizeof(int32_t); // fd index
                            break;
                    }

                    if (pos + size <= desired) break;
                    objectsSize--;
                }
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // possession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = nullptr;

        if (kernelFields && objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // only acquire references on objects we are keeping
            reacquireObjects(objectsSize);
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                free(data);
                return status;
            }
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
#ifdef BINDER_WITH_KERNEL_IPC
        if (objects && kernelFields && kernelFields->mObjects) {
            memcpy(objects, kernelFields->mObjects, objectsSize * sizeof(binder_size_t));
            // All FDs are owned when `mOwner`, even when `cookie == 0`. When
            // we switch to `!mOwner`, we need to explicitly mark the FDs as
            // owned.
            for (size_t i = 0; i < objectsSize; i++) {
                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    flat->cookie = 1;
                }
            }
        }
        // ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        // Close the FDs of objects being dropped before releasing the old
        // buffer back to its owner.
        if (kernelFields) {
            closeFileDescriptors(objectsSize);
        }
#endif // BINDER_WITH_KERNEL_IPC
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
        mOwner = nullptr;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        if (kernelFields) {
            kernelFields->mObjects = objects;
            kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
        }

    } else if (mData) {
        // Owned data: drop references on any truncated objects, then realloc.
        if (kernelFields && objectsSize < kernelFields->mObjectsSize) {
#ifdef BINDER_WITH_KERNEL_IPC
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i = objectsSize; i < kernelFields->mObjectsSize; i++) {
                const flat_binder_object* flat =
                        reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    kernelFields->mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }

            if (objectsSize == 0) {
                free(kernelFields->mObjects);
                kernelFields->mObjects = nullptr;
                kernelFields->mObjectsCapacity = 0;
            } else {
                binder_size_t* objects =
                        (binder_size_t*)realloc(kernelFields->mObjects,
                                                objectsSize * sizeof(binder_size_t));
                // If shrinking the table fails, keep the old (larger) table.
                if (objects) {
                    kernelFields->mObjects = objects;
                    kernelFields->mObjectsCapacity = objectsSize;
                }
            }
            kernelFields->mObjectsSize = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
#else  // BINDER_WITH_KERNEL_IPC
            LOG_ALWAYS_FATAL("Non-zero numObjects for RPC Parcel");
#endif // BINDER_WITH_KERNEL_IPC
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                return status;
            }
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                          desired);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                mData = data;
                mDataCapacity = desired;
            } else {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Shrinking (or equal): no reallocation, just clamp size/position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // Sanity-log if we somehow had capacity or an object table without data.
        if (!(mDataCapacity == 0 &&
              (kernelFields == nullptr ||
               (kernelFields->mObjects == nullptr && kernelFields->mObjectsCapacity == 0)))) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity,
                  kernelFields ? kernelFields->mObjects : nullptr,
                  kernelFields ? kernelFields->mObjectsCapacity : 0, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}
3336
status_t Parcel::truncateRpcObjects(size_t newObjectsSize) {
    // Drop RPC objects from the end of the position table until only
    // `newObjectsSize` remain, erasing the FD referenced by each dropped
    // TYPE_NATIVE_FILE_DESCRIPTOR entry. Returns BAD_VALUE if the payload is
    // malformed (out-of-bounds object or invalid fd index).
    auto* rpcFields = maybeRpcFields();
    if (newObjectsSize == 0) {
        rpcFields->mObjectPositions.clear();
        if (rpcFields->mFds) {
            rpcFields->mFds->clear();
        }
        return OK;
    }
    while (rpcFields->mObjectPositions.size() > newObjectsSize) {
        uint32_t pos = rpcFields->mObjectPositions.back();
        uint32_t minObjectEnd;
        // Overflow-safe bounds check before reading the object's type tag.
        if (__builtin_add_overflow(pos, sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd > mDataSize) {
            return BAD_VALUE;
        }
        const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
        if (type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            uint32_t objectEnd;
            // The fd index (int32) follows the type tag; bounds-check it too.
            if (__builtin_add_overflow(minObjectEnd, sizeof(int32_t), &objectEnd) ||
                objectEnd > mDataSize) {
                return BAD_VALUE;
            }
            const auto fdIndex = *reinterpret_cast<const int32_t*>(mData + minObjectEnd);
            if (rpcFields->mFds == nullptr || fdIndex < 0 ||
                static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
                ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                      fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
                return BAD_VALUE;
            }
            // In practice, this always removes the last element.
            rpcFields->mFds->erase(rpcFields->mFds->begin() + fdIndex);
        }
        rpcFields->mObjectPositions.pop_back();
    }
    return OK;
}
3374
initState()3375 void Parcel::initState()
3376 {
3377 LOG_ALLOC("Parcel %p: initState", this);
3378 mError = NO_ERROR;
3379 mData = nullptr;
3380 mDataSize = 0;
3381 mDataCapacity = 0;
3382 mDataPos = 0;
3383 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
3384 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
3385 mVariantFields.emplace<KernelFields>();
3386 mAllowFds = true;
3387 mDeallocZero = false;
3388 mOwner = nullptr;
3389 mEnforceNoDataAvail = true;
3390 mServiceFuzzing = false;
3391 }
3392
scanForFds() const3393 void Parcel::scanForFds() const {
3394 auto* kernelFields = maybeKernelFields();
3395 if (kernelFields == nullptr) {
3396 return;
3397 }
3398 status_t status = hasFileDescriptorsInRange(0, dataSize(), &kernelFields->mHasFds);
3399 ALOGE_IF(status != NO_ERROR, "Error %d calling hasFileDescriptorsInRange()", status);
3400 kernelFields->mFdsKnown = true;
3401 }
3402
#ifdef BINDER_WITH_KERNEL_IPC
size_t Parcel::getOpenAshmemSize() const
{
    auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return 0;
    }

    // Sum the region sizes of all owned ashmem FDs embedded in the payload;
    // saturates to SIZE_MAX if the sum overflows.
    size_t total = 0;
#ifndef BINDER_DISABLE_BLOB
    for (size_t idx = 0; idx < kernelFields->mObjectsSize; idx++) {
        const flat_binder_object* flat =
                reinterpret_cast<const flat_binder_object*>(mData + kernelFields->mObjects[idx]);

        // cookie is compared against zero for historical reasons
        // > obj.cookie = takeOwnership ? 1 : 0;
        if (flat->hdr.type == BINDER_TYPE_FD && flat->cookie != 0 && ashmem_valid(flat->handle)) {
            int regionSize = ashmem_get_size_region(flat->handle);
            if (__builtin_add_overflow(total, regionSize, &total)) {
                ALOGE("Overflow when computing ashmem size.");
                return SIZE_MAX;
            }
        }
    }
#endif
    return total;
}
#endif // BINDER_WITH_KERNEL_IPC
3431
3432 // --- Parcel::Blob ---
3433
Blob()3434 Parcel::Blob::Blob() :
3435 mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
3436 }
3437
~Blob()3438 Parcel::Blob::~Blob() {
3439 release();
3440 }
3441
release()3442 void Parcel::Blob::release() {
3443 if (mFd != -1 && mData) {
3444 if (::munmap(mData, mSize) == -1) {
3445 ALOGW("munmap() failed: %s", strerror(errno));
3446 }
3447 }
3448 clear();
3449 }
3450
init(int fd,void * data,size_t size,bool isMutable)3451 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
3452 mFd = fd;
3453 mData = data;
3454 mSize = size;
3455 mMutable = isMutable;
3456 }
3457
clear()3458 void Parcel::Blob::clear() {
3459 mFd = -1;
3460 mData = nullptr;
3461 mSize = 0;
3462 mMutable = false;
3463 }
3464
3465 } // namespace android
3466