1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <sys/mman.h>
28 #include <sys/resource.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <unistd.h>
32
33 #include <binder/Binder.h>
34 #include <binder/BpBinder.h>
35 #include <binder/IPCThreadState.h>
36 #include <binder/Parcel.h>
37 #include <binder/ProcessState.h>
38 #include <binder/Stability.h>
39 #include <binder/Status.h>
40 #include <binder/TextOutput.h>
41
42 #include <android-base/scopeguard.h>
43 #include <cutils/ashmem.h>
44 #include <cutils/compiler.h>
45 #include <utils/Flattenable.h>
46 #include <utils/Log.h>
47 #include <utils/String16.h>
48 #include <utils/String8.h>
49 #include <utils/misc.h>
50
51 #include "OS.h"
52 #include "RpcState.h"
53 #include "Static.h"
54 #include "Utils.h"
55
56 // A lot of code in this file uses definitions from the
57 // Linux kernel header for Binder <linux/android/binder.h>
58 // which is included indirectly via "binder_module.h".
59 // Non-Linux OSes do not have that header, so libbinder should be
60 // built for those targets without kernel binder support, i.e.,
61 // without BINDER_WITH_KERNEL_IPC. For this reason, all code in this
62 // file that depends on kernel binder, including the header itself,
63 // is conditional on BINDER_WITH_KERNEL_IPC.
64 #ifdef BINDER_WITH_KERNEL_IPC
65 #include <linux/sched.h>
66 #include "binder_module.h"
67 #else // BINDER_WITH_KERNEL_IPC
68 // Needed by {read,write}Pointer
69 typedef uintptr_t binder_uintptr_t;
70 #endif // BINDER_WITH_KERNEL_IPC
71
72 #define LOG_REFS(...)
73 // #define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
74 #define LOG_ALLOC(...)
75 // #define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
76
77 // ---------------------------------------------------------------------------
78
79 // This macro should never be used at runtime, as a too large value
80 // of s could cause an integer overflow. Instead, you should always
81 // use the wrapper function pad_size()
82 #define PAD_SIZE_UNSAFE(s) (((s) + 3) & ~3UL)
83
pad_size(size_t s)84 static size_t pad_size(size_t s) {
85 if (s > (std::numeric_limits<size_t>::max() - 3)) {
86 LOG_ALWAYS_FATAL("pad size too big %zu", s);
87 }
88 return PAD_SIZE_UNSAFE(s);
89 }
90
91 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
92 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
93
94 namespace android {
95
// many things compile this into prebuilts on the stack
#ifdef __LP64__
static_assert(sizeof(Parcel) == 120);
#else
static_assert(sizeof(Parcel) == 60);
#endif

// Process-wide accounting of live Parcel allocations, exposed via
// getGlobalAllocCount() / getGlobalAllocSize().
static std::atomic<size_t> gParcelGlobalAllocCount;
static std::atomic<size_t> gParcelGlobalAllocSize;

// Maximum number of file descriptors per Parcel.
constexpr size_t kMaxFds = 1024;

// Maximum size of a blob to transfer in-place.
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;

// Tag values distinguishing how a blob's payload is carried.
enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};
117
118 #ifdef BINDER_WITH_KERNEL_IPC
// Takes a strong reference on the binder carried by |obj| on behalf of |who|
// (an opaque cookie used only for reference tracking). BINDER_TYPE_FD entries
// carry no refcounted object, so they are a no-op here; fd ownership is
// handled in release_object().
static void acquire_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            // Local binder: cookie holds the raw IBinder pointer.
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            // Remote binder: resolve the kernel handle to a proxy, then pin it.
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            // Nothing to acquire; see release_object() for the owned-fd close.
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.hdr.type);
}
143
// Drops the reference taken by acquire_object() for |who|. For fd entries,
// closes the descriptor only when this Parcel owns it (cookie != 0).
static void release_object(const sp<ProcessState>& proc, const flat_binder_object& obj,
                           const void* who) {
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            // Local binder: cookie holds the raw IBinder pointer.
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %llu", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) { // owned
                close(obj.handle);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
}
171 #endif // BINDER_WITH_KERNEL_IPC
172
toRawFd(const std::variant<base::unique_fd,base::borrowed_fd> & v)173 static int toRawFd(const std::variant<base::unique_fd, base::borrowed_fd>& v) {
174 return std::visit([](const auto& fd) { return fd.get(); }, v);
175 }
176
// An RPC parcel is always bound to a live session; constructing RpcFields
// with a null session is a programming error, so crash immediately.
Parcel::RpcFields::RpcFields(const sp<RpcSession>& session) : mSession(session) {
    LOG_ALWAYS_FATAL_IF(mSession == nullptr);
}
180
finishFlattenBinder(const sp<IBinder> & binder)181 status_t Parcel::finishFlattenBinder(const sp<IBinder>& binder)
182 {
183 internal::Stability::tryMarkCompilationUnit(binder.get());
184 int16_t rep = internal::Stability::getRepr(binder.get());
185 return writeInt32(rep);
186 }
187
finishUnflattenBinder(const sp<IBinder> & binder,sp<IBinder> * out) const188 status_t Parcel::finishUnflattenBinder(
189 const sp<IBinder>& binder, sp<IBinder>* out) const
190 {
191 int32_t stability;
192 status_t status = readInt32(&stability);
193 if (status != OK) return status;
194
195 status = internal::Stability::setRepr(binder.get(), static_cast<int16_t>(stability),
196 true /*log*/);
197 if (status != OK) return status;
198
199 *out = binder;
200 return OK;
201 }
202
203 #ifdef BINDER_WITH_KERNEL_IPC
schedPolicyMask(int policy,int priority)204 static constexpr inline int schedPolicyMask(int policy, int priority) {
205 return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
206 }
207 #endif // BINDER_WITH_KERNEL_IPC
208
// Writes |binder| into the parcel in the format matching the parcel's
// transport: a (null-flag, 64-bit address) pair for RPC parcels, or a
// flat_binder_object for kernel binder. Both formats end with the stability
// word appended by finishFlattenBinder().
status_t Parcel::flattenBinder(const sp<IBinder>& binder) {
    BBinder* local = nullptr;
    if (binder) local = binder->localBinder();
    if (local) local->setParceled();

    if (const auto* rpcFields = maybeRpcFields()) {
        if (binder) {
            status_t status = writeInt32(1); // non-null
            if (status != OK) return status;
            uint64_t address;
            // TODO(b/167966510): need to undo this if the Parcel is not sent
            status = rpcFields->mSession->state()->onBinderLeaving(rpcFields->mSession, binder,
                                                                   &address);
            if (status != OK) return status;
            status = writeUint64(address);
            if (status != OK) return status;
        } else {
            status_t status = writeInt32(0); // null
            if (status != OK) return status;
        }
        return finishFlattenBinder(binder);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;

    // Default scheduling hint unless background scheduling is disabled; a
    // local binder's explicit min policy/priority overrides this below.
    int schedBits = 0;
    if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
        schedBits = schedPolicyMask(SCHED_NORMAL, 19);
    }

    if (binder != nullptr) {
        if (!local) {
            // Remote binder: forward the handle of our existing proxy.
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            } else {
                if (proxy->isRpcBinder()) {
                    ALOGE("Sending a socket binder over kernel binder is prohibited");
                    return INVALID_OPERATION;
                }
            }
            const int32_t handle = proxy ? proxy->getPrivateAccessor().binderHandle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.flags = 0;
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            // Local binder: cookie carries the raw BBinder pointer.
            int policy = local->getMinSchedulerPolicy();
            int priority = local->getMinSchedulerPriority();

            if (policy != 0 || priority != 0) {
                // override value, since it is set explicitly
                schedBits = schedPolicyMask(policy, priority);
            }
            obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            if (local->isInheritRt()) {
                obj.flags |= FLAT_BINDER_FLAG_INHERIT_RT;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        // Null binder: a zeroed BINDER_TYPE_BINDER record.
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.flags = 0;
        obj.binder = 0;
        obj.cookie = 0;
    }

    obj.flags |= schedBits;

    status_t status = writeObject(obj, false);
    if (status != OK) return status;

    return finishFlattenBinder(binder);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
294
// Reads a binder out of the parcel: the RPC (null-flag + address) format for
// RPC parcels, otherwise a flat_binder_object from the kernel format. The
// trailing stability word is consumed by finishUnflattenBinder().
status_t Parcel::unflattenBinder(sp<IBinder>* out) const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        int32_t isPresent;
        status_t status = readInt32(&isPresent);
        if (status != OK) return status;

        sp<IBinder> binder;

        if (isPresent & 1) {
            uint64_t addr;
            if (status_t status = readUint64(&addr); status != OK) return status;
            // Resolve the session-scoped address into a binder, then drop any
            // extra refs the remote may have sent along with it.
            if (status_t status =
                        rpcFields->mSession->state()->onBinderEntering(rpcFields->mSession, addr,
                                                                       &binder);
                status != OK)
                return status;
            if (status_t status =
                        rpcFields->mSession->state()->flushExcessBinderRefs(rpcFields->mSession,
                                                                            addr, binder);
                status != OK)
                return status;
        }

        return finishUnflattenBinder(binder, out);
    }

#ifdef BINDER_WITH_KERNEL_IPC
    const flat_binder_object* flat = readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER: {
                // Local binder: cookie is the raw IBinder pointer.
                sp<IBinder> binder =
                        sp<IBinder>::fromExisting(reinterpret_cast<IBinder*>(flat->cookie));
                return finishUnflattenBinder(binder, out);
            }
            case BINDER_TYPE_HANDLE: {
                // Remote binder: resolve the kernel handle into a proxy.
                sp<IBinder> binder =
                        ProcessState::self()->getStrongProxyForHandle(flat->handle);
                return finishUnflattenBinder(binder, out);
            }
        }
    }
    return BAD_TYPE;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
345
346 // ---------------------------------------------------------------------------
347
Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    // All member initialization lives in initState() so it can be shared with
    // the various reset/free paths.
    initState();
}
353
Parcel::~Parcel()
{
    // Releases the data buffer and any held object references without
    // re-initializing state (the object is going away).
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}
359
getGlobalAllocSize()360 size_t Parcel::getGlobalAllocSize() {
361 return gParcelGlobalAllocSize.load();
362 }
363
getGlobalAllocCount()364 size_t Parcel::getGlobalAllocCount() {
365 return gParcelGlobalAllocCount.load();
366 }
367
data() const368 const uint8_t* Parcel::data() const
369 {
370 return mData;
371 }
372
dataSize() const373 size_t Parcel::dataSize() const
374 {
375 return (mDataSize > mDataPos ? mDataSize : mDataPos);
376 }
377
dataBufferSize() const378 size_t Parcel::dataBufferSize() const {
379 return mDataSize;
380 }
381
dataAvail() const382 size_t Parcel::dataAvail() const
383 {
384 size_t result = dataSize() - dataPosition();
385 if (result > INT32_MAX) {
386 LOG_ALWAYS_FATAL("result too big: %zu", result);
387 }
388 return result;
389 }
390
dataPosition() const391 size_t Parcel::dataPosition() const
392 {
393 return mDataPos;
394 }
395
dataCapacity() const396 size_t Parcel::dataCapacity() const
397 {
398 return mDataCapacity;
399 }
400
setDataSize(size_t size)401 status_t Parcel::setDataSize(size_t size)
402 {
403 if (size > INT32_MAX) {
404 // don't accept size_t values which may have come from an
405 // inadvertent conversion from a negative int.
406 return BAD_VALUE;
407 }
408
409 status_t err;
410 err = continueWrite(size);
411 if (err == NO_ERROR) {
412 mDataSize = size;
413 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
414 }
415 return err;
416 }
417
// Moves the read/write cursor. Declared const but mutates mutable
// bookkeeping, since reading from a parcel advances the cursor.
void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        LOG_ALWAYS_FATAL("pos too big: %zu", pos);
    }

    mDataPos = pos;
    if (const auto* kernelFields = maybeKernelFields()) {
        // The cursor moved, so cached object-scan state is no longer valid.
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
    }
}
432
setDataCapacity(size_t size)433 status_t Parcel::setDataCapacity(size_t size)
434 {
435 if (size > INT32_MAX) {
436 // don't accept size_t values which may have come from an
437 // inadvertent conversion from a negative int.
438 return BAD_VALUE;
439 }
440
441 if (size > mDataCapacity) return continueWrite(size);
442 return NO_ERROR;
443 }
444
setData(const uint8_t * buffer,size_t len)445 status_t Parcel::setData(const uint8_t* buffer, size_t len)
446 {
447 if (len > INT32_MAX) {
448 // don't accept size_t values which may have come from an
449 // inadvertent conversion from a negative int.
450 return BAD_VALUE;
451 }
452
453 status_t err = restartWrite(len);
454 if (err == NO_ERROR) {
455 memcpy(const_cast<uint8_t*>(data()), buffer, len);
456 mDataSize = len;
457 if (auto* kernelFields = maybeKernelFields()) {
458 kernelFields->mFdsKnown = false;
459 }
460 }
461 return err;
462 }
463
// Appends bytes [offset, offset+len) of |parcel| to this parcel, fixing up
// and re-acquiring any binder objects / file descriptors that fall entirely
// inside the copied window. Both parcels must use the same transport format
// (and, for RPC, the same session).
status_t Parcel::appendFrom(const Parcel* parcel, size_t offset, size_t len) {
    if (isForRpc() != parcel->isForRpc()) {
        ALOGE("Cannot append Parcel from one context to another. They may be different formats, "
              "and objects are specific to a context.");
        return BAD_TYPE;
    }
    if (isForRpc() && maybeRpcFields()->mSession != parcel->maybeRpcFields()->mSession) {
        ALOGE("Cannot append Parcels from different sessions");
        return BAD_TYPE;
    }

    status_t err;
    const uint8_t* data = parcel->mData;
    int startPos = mDataPos;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        auto* otherKernelFields = parcel->maybeKernelFields();
        LOG_ALWAYS_FATAL_IF(otherKernelFields == nullptr);

        const binder_size_t* objects = otherKernelFields->mObjects;
        size_t size = otherKernelFields->mObjectsSize;
        // Count objects in range: only objects lying entirely within the
        // copied window are carried over.
        int firstIndex = -1, lastIndex = -2;
        for (int i = 0; i < (int)size; i++) {
            size_t off = objects[i];
            if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
                if (firstIndex == -1) {
                    firstIndex = i;
                }
                lastIndex = i;
            }
        }
        int numObjects = lastIndex - firstIndex + 1;
        if (numObjects > 0) {
            const sp<ProcessState> proc(ProcessState::self());
            // grow objects
            if (kernelFields->mObjectsCapacity < kernelFields->mObjectsSize + numObjects) {
                // Guard each step of the 3/2 growth computation against overflow.
                if ((size_t)numObjects > SIZE_MAX - kernelFields->mObjectsSize)
                    return NO_MEMORY; // overflow
                if (kernelFields->mObjectsSize + numObjects > SIZE_MAX / 3)
                    return NO_MEMORY; // overflow
                size_t newSize = ((kernelFields->mObjectsSize + numObjects) * 3) / 2;
                if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
                binder_size_t* objects = (binder_size_t*)realloc(kernelFields->mObjects,
                                                                 newSize * sizeof(binder_size_t));
                if (objects == (binder_size_t*)nullptr) {
                    return NO_MEMORY;
                }
                kernelFields->mObjects = objects;
                kernelFields->mObjectsCapacity = newSize;
            }

            // append and acquire objects
            int idx = kernelFields->mObjectsSize;
            for (int i = firstIndex; i <= lastIndex; i++) {
                // Rebase the object's offset from the source window onto the
                // destination position it was copied to.
                size_t off = objects[i] - offset + startPos;
                kernelFields->mObjects[idx++] = off;
                kernelFields->mObjectsSize++;

                flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(mData + off);
                acquire_object(proc, *flat, this);

                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // If this is a file descriptor, we need to dup it so the
                    // new Parcel now owns its own fd, and can declare that we
                    // officially know we have fds.
                    // NOTE(review): the fcntl() result is not checked; on
                    // failure flat->handle becomes -1.
                    flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                    flat->cookie = 1;
                    kernelFields->mHasFds = kernelFields->mFdsKnown = true;
                    if (!mAllowFds) {
                        err = FDS_NOT_ALLOWED;
                    }
                }
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else {
        auto* rpcFields = maybeRpcFields();
        LOG_ALWAYS_FATAL_IF(rpcFields == nullptr);
        auto* otherRpcFields = parcel->maybeRpcFields();
        if (otherRpcFields == nullptr) {
            return BAD_TYPE;
        }
        if (rpcFields->mSession != otherRpcFields->mSession) {
            return BAD_TYPE;
        }

        // The loop below temporarily moves mDataPos to patch fd indices in
        // place; always restore the cursor on exit, including error paths.
        const size_t savedDataPos = mDataPos;
        base::ScopeGuard scopeGuard = [&]() { mDataPos = savedDataPos; };

        rpcFields->mObjectPositions.reserve(otherRpcFields->mObjectPositions.size());
        if (otherRpcFields->mFds != nullptr) {
            if (rpcFields->mFds == nullptr) {
                rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
            }
            rpcFields->mFds->reserve(otherRpcFields->mFds->size());
        }
        for (size_t i = 0; i < otherRpcFields->mObjectPositions.size(); i++) {
            const binder_size_t objPos = otherRpcFields->mObjectPositions[i];
            if (offset <= objPos && objPos < offset + len) {
                size_t newDataPos = objPos - offset + startPos;
                rpcFields->mObjectPositions.push_back(newDataPos);

                mDataPos = newDataPos;
                int32_t objectType;
                if (status_t status = readInt32(&objectType); status != OK) {
                    return status;
                }
                if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    continue;
                }

                if (!mAllowFds) {
                    return FDS_NOT_ALLOWED;
                }

                // Read FD, duplicate, and add to list.
                int32_t fdIndex;
                if (status_t status = readInt32(&fdIndex); status != OK) {
                    return status;
                }
                int oldFd = toRawFd(otherRpcFields->mFds->at(fdIndex));
                // To match kernel binder behavior, we always dup, even if the
                // FD was unowned in the source parcel.
                int newFd = -1;
                if (status_t status = dupFileDescriptor(oldFd, &newFd); status != OK) {
                    ALOGW("Failed to duplicate file descriptor %d: %s", oldFd, strerror(-status));
                }
                rpcFields->mFds->emplace_back(base::unique_fd(newFd));
                // Fixup the index in the data.
                mDataPos = newDataPos + 4;
                if (status_t status = writeInt32(rpcFields->mFds->size() - 1); status != OK) {
                    return status;
                }
            }
        }
    }

    return err;
}
640
compareData(const Parcel & other)641 int Parcel::compareData(const Parcel& other) {
642 size_t size = dataSize();
643 if (size != other.dataSize()) {
644 return size < other.dataSize() ? -1 : 1;
645 }
646 return memcmp(data(), other.data(), size);
647 }
648
compareDataInRange(size_t thisOffset,const Parcel & other,size_t otherOffset,size_t len,int * result) const649 status_t Parcel::compareDataInRange(size_t thisOffset, const Parcel& other, size_t otherOffset,
650 size_t len, int* result) const {
651 if (len > INT32_MAX || thisOffset > INT32_MAX || otherOffset > INT32_MAX) {
652 // Don't accept size_t values which may have come from an inadvertent conversion from a
653 // negative int.
654 return BAD_VALUE;
655 }
656 size_t thisLimit;
657 if (__builtin_add_overflow(thisOffset, len, &thisLimit) || thisLimit > mDataSize) {
658 return BAD_VALUE;
659 }
660 size_t otherLimit;
661 if (__builtin_add_overflow(otherOffset, len, &otherLimit) || otherLimit > other.mDataSize) {
662 return BAD_VALUE;
663 }
664 *result = memcmp(data() + thisOffset, other.data() + otherOffset, len);
665 return NO_ERROR;
666 }
667
allowFds() const668 bool Parcel::allowFds() const
669 {
670 return mAllowFds;
671 }
672
pushAllowFds(bool allowFds)673 bool Parcel::pushAllowFds(bool allowFds)
674 {
675 const bool origValue = mAllowFds;
676 if (!allowFds) {
677 mAllowFds = false;
678 }
679 return origValue;
680 }
681
restoreAllowFds(bool lastValue)682 void Parcel::restoreAllowFds(bool lastValue)
683 {
684 mAllowFds = lastValue;
685 }
686
// True if the parcel currently contains any file descriptors. For kernel
// parcels the answer is computed lazily by scanning the object table.
bool Parcel::hasFileDescriptors() const
{
    if (const auto* rpcFields = maybeRpcFields()) {
        return rpcFields->mFds != nullptr && !rpcFields->mFds->empty();
    }
    // Not an RPC parcel, so kernel fields are expected to be present.
    auto* kernelFields = maybeKernelFields();
    if (!kernelFields->mFdsKnown) {
        // Lazily scan the object table to discover fds.
        scanForFds();
    }
    return kernelFields->mHasFds;
}
698
// Debug helper: re-reads every local (BINDER_TYPE_BINDER) object in the
// parcel and returns the strong binders found. Restores the read cursor
// before returning. Returns empty for non-kernel parcels or non-kernel builds.
std::vector<sp<IBinder>> Parcel::debugReadAllStrongBinders() const {
    std::vector<sp<IBinder>> ret;

#ifdef BINDER_WITH_KERNEL_IPC
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return ret;
    }

    size_t initPosition = dataPosition();
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        const flat_binder_object* flat =
                reinterpret_cast<const flat_binder_object*>(mData + offset);
        if (flat->hdr.type != BINDER_TYPE_BINDER) continue;

        // Temporarily seek to the object and read it through the normal path.
        setDataPosition(offset);

        sp<IBinder> binder = readStrongBinder();
        if (binder != nullptr) ret.push_back(binder);
    }

    setDataPosition(initPosition);
#endif // BINDER_WITH_KERNEL_IPC

    return ret;
}
726
// Debug helper: returns the raw fd numbers of every file descriptor object in
// the parcel. For kernel parcels the read cursor is restored afterwards; for
// RPC parcels the fd table is walked directly.
std::vector<int> Parcel::debugReadAllFileDescriptors() const {
    std::vector<int> ret;

    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t initPosition = dataPosition();
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            binder_size_t offset = kernelFields->mObjects[i];
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + offset);
            if (flat->hdr.type != BINDER_TYPE_FD) continue;

            // Temporarily seek to the object and read it through the normal path.
            setDataPosition(offset);

            int fd = readFileDescriptor();
            LOG_ALWAYS_FATAL_IF(fd == -1);
            ret.push_back(fd);
        }
        setDataPosition(initPosition);
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif
    } else if (const auto* rpcFields = maybeRpcFields(); rpcFields && rpcFields->mFds) {
        for (const auto& fd : *rpcFields->mFds) {
            ret.push_back(toRawFd(fd));
        }
    }

    return ret;
}
757
// Sets |*result| to whether any object lying inside [offset, offset+len) is a
// file descriptor. Returns BAD_VALUE when the window is out of range.
status_t Parcel::hasFileDescriptorsInRange(size_t offset, size_t len, bool* result) const {
    if (len > INT32_MAX || offset > INT32_MAX) {
        // Don't accept size_t values which may have come from an inadvertent conversion from a
        // negative int.
        return BAD_VALUE;
    }
    size_t limit;
    if (__builtin_add_overflow(offset, len, &limit) || limit > mDataSize) {
        return BAD_VALUE;
    }
    *result = false;
    if (const auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
            size_t pos = kernelFields->mObjects[i];
            if (pos < offset) continue;
            if (pos + sizeof(flat_binder_object) > offset + len) {
                // Past the window: with a sorted object table we can stop
                // early; otherwise later entries may still fall inside it.
                if (kernelFields->mObjectsSorted) {
                    break;
                } else {
                    continue;
                }
            }
            const flat_binder_object* flat =
                    reinterpret_cast<const flat_binder_object*>(mData + pos);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                *result = true;
                break;
            }
        }
#else
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    } else if (const auto* rpcFields = maybeRpcFields()) {
        for (uint32_t pos : rpcFields->mObjectPositions) {
            if (offset <= pos && pos < limit) {
                const auto* type = reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
                if (*type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
                    *result = true;
                    break;
                }
            }
        }
    }
    return NO_ERROR;
}
805
// Requests that the backing buffer be zeroed when it is deallocated, so data
// written to this parcel does not linger in freed memory.
void Parcel::markSensitive() const
{
    mDeallocZero = true;
}
810
// Selects the wire format based on the destination binder: binders backed by
// an RPC session switch this parcel to the RPC format; otherwise the default
// (kernel) format is kept. Must be called before any data is written.
void Parcel::markForBinder(const sp<IBinder>& binder) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr, "format must be set before data is written");

    if (binder && binder->remoteBinder() && binder->remoteBinder()->isRpcBinder()) {
        markForRpc(binder->remoteBinder()->getPrivateAccessor().rpcSession());
    }
}
818
// Switches this parcel to the RPC wire format, bound to |session|.
void Parcel::markForRpc(const sp<RpcSession>& session) {
    LOG_ALWAYS_FATAL_IF(mData != nullptr && mOwner == nullptr,
                        "format must be set before data is written OR on IPC data");

    // Replaces the kernel fields with RpcFields in the variant.
    mVariantFields.emplace<RpcFields>(session);
}
825
isForRpc() const826 bool Parcel::isForRpc() const {
827 return std::holds_alternative<RpcFields>(mVariantFields);
828 }
829
updateWorkSourceRequestHeaderPosition() const830 void Parcel::updateWorkSourceRequestHeaderPosition() const {
831 auto* kernelFields = maybeKernelFields();
832 if (kernelFields == nullptr) {
833 return;
834 }
835
836 // Only update the request headers once. We only want to point
837 // to the first headers read/written.
838 if (!kernelFields->mRequestHeaderPresent) {
839 kernelFields->mWorkSourceRequestHeaderPosition = dataPosition();
840 kernelFields->mRequestHeaderPresent = true;
841 }
842 }
843
#ifdef BINDER_WITH_KERNEL_IPC
// Build-variant marker written after the strict-mode/work-source words so
// that enforceInterface() can detect mixed copies of libbinder.
#if defined(__ANDROID_VNDK__)
constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
#elif defined(__ANDROID_RECOVERY__)
constexpr int32_t kHeader = B_PACK_CHARS('R', 'E', 'C', 'O');
#else
constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
#endif
#endif // BINDER_WITH_KERNEL_IPC
853
// Write RPC headers. (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    return writeInterfaceToken(interface.string(), interface.size());
}
859
// For kernel parcels, writes the request header (strict-mode policy,
// work-source uid, build-variant word), then the interface name itself.
// RPC parcels carry only the name.
status_t Parcel::writeInterfaceToken(const char16_t* str, size_t len) {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        const IPCThreadState* threadState = IPCThreadState::self();
        writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
        // Remember where the work-source word lives so it can be patched later.
        updateWorkSourceRequestHeaderPosition();
        writeInt32(threadState->shouldPropagateWorkSource() ? threadState->getCallingWorkSourceUid()
                                                            : IPCThreadState::kUnsetWorkSource);
        writeInt32(kHeader);
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // currently the interface identification token is just its name as a string
    return writeString16(str, len);
}
878
replaceCallingWorkSourceUid(uid_t uid)879 bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
880 {
881 auto* kernelFields = maybeKernelFields();
882 if (kernelFields == nullptr) {
883 return false;
884 }
885 if (!kernelFields->mRequestHeaderPresent) {
886 return false;
887 }
888
889 const size_t initialPosition = dataPosition();
890 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
891 status_t err = writeInt32(uid);
892 setDataPosition(initialPosition);
893 return err == NO_ERROR;
894 }
895
readCallingWorkSourceUid() const896 uid_t Parcel::readCallingWorkSourceUid() const
897 {
898 auto* kernelFields = maybeKernelFields();
899 if (kernelFields == nullptr) {
900 return false;
901 }
902 if (!kernelFields->mRequestHeaderPresent) {
903 return IPCThreadState::kUnsetWorkSource;
904 }
905
906 const size_t initialPosition = dataPosition();
907 setDataPosition(kernelFields->mWorkSourceRequestHeaderPosition);
908 uid_t uid = readInt32();
909 setDataPosition(initialPosition);
910 return uid;
911 }
912
// Convenience: verifies this parcel's token against |binder|'s own descriptor.
bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

// String16 overload; forwards to the (chars, len) implementation below.
bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    return enforceInterface(interface.string(), interface.size(), threadState);
}
923
// Consumes and applies the request header written by writeInterfaceToken()
// (strict-mode policy, work source, build-variant word), then compares the
// interface name in the parcel against |interface|/|len|. Returns true only
// when the header is sane and the names match.
bool Parcel::enforceInterface(const char16_t* interface,
                              size_t len,
                              IPCThreadState* threadState) const
{
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        // StrictModePolicy.
        int32_t strictPolicy = readInt32();
        if (threadState == nullptr) {
            threadState = IPCThreadState::self();
        }
        if ((threadState->getLastTransactionBinderFlags() & IBinder::FLAG_ONEWAY) != 0) {
            // For one-way calls, the callee is running entirely
            // disconnected from the caller, so disable StrictMode entirely.
            // Not only does disk/network usage not impact the caller, but
            // there's no way to communicate back violations anyway.
            threadState->setStrictModePolicy(0);
        } else {
            threadState->setStrictModePolicy(strictPolicy);
        }
        // WorkSource.
        updateWorkSourceRequestHeaderPosition();
        int32_t workSource = readInt32();
        threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
        // vendor header
        int32_t header = readInt32();
        if (header != kHeader) {
            ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader,
                  header);
            return false;
        }
#else  // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
        (void)threadState;
        return false;
#endif // BINDER_WITH_KERNEL_IPC
    }

    // Interface descriptor.
    size_t parcel_interface_len;
    const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
    if (len == parcel_interface_len &&
            (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
              String8(interface, len).string(),
              String8(parcel_interface, parcel_interface_len).string());
        return false;
    }
}
975
// Enables/disables the enforceNoDataAvail() check below.
void Parcel::setEnforceNoDataAvail(bool enforceNoDataAvail) {
    mEnforceNoDataAvail = enforceNoDataAvail;
}
979
enforceNoDataAvail() const980 binder::Status Parcel::enforceNoDataAvail() const {
981 if (!mEnforceNoDataAvail) {
982 return binder::Status::ok();
983 }
984
985 const auto n = dataAvail();
986 if (n == 0) {
987 return binder::Status::ok();
988 }
989 return binder::Status::
990 fromExceptionCode(binder::Status::Exception::EX_BAD_PARCELABLE,
991 String8::format("Parcel data not fully consumed, unread size: %zu",
992 n));
993 }
994
objectsCount() const995 size_t Parcel::objectsCount() const
996 {
997 if (const auto* kernelFields = maybeKernelFields()) {
998 return kernelFields->mObjectsSize;
999 }
1000 return 0;
1001 }
1002
// Returns the parcel's recorded error state (NO_ERROR when healthy).
status_t Parcel::errorCheck() const
{
    return mError;
}

// Records an error state on the parcel; write() returns mError on failure.
void Parcel::setError(status_t err)
{
    mError = err;
}
1012
finishWrite(size_t len)1013 status_t Parcel::finishWrite(size_t len)
1014 {
1015 if (len > INT32_MAX) {
1016 // don't accept size_t values which may have come from an
1017 // inadvertent conversion from a negative int.
1018 return BAD_VALUE;
1019 }
1020
1021 //printf("Finish write of %d\n", len);
1022 mDataPos += len;
1023 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
1024 if (mDataPos > mDataSize) {
1025 mDataSize = mDataPos;
1026 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
1027 }
1028 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
1029 return NO_ERROR;
1030 }
1031
writeUnpadded(const void * data,size_t len)1032 status_t Parcel::writeUnpadded(const void* data, size_t len)
1033 {
1034 if (len > INT32_MAX) {
1035 // don't accept size_t values which may have come from an
1036 // inadvertent conversion from a negative int.
1037 return BAD_VALUE;
1038 }
1039
1040 size_t end = mDataPos + len;
1041 if (end < mDataPos) {
1042 // integer overflow
1043 return BAD_VALUE;
1044 }
1045
1046 if (end <= mDataCapacity) {
1047 restart_write:
1048 memcpy(mData+mDataPos, data, len);
1049 return finishWrite(len);
1050 }
1051
1052 status_t err = growData(len);
1053 if (err == NO_ERROR) goto restart_write;
1054 return err;
1055 }
1056
write(const void * data,size_t len)1057 status_t Parcel::write(const void* data, size_t len)
1058 {
1059 if (len > INT32_MAX) {
1060 // don't accept size_t values which may have come from an
1061 // inadvertent conversion from a negative int.
1062 return BAD_VALUE;
1063 }
1064
1065 void* const d = writeInplace(len);
1066 if (d) {
1067 memcpy(d, data, len);
1068 return NO_ERROR;
1069 }
1070 return mError;
1071 }
1072
// Reserves `len` bytes at the current write position and returns a pointer
// to them, growing the buffer when capacity is insufficient. The reserved
// region is padded to a 4-byte boundary and the 1-3 pad bytes are zeroed by
// masking so uninitialized heap bytes never reach the reader. Returns
// nullptr on overflow or allocation failure.
void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end? Zero the trailing pad bytes by AND-ing the
        // final 32-bit word with a mask keeping only the payload bytes.
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    // Capacity exhausted: grow, then retry the in-capacity path above.
    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}
1118
writeUtf8AsUtf16(const std::string & str)1119 status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
1120 const uint8_t* strData = (uint8_t*)str.data();
1121 const size_t strLen= str.length();
1122 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
1123 if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
1124 return BAD_VALUE;
1125 }
1126
1127 status_t err = writeInt32(utf16Len);
1128 if (err) {
1129 return err;
1130 }
1131
1132 // Allocate enough bytes to hold our converted string and its terminating NULL.
1133 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
1134 if (!dst) {
1135 return NO_MEMORY;
1136 }
1137
1138 utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);
1139
1140 return NO_ERROR;
1141 }
1142
1143
// The overloads below all forward to the templated writeData() helper, which
// uniformly handles nullable wrappers (std::optional / std::unique_ptr),
// vector length prefixes, and per-element encoding.
status_t Parcel::writeUtf8AsUtf16(const std::optional<std::string>& str) { return writeData(str); }
status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) { return writeData(str); }

status_t Parcel::writeString16(const std::optional<String16>& str) { return writeData(str); }
status_t Parcel::writeString16(const std::unique_ptr<String16>& str) { return writeData(str); }

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::optional<std::vector<uint8_t>>& val) { return writeData(val); }
status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val){ return writeData(val); }
status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::optional<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::optional<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::optional<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::vector<float>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::optional<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::vector<double>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::optional<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::vector<bool>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::optional<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::vector<char16_t>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::optional<std::vector<char16_t>>& val) { return writeData(val); }
status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val) { return writeData(val); }

status_t Parcel::writeString16Vector(const std::vector<String16>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::optional<std::vector<std::optional<String16>>>& val) { return writeData(val); }
status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::optional<std::vector<std::optional<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) { return writeData(val); }
status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) { return writeData(val); }

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) { return writeData(val); }
status_t Parcel::writeUniqueFileDescriptorVector(const std::optional<std::vector<base::unique_fd>>& val) { return writeData(val); }
status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) { return writeData(val); }

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::optional<std::vector<sp<IBinder>>>& val) { return writeData(val); }
status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val) { return writeData(val); }

status_t Parcel::writeParcelable(const Parcelable& parcelable) { return writeData(parcelable); }
1198
// Read-side counterparts of the writeData() delegators above: each forwards
// to the templated readData() helper, which decodes nullable wrappers and
// length-prefixed vectors uniformly.
status_t Parcel::readUtf8FromUtf16(std::optional<std::string>* str) const { return readData(str); }
status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const { return readData(str); }

status_t Parcel::readString16(std::optional<String16>* pArg) const { return readData(pArg); }
status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const { return readData(pArg); }

status_t Parcel::readByteVector(std::vector<int8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::vector<uint8_t>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::optional<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::optional<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const { return readData(val); }
status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::optional<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const { return readData(val); }
status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::optional<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const { return readData(val); }
status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::optional<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const { return readData(val); }
status_t Parcel::readFloatVector(std::vector<float>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::optional<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const { return readData(val); }
status_t Parcel::readDoubleVector(std::vector<double>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::optional<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const { return readData(val); }
status_t Parcel::readBoolVector(std::vector<bool>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::optional<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const { return readData(val); }
status_t Parcel::readCharVector(std::vector<char16_t>* val) const { return readData(val); }

status_t Parcel::readString16Vector(
        std::optional<std::vector<std::optional<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const { return readData(val); }
status_t Parcel::readString16Vector(std::vector<String16>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::optional<std::vector<std::optional<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(
        std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const { return readData(val); }
status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const { return readData(val); }

status_t Parcel::readUniqueFileDescriptorVector(std::optional<std::vector<base::unique_fd>>* val) const { return readData(val); }
status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const { return readData(val); }
status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const { return readData(val); }

status_t Parcel::readStrongBinderVector(std::optional<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const { return readData(val); }
status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const { return readData(val); }

status_t Parcel::readParcelable(Parcelable* parcelable) const { return readData(parcelable); }
1253
// 32-bit integers are written through writeAligned().
status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}
1263
writeInt32Array(size_t len,const int32_t * val)1264 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
1265 if (len > INT32_MAX) {
1266 // don't accept size_t values which may have come from an
1267 // inadvertent conversion from a negative int.
1268 return BAD_VALUE;
1269 }
1270
1271 if (!val) {
1272 return writeInt32(-1);
1273 }
1274 status_t ret = writeInt32(static_cast<uint32_t>(len));
1275 if (ret == NO_ERROR) {
1276 ret = write(val, len * sizeof(*val));
1277 }
1278 return ret;
1279 }
writeByteArray(size_t len,const uint8_t * val)1280 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
1281 if (len > INT32_MAX) {
1282 // don't accept size_t values which may have come from an
1283 // inadvertent conversion from a negative int.
1284 return BAD_VALUE;
1285 }
1286
1287 if (!val) {
1288 return writeInt32(-1);
1289 }
1290 status_t ret = writeInt32(static_cast<uint32_t>(len));
1291 if (ret == NO_ERROR) {
1292 ret = write(val, len * sizeof(*val));
1293 }
1294 return ret;
1295 }
1296
// bool, char16_t, and int8_t each occupy a full int32 slot so every datum
// in the parcel stays 4-byte aligned.
status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

// 64-bit and floating-point values are written through writeAligned().
status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

// Pointers are widened to binder_uintptr_t so 32-bit and 64-bit processes
// agree on the field's wire size.
status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}
1331
#if defined(__mips__) && defined(__mips_hard_float)

// NOTE(review): on MIPS hard-float builds the double is funneled through an
// integer union before reaching writeAligned() — presumably to avoid an
// FP-register/ABI issue on that target; confirm before changing.
status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif
1352
writeCString(const char * str)1353 status_t Parcel::writeCString(const char* str)
1354 {
1355 return write(str, strlen(str)+1);
1356 }
1357
// Writes a length-prefixed 8-bit string (see the char*/len overload below).
status_t Parcel::writeString8(const String8& str)
{
    return writeString8(str.string(), str.size());
}
1362
writeString8(const char * str,size_t len)1363 status_t Parcel::writeString8(const char* str, size_t len)
1364 {
1365 if (str == nullptr) return writeInt32(-1);
1366
1367 // NOTE: Keep this logic in sync with android_os_Parcel.cpp
1368 status_t err = writeInt32(len);
1369 if (err == NO_ERROR) {
1370 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
1371 if (data) {
1372 memcpy(data, str, len);
1373 *reinterpret_cast<char*>(data+len) = 0;
1374 return NO_ERROR;
1375 }
1376 err = mError;
1377 }
1378 return err;
1379 }
1380
// Writes a length-prefixed UTF-16 string (see the char16_t*/len overload below).
status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}
1385
writeString16(const char16_t * str,size_t len)1386 status_t Parcel::writeString16(const char16_t* str, size_t len)
1387 {
1388 if (str == nullptr) return writeInt32(-1);
1389
1390 // NOTE: Keep this logic in sync with android_os_Parcel.cpp
1391 status_t err = writeInt32(len);
1392 if (err == NO_ERROR) {
1393 len *= sizeof(char16_t);
1394 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1395 if (data) {
1396 memcpy(data, str, len);
1397 *reinterpret_cast<char16_t*>(data+len) = 0;
1398 return NO_ERROR;
1399 }
1400 err = mError;
1401 }
1402 return err;
1403 }
1404
// Serializes a strong binder reference via flattenBinder().
status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flattenBinder(val);
}
1409
1410
writeRawNullableParcelable(const Parcelable * parcelable)1411 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1412 if (!parcelable) {
1413 return writeInt32(0);
1414 }
1415
1416 return writeParcelable(*parcelable);
1417 }
1418
writeNativeHandle(const native_handle * handle)1419 status_t Parcel::writeNativeHandle(const native_handle* handle)
1420 {
1421 if (!handle || handle->version != sizeof(native_handle))
1422 return BAD_TYPE;
1423
1424 status_t err;
1425 err = writeInt32(handle->numFds);
1426 if (err != NO_ERROR) return err;
1427
1428 err = writeInt32(handle->numInts);
1429 if (err != NO_ERROR) return err;
1430
1431 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1432 err = writeDupFileDescriptor(handle->data[i]);
1433
1434 if (err != NO_ERROR) {
1435 ALOGD("write native handle, write dup fd failed");
1436 return err;
1437 }
1438 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1439 return err;
1440 }
1441
// Writes a file descriptor into the parcel.
// takeOwnership == true: the parcel is responsible for closing `fd`.
// RPC parcels store the fd out-of-band in rpcFields->mFds and encode a
// TYPE_NATIVE_FILE_DESCRIPTOR marker plus the fd's index in the data
// stream; kernel parcels embed a BINDER_TYPE_FD flat_binder_object.
status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership) {
    if (auto* rpcFields = maybeRpcFields()) {
        // Wrap first so that, when we took ownership, any early error
        // return below still closes `fd` via the unique_fd destructor.
        std::variant<base::unique_fd, base::borrowed_fd> fdVariant;
        if (takeOwnership) {
            fdVariant = base::unique_fd(fd);
        } else {
            fdVariant = base::borrowed_fd(fd);
        }
        if (!mAllowFds) {
            return FDS_NOT_ALLOWED;
        }
        switch (rpcFields->mSession->getFileDescriptorTransportMode()) {
            case RpcSession::FileDescriptorTransportMode::NONE: {
                return FDS_NOT_ALLOWED;
            }
            case RpcSession::FileDescriptorTransportMode::UNIX:
            case RpcSession::FileDescriptorTransportMode::TRUSTY: {
                if (rpcFields->mFds == nullptr) {
                    // Lazily create the out-of-band fd table.
                    rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
                }
                size_t dataPos = mDataPos;
                if (dataPos > UINT32_MAX) {
                    return NO_MEMORY;
                }
                // Marker type followed by this fd's index in mFds.
                if (status_t err = writeInt32(RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR); err != OK) {
                    return err;
                }
                if (status_t err = writeInt32(rpcFields->mFds->size()); err != OK) {
                    return err;
                }
                rpcFields->mObjectPositions.push_back(dataPos);
                rpcFields->mFds->push_back(std::move(fdVariant));
                return OK;
            }
        }
    }

#ifdef BINDER_WITH_KERNEL_IPC
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    // cookie records whether the parcel owns (and must close) the fd.
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)fd;
    (void)takeOwnership;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1494
writeDupFileDescriptor(int fd)1495 status_t Parcel::writeDupFileDescriptor(int fd)
1496 {
1497 int dupFd;
1498 if (status_t err = dupFileDescriptor(fd, &dupFd); err != OK) {
1499 return err;
1500 }
1501 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1502 if (err != OK) {
1503 close(dupFd);
1504 }
1505 return err;
1506 }
1507
writeParcelFileDescriptor(int fd,bool takeOwnership)1508 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1509 {
1510 writeInt32(0);
1511 return writeFileDescriptor(fd, takeOwnership);
1512 }
1513
writeDupParcelFileDescriptor(int fd)1514 status_t Parcel::writeDupParcelFileDescriptor(int fd)
1515 {
1516 int dupFd;
1517 if (status_t err = dupFileDescriptor(fd, &dupFd); err != OK) {
1518 return err;
1519 }
1520 status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1521 if (err != OK) {
1522 close(dupFd);
1523 }
1524 return err;
1525 }
1526
// Writes a duplicate of `fd`; the caller keeps ownership of the original.
status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}
1530
// Reserves a writable blob of `len` bytes. Small blobs (or parcels that
// forbid fds) are stored inline in the parcel; larger ones are backed by an
// ashmem region whose fd is written into the parcel. `outBlob` receives a
// mapping of the reserved storage. Note the error paths fall through so the
// mapping and fd are released exactly once.
status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        // Inline blob: no fd, not shared, so "mutable" is irrelevant (false).
        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                // Immutable blobs: drop write permission for future mappers
                // (our existing PROT_WRITE mapping stays writable).
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    // Parcel takes ownership of fd; outBlob holds the mapping.
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        // Error after a successful mmap: unmap before closing the fd below.
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}
1585
writeDupImmutableBlobFileDescriptor(int fd)1586 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1587 {
1588 // Must match up with what's done in writeBlob.
1589 if (!mAllowFds) return FDS_NOT_ALLOWED;
1590 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1591 if (status) return status;
1592 return writeDupFileDescriptor(fd);
1593 }
1594
// Serializes a Flattenable as: int32 byte length, int32 fd count, the
// flattened payload, then each fd dup'd into the parcel.
status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(len);
    if (buf == nullptr)
        return BAD_VALUE;

    // Temporary array the flattenable fills with its fds; freed below.
    int* fds = nullptr;
    if (fd_count) {
        fds = new (std::nothrow) int[fd_count];
        if (fds == nullptr) {
            ALOGE("write: failed to allocate requested %zu fds", fd_count);
            return BAD_VALUE;
        }
    }

    err = val.flatten(buf, len, fds, fd_count);
    // Dup each returned fd into the parcel; stop on the first error.
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}
1640
// Writes a flat_binder_object into a kernel-binder parcel, recording its
// offset in the object table when it carries metadata the kernel must
// translate. Grows the data buffer and/or object table as needed, then
// retries via the restart_write label. RPC parcels cannot hold
// flat_binder_objects and abort.
status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr, "Can't write flat_binder_object to RPC Parcel");

#ifdef BINDER_WITH_KERNEL_IPC
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = kernelFields->mObjectsSize < kernelFields->mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            kernelFields->mHasFds = kernelFields->mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            kernelFields->mObjects[kernelFields->mObjectsSize] = mDataPos;
            // Take a reference on the object so it stays alive while parceled.
            acquire_object(ProcessState::self(), val, this);
            kernelFields->mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        // Grow the object table by ~1.5x, guarding each step against overflow.
        if (kernelFields->mObjectsSize > SIZE_MAX - 2) return NO_MEMORY;       // overflow
        if ((kernelFields->mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
        size_t newSize = ((kernelFields->mObjectsSize + 2) * 3) / 2;
        if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY;      // overflow
        binder_size_t* objects =
                (binder_size_t*)realloc(kernelFields->mObjects, newSize * sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        kernelFields->mObjects = objects;
        kernelFields->mObjectsCapacity = newSize;
    }

    // Capacity is now sufficient: retry the write path above.
    goto restart_write;
#else  // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    (void)val;
    (void)nullMetaData;
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
1696
writeNoException()1697 status_t Parcel::writeNoException()
1698 {
1699 binder::Status status;
1700 return status.writeToParcel(this);
1701 }
1702
// Verifies that a plain-data read ending at |upperBound| does not overlap
// any flattened binder object in the buffer. The object-offset table must
// be sorted for the scan; if it is found unsorted it is insertion-sorted
// first — which is why this const method updates the mutable sorted/hint
// bookkeeping in kernelFields.
status_t Parcel::validateReadData(size_t upperBound) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        // Can't validate RPC Parcel reads because the location of binder
        // objects is unknown.
        return OK;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Don't allow non-object reads on object data
    if (kernelFields->mObjectsSorted || kernelFields->mObjectsSize <= 1) {
data_sorted:
        // Expect to check only against the next object
        if (kernelFields->mNextObjectHint < kernelFields->mObjectsSize &&
            upperBound > kernelFields->mObjects[kernelFields->mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = kernelFields->mNextObjectHint;
            do {
                if (mDataPos < kernelFields->mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    ALOGE("Attempt to read from protected data in Parcel %p", this);
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < kernelFields->mObjectsSize &&
                     upperBound > kernelFields->mObjects[nextObject]);
            // Cache where the scan stopped so the next read resumes here.
            kernelFields->mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = kernelFields->mObjects + kernelFields->mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > kernelFields->mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    kernelFields->mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = kernelFields->mObjects + 1;
         iter0 < kernelFields->mObjects + kernelFields->mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= kernelFields->mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    kernelFields->mNextObjectHint = 0;
    kernelFields->mObjectsSorted = true;
    goto data_sorted;
#else // BINDER_WITH_KERNEL_IPC
    (void)upperBound;
    return NO_ERROR;
#endif // BINDER_WITH_KERNEL_IPC
}
1770
// Copies |len| bytes out of the parcel into |outData| and advances the
// read cursor by the padded (4-byte aligned) length.
status_t Parcel::read(void* outData, size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // The first comparison guards against mDataPos+pad_size(len) wrapping
    // around; len <= pad_size(len) guards against pad_size itself wrapping.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject plain-data reads that would overlap binder objects.
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
                return err;
            }
        }
        memcpy(outData, mData+mDataPos, len);
        mDataPos += pad_size(len);
        ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
        return NO_ERROR;
    }
    return NOT_ENOUGH_DATA;
}
1798
// Returns a pointer to |len| bytes of data stored inline in the parcel and
// advances the read cursor by the padded length; nullptr on bounds or
// validation failure. The returned pointer aliases the parcel's buffer and
// is only valid while the parcel data is alive.
const void* Parcel::readInplace(size_t len) const
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    // First comparison guards against mDataPos+pad_size(len) wrapping;
    // len <= pad_size(len) guards against pad_size itself wrapping.
    if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
            && len <= pad_size(len)) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject plain-data reads that would overlap binder objects.
            status_t err = validateReadData(mDataPos + pad_size(len));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += pad_size(len);
                ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
                return nullptr;
            }
        }

        const void* data = mData+mDataPos;
        mDataPos += pad_size(len);
        ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
        return data;
    }
    return nullptr;
}
1827
// Reads the length header of an out-parameter vector and sanity-checks
// that materializing |*size| elements of |elmSize| bytes would stay under
// a 1MB allocation cap. A negative size (null vector) is returned as OK
// and left for the caller to handle.
status_t Parcel::readOutVectorSizeWithCheck(size_t elmSize, int32_t* size) const {
    if (status_t status = readInt32(size); status != OK) return status;
    if (*size < 0) return OK; // may be null, client to handle

    LOG_ALWAYS_FATAL_IF(elmSize > INT32_MAX, "Cannot have element as big as %zu", elmSize);

    // approximation, can't know max element size (e.g. if it makes heap
    // allocations)
    static_assert(sizeof(int) == sizeof(int32_t), "Android is LP64");
    int32_t allocationSize;
    // 32-bit signed multiply; overflow means the request is absurd.
    if (__builtin_smul_overflow(elmSize, *size, &allocationSize)) return NO_MEMORY;

    // High limit of 1MB since something this big could never be returned. Could
    // probably scope this down, but might impact very specific usecases.
    constexpr int32_t kMaxAllocationSize = 1 * 1000 * 1000;

    if (allocationSize >= kMaxAllocationSize) {
        return NO_MEMORY;
    }

    return OK;
}
1850
// Reads a single trivially-copyable value of type T at the current read
// position. T's size must already be a multiple of the parcel's 4-byte
// padding (enforced at compile time), so no extra skip is needed.
template<class T>
status_t Parcel::readAligned(T *pArg) const {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(T)) <= mDataSize) {
        const auto* kernelFields = maybeKernelFields();
        if (kernelFields != nullptr && kernelFields->mObjectsSize > 0) {
            // Reject plain-data reads that would overlap binder objects.
            status_t err = validateReadData(mDataPos + sizeof(T));
            if(err != NO_ERROR) {
                // Still increment the data position by the expected length
                mDataPos += sizeof(T);
                return err;
            }
        }

        memcpy(pArg, mData + mDataPos, sizeof(T));
        mDataPos += sizeof(T);
        return NO_ERROR;
    } else {
        return NOT_ENOUGH_DATA;
    }
}
1874
1875 template<class T>
readAligned() const1876 T Parcel::readAligned() const {
1877 T result;
1878 if (readAligned(&result) != NO_ERROR) {
1879 result = 0;
1880 }
1881
1882 return result;
1883 }
1884
// Writes a single trivially-copyable value of type T whose size is already
// a multiple of the parcel's 4-byte padding, growing the buffer once if
// the capacity is exhausted.
template<class T>
status_t Parcel::writeAligned(T val) {
    static_assert(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
    static_assert(std::is_trivially_copyable_v<T>);

    if ((mDataPos+sizeof(val)) <= mDataCapacity) {
restart_write:
        memcpy(mData + mDataPos, &val, sizeof(val));
        return finishWrite(sizeof(val));
    }

    // Capacity exhausted: grow, then jump back to perform the write.
    status_t err = growData(sizeof(val));
    if (err == NO_ERROR) goto restart_write;
    return err;
}
1900
readInt32(int32_t * pArg) const1901 status_t Parcel::readInt32(int32_t *pArg) const
1902 {
1903 return readAligned(pArg);
1904 }
1905
readInt32() const1906 int32_t Parcel::readInt32() const
1907 {
1908 return readAligned<int32_t>();
1909 }
1910
readUint32(uint32_t * pArg) const1911 status_t Parcel::readUint32(uint32_t *pArg) const
1912 {
1913 return readAligned(pArg);
1914 }
1915
readUint32() const1916 uint32_t Parcel::readUint32() const
1917 {
1918 return readAligned<uint32_t>();
1919 }
1920
readInt64(int64_t * pArg) const1921 status_t Parcel::readInt64(int64_t *pArg) const
1922 {
1923 return readAligned(pArg);
1924 }
1925
1926
readInt64() const1927 int64_t Parcel::readInt64() const
1928 {
1929 return readAligned<int64_t>();
1930 }
1931
readUint64(uint64_t * pArg) const1932 status_t Parcel::readUint64(uint64_t *pArg) const
1933 {
1934 return readAligned(pArg);
1935 }
1936
readUint64() const1937 uint64_t Parcel::readUint64() const
1938 {
1939 return readAligned<uint64_t>();
1940 }
1941
readPointer(uintptr_t * pArg) const1942 status_t Parcel::readPointer(uintptr_t *pArg) const
1943 {
1944 status_t ret;
1945 binder_uintptr_t ptr;
1946 ret = readAligned(&ptr);
1947 if (!ret)
1948 *pArg = ptr;
1949 return ret;
1950 }
1951
readPointer() const1952 uintptr_t Parcel::readPointer() const
1953 {
1954 return readAligned<binder_uintptr_t>();
1955 }
1956
1957
readFloat(float * pArg) const1958 status_t Parcel::readFloat(float *pArg) const
1959 {
1960 return readAligned(pArg);
1961 }
1962
1963
readFloat() const1964 float Parcel::readFloat() const
1965 {
1966 return readAligned<float>();
1967 }
1968
1969 #if defined(__mips__) && defined(__mips_hard_float)
1970
// MIPS hard-float variant: read the double through an integer union so the
// 8 bytes are transferred with integer loads rather than FPU loads.
// NOTE(review): presumably works around an FPU/alignment quirk on this
// target — confirm before unifying with the generic path below.
status_t Parcel::readDouble(double *pArg) const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = 0;
    status_t status;
    status = readAligned(&u.ll);
    *pArg = u.d;
    return status;
}
1983
// MIPS hard-float variant of the value-returning reader; see the pointer
// overload above for why the union is used. Returns 0.0 on read failure
// (readAligned yields 0 for the integer representation).
double Parcel::readDouble() const
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.ll = readAligned<unsigned long long>();
    return u.d;
}
1993
1994 #else
1995
readDouble(double * pArg) const1996 status_t Parcel::readDouble(double *pArg) const
1997 {
1998 return readAligned(pArg);
1999 }
2000
readDouble() const2001 double Parcel::readDouble() const
2002 {
2003 return readAligned<double>();
2004 }
2005
2006 #endif
2007
readBool(bool * pArg) const2008 status_t Parcel::readBool(bool *pArg) const
2009 {
2010 int32_t tmp = 0;
2011 status_t ret = readInt32(&tmp);
2012 *pArg = (tmp != 0);
2013 return ret;
2014 }
2015
readBool() const2016 bool Parcel::readBool() const
2017 {
2018 return readInt32() != 0;
2019 }
2020
readChar(char16_t * pArg) const2021 status_t Parcel::readChar(char16_t *pArg) const
2022 {
2023 int32_t tmp = 0;
2024 status_t ret = readInt32(&tmp);
2025 *pArg = char16_t(tmp);
2026 return ret;
2027 }
2028
readChar() const2029 char16_t Parcel::readChar() const
2030 {
2031 return char16_t(readInt32());
2032 }
2033
readByte(int8_t * pArg) const2034 status_t Parcel::readByte(int8_t *pArg) const
2035 {
2036 int32_t tmp = 0;
2037 status_t ret = readInt32(&tmp);
2038 *pArg = int8_t(tmp);
2039 return ret;
2040 }
2041
readByte() const2042 int8_t Parcel::readByte() const
2043 {
2044 return int8_t(readInt32());
2045 }
2046
// Reads a UTF-16 string from the parcel and converts it to UTF-8 in |str|.
// Returns UNEXPECTED_NULL if the wire value is a null string, BAD_VALUE if
// the length conversion fails.
status_t Parcel::readUtf8FromUtf16(std::string* str) const {
    size_t utf16Size = 0;
    const char16_t* src = readString16Inplace(&utf16Size);
    if (!src) {
        return UNEXPECTED_NULL;
    }

    // Save ourselves the trouble, we're done.
    if (utf16Size == 0u) {
        str->clear();
        return NO_ERROR;
    }

    // Allow for closing '\0'
    ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
    if (utf8Size < 1) {
        // utf16_to_utf8_length reported an error (negative length).
        return BAD_VALUE;
    }
    // Note that while it is probably safe to assume string::resize keeps a
    // spare byte around for the trailing null, we still pass the size including the trailing null
    str->resize(utf8Size);
    utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
    // Shrink back so the trailing NUL is not counted in the string length.
    str->resize(utf8Size - 1);
    return NO_ERROR;
}
2072
// Returns a pointer to a NUL-terminated C string stored inline in the
// parcel, or nullptr if no terminator is found within the valid bounds.
// The pointer aliases the parcel's buffer and is only valid while the
// parcel data is alive.
const char* Parcel::readCString() const
{
    if (mDataPos < mDataSize) {
        const size_t avail = mDataSize-mDataPos;
        const char* str = reinterpret_cast<const char*>(mData+mDataPos);
        // is the string's trailing NUL within the parcel's valid bounds?
        const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
        if (eos) {
            const size_t len = eos - str;
            // Advance past the string, its NUL, and alignment padding.
            mDataPos += pad_size(len+1);
            ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
            return str;
        }
    }
    return nullptr;
}
2089
readString8() const2090 String8 Parcel::readString8() const
2091 {
2092 size_t len;
2093 const char* str = readString8Inplace(&len);
2094 if (str) return String8(str, len);
2095 ALOGE("Reading a NULL string not supported here.");
2096 return String8();
2097 }
2098
readString8(String8 * pArg) const2099 status_t Parcel::readString8(String8* pArg) const
2100 {
2101 size_t len;
2102 const char* str = readString8Inplace(&len);
2103 if (str) {
2104 pArg->setTo(str, len);
2105 return 0;
2106 } else {
2107 *pArg = String8();
2108 return UNEXPECTED_NULL;
2109 }
2110 }
2111
// Returns a pointer to an in-place UTF-8 string and stores its length in
// *outLen; nullptr (with *outLen == 0) for a null or malformed string.
// The payload must include its trailing NUL; a missing terminator is
// reported to the SafetyNet event log.
const char* Parcel::readString8Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char* str = (const char*)readInplace(size+1);
        if (str != nullptr) {
            if (str[size] == '\0') {
                return str;
            }
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2129
readString16() const2130 String16 Parcel::readString16() const
2131 {
2132 size_t len;
2133 const char16_t* str = readString16Inplace(&len);
2134 if (str) return String16(str, len);
2135 ALOGE("Reading a NULL string not supported here.");
2136 return String16();
2137 }
2138
2139
readString16(String16 * pArg) const2140 status_t Parcel::readString16(String16* pArg) const
2141 {
2142 size_t len;
2143 const char16_t* str = readString16Inplace(&len);
2144 if (str) {
2145 pArg->setTo(str, len);
2146 return 0;
2147 } else {
2148 *pArg = String16();
2149 return UNEXPECTED_NULL;
2150 }
2151 }
2152
// Returns a pointer to an in-place UTF-16 string and stores its length (in
// code units) in *outLen; nullptr (with *outLen == 0) for a null or
// malformed string. The payload must include its trailing NUL; a missing
// terminator is reported to the SafetyNet event log.
const char16_t* Parcel::readString16Inplace(size_t* outLen) const
{
    int32_t size = readInt32();
    // watch for potential int overflow from size+1
    if (size >= 0 && size < INT32_MAX) {
        *outLen = size;
        const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
        if (str != nullptr) {
            if (str[size] == u'\0') {
                return str;
            }
            android_errorWriteLog(0x534e4554, "172655291");
        }
    }
    *outLen = 0;
    return nullptr;
}
2170
readStrongBinder(sp<IBinder> * val) const2171 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2172 {
2173 status_t status = readNullableStrongBinder(val);
2174 if (status == OK && !val->get()) {
2175 ALOGW("Expecting binder but got null!");
2176 status = UNEXPECTED_NULL;
2177 }
2178 return status;
2179 }
2180
readNullableStrongBinder(sp<IBinder> * val) const2181 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
2182 {
2183 return unflattenBinder(val);
2184 }
2185
readStrongBinder() const2186 sp<IBinder> Parcel::readStrongBinder() const
2187 {
2188 sp<IBinder> val;
2189 // Note that a lot of code in Android reads binders by hand with this
2190 // method, and that code has historically been ok with getting nullptr
2191 // back (while ignoring error codes).
2192 readNullableStrongBinder(&val);
2193 return val;
2194 }
2195
readExceptionCode() const2196 int32_t Parcel::readExceptionCode() const
2197 {
2198 binder::Status status;
2199 status.readFromParcel(*this);
2200 return status.exceptionCode();
2201 }
2202
// Reconstructs a native_handle from the parcel: an fd count, an int count,
// each fd (dup'ed here with CLOEXEC; the parcel keeps the originals), then
// the plain ints. Returns nullptr on any failure after closing whatever
// fds were already dup'ed.
native_handle* Parcel::readNativeHandle() const
{
    int numFds, numInts;
    status_t err;
    err = readInt32(&numFds);
    if (err != NO_ERROR) return nullptr;
    err = readInt32(&numInts);
    if (err != NO_ERROR) return nullptr;

    // native_handle_create returns nullptr for counts it rejects
    // (presumably negative/oversized values — behavior defined in libcutils).
    native_handle* h = native_handle_create(numFds, numInts);
    if (!h) {
        return nullptr;
    }

    for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
        // dup the fd out of the parcel so the handle owns its own copy.
        h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
        if (h->data[i] < 0) {
            // Roll back: close the fds dup'ed so far and free the handle.
            for (int j = 0; j < i; j++) {
                close(h->data[j]);
            }
            native_handle_delete(h);
            return nullptr;
        }
    }
    err = read(h->data + numFds, sizeof(int)*numInts);
    if (err != NO_ERROR) {
        native_handle_close(h);
        native_handle_delete(h);
        h = nullptr;
    }
    return h;
}
2235
// Returns the raw file descriptor at the current read position. The fd is
// still owned by the Parcel — callers must dup it to keep it. On error a
// negative status code (BAD_TYPE/BAD_VALUE/INVALID_OPERATION) is returned
// in place of an fd.
int Parcel::readFileDescriptor() const {
    if (const auto* rpcFields = maybeRpcFields()) {
        // RPC parcels: the current position must be a registered object slot.
        if (!std::binary_search(rpcFields->mObjectPositions.begin(),
                                rpcFields->mObjectPositions.end(), mDataPos)) {
            ALOGW("Attempt to read file descriptor from Parcel %p at offset %zu that is not in the "
                  "object list",
                  this, mDataPos);
            return BAD_TYPE;
        }

        int32_t objectType = readInt32();
        if (objectType != RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            return BAD_TYPE;
        }

        // The wire value is an index into the ancillary fd table.
        int32_t fdIndex = readInt32();
        if (rpcFields->mFds == nullptr || fdIndex < 0 ||
            static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
            ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                  fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
            return BAD_VALUE;
        }
        return toRawFd(rpcFields->mFds->at(fdIndex));
    }

#ifdef BINDER_WITH_KERNEL_IPC
    // Kernel parcels: the fd lives in a flat_binder_object's handle field.
    const flat_binder_object* flat = readObject(true);

    if (flat && flat->hdr.type == BINDER_TYPE_FD) {
        return flat->handle;
    }

    return BAD_TYPE;
#else // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
    return INVALID_OPERATION;
#endif // BINDER_WITH_KERNEL_IPC
}
2274
// Reads a ParcelFileDescriptor: the payload fd, optionally followed by a
// "comm" fd used to signal detach status back to the Java peer. Returns
// the payload fd (still owned by the parcel) or BAD_TYPE on signal failure.
int Parcel::readParcelFileDescriptor() const {
    int32_t hasComm = readInt32();
    int fd = readFileDescriptor();
    if (hasComm != 0) {
        // detach (owned by the binder driver)
        int comm = readFileDescriptor();

        // warning: this must be kept in sync with:
        // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
        enum ParcelFileDescriptorStatus {
            DETACHED = 2,
        };

        // The status word is byte-swapped on little-endian hosts —
        // presumably the peer expects big-endian on the wire (see the Java
        // file referenced above).
#if BYTE_ORDER == BIG_ENDIAN
        const int32_t message = ParcelFileDescriptorStatus::DETACHED;
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
        const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
#endif

        ssize_t written = TEMP_FAILURE_RETRY(
            ::write(comm, &message, sizeof(message)));

        if (written != sizeof(message)) {
            ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
                  written, strerror(errno));
            return BAD_TYPE;
        }
    }
    return fd;
}
2306
readUniqueFileDescriptor(base::unique_fd * val) const2307 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2308 {
2309 int got = readFileDescriptor();
2310
2311 if (got == BAD_TYPE) {
2312 return BAD_TYPE;
2313 }
2314
2315 int dupFd;
2316 if (status_t err = dupFileDescriptor(got, &dupFd); err != OK) {
2317 return BAD_VALUE;
2318 }
2319
2320 val->reset(dupFd);
2321
2322 if (val->get() < 0) {
2323 return BAD_VALUE;
2324 }
2325
2326 return OK;
2327 }
2328
readUniqueParcelFileDescriptor(base::unique_fd * val) const2329 status_t Parcel::readUniqueParcelFileDescriptor(base::unique_fd* val) const
2330 {
2331 int got = readParcelFileDescriptor();
2332
2333 if (got == BAD_TYPE) {
2334 return BAD_TYPE;
2335 }
2336
2337 int dupFd;
2338 if (status_t err = dupFileDescriptor(got, &dupFd); err != OK) {
2339 return BAD_VALUE;
2340 }
2341
2342 val->reset(dupFd);
2343
2344 if (val->get() < 0) {
2345 return BAD_VALUE;
2346 }
2347
2348 return OK;
2349 }
2350
// Reads a blob previously produced by writeBlob (or
// writeDupImmutableBlobFileDescriptor): either bytes stored inline
// (BLOB_INPLACE) or an ashmem region transferred by fd and mapped here.
// |len| must match the length the writer used.
status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
{
    int32_t blobType;
    status_t status = readInt32(&blobType);
    if (status) return status;

    if (blobType == BLOB_INPLACE) {
        ALOGV("readBlob: read in place");
        const void* ptr = readInplace(len);
        if (!ptr) return BAD_VALUE;

        // In-place blobs alias the parcel buffer: no fd, never mutable.
        outBlob->init(-1, const_cast<void*>(ptr), len, false);
        return NO_ERROR;
    }

    ALOGV("readBlob: read from ashmem");
    bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
    int fd = readFileDescriptor();
    if (fd == int(BAD_TYPE)) return BAD_VALUE;

    if (!ashmem_valid(fd)) {
        ALOGE("invalid fd");
        return BAD_VALUE;
    }
    // The ashmem region must be at least as large as the requested mapping.
    int size = ashmem_get_size_region(fd);
    if (size < 0 || size_t(size) < len) {
        ALOGE("request size %zu does not match fd size %d", len, size);
        return BAD_VALUE;
    }
    void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
                       MAP_SHARED, fd, 0);
    if (ptr == MAP_FAILED) return NO_MEMORY;

    outBlob->init(fd, ptr, len, isMutable);
    return NO_ERROR;
}
2387
read(FlattenableHelperInterface & val) const2388 status_t Parcel::read(FlattenableHelperInterface& val) const
2389 {
2390 // size
2391 const size_t len = this->readInt32();
2392 const size_t fd_count = this->readInt32();
2393
2394 if ((len > INT32_MAX) || (fd_count > kMaxFds)) {
2395 // don't accept size_t values which may have come from an
2396 // inadvertent conversion from a negative int.
2397 return BAD_VALUE;
2398 }
2399
2400 // payload
2401 void const* const buf = this->readInplace(pad_size(len));
2402 if (buf == nullptr)
2403 return BAD_VALUE;
2404
2405 int* fds = nullptr;
2406 if (fd_count) {
2407 fds = new (std::nothrow) int[fd_count];
2408 if (fds == nullptr) {
2409 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2410 return BAD_VALUE;
2411 }
2412 }
2413
2414 status_t err = NO_ERROR;
2415 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2416 int fd = this->readFileDescriptor();
2417 if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2418 err = BAD_VALUE;
2419 ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2420 i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2421 // Close all the file descriptors that were dup-ed.
2422 for (size_t j=0; j<i ;j++) {
2423 close(fds[j]);
2424 }
2425 }
2426 }
2427
2428 if (err == NO_ERROR) {
2429 err = val.unflatten(buf, len, fds, fd_count);
2430 }
2431
2432 if (fd_count) {
2433 delete [] fds;
2434 }
2435
2436 return err;
2437 }
2438
2439 #ifdef BINDER_WITH_KERNEL_IPC
// Returns a pointer into the parcel at the current read position if it
// holds a flat_binder_object that is either registered in the object table
// or (when nullMetaData is false) a null object, advancing the read
// cursor. Returns nullptr otherwise. Kernel-transport parcels only.
const flat_binder_object* Parcel::readObject(bool nullMetaData) const
{
    const auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return nullptr;
    }

    const size_t DPOS = mDataPos;
    if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
        const flat_binder_object* obj
                = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
        mDataPos = DPOS + sizeof(flat_binder_object);
        if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
            // When transferring a NULL object, we don't write it into
            // the object list, so we don't want to check for it when
            // reading.
            ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
            return obj;
        }

        // Ensure that this object is valid...
        binder_size_t* const OBJS = kernelFields->mObjects;
        const size_t N = kernelFields->mObjectsSize;
        size_t opos = kernelFields->mNextObjectHint;

        if (N > 0) {
            ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
                  this, DPOS, opos);

            // Start at the current hint position, looking for an object at
            // the current data position.
            if (opos < N) {
                while (opos < (N-1) && OBJS[opos] < DPOS) {
                    opos++;
                }
            } else {
                opos = N-1;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with forward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }

            // Look backwards for it...
            while (opos > 0 && OBJS[opos] > DPOS) {
                opos--;
            }
            if (OBJS[opos] == DPOS) {
                // Found it!
                ALOGV("Parcel %p found obj %zu at index %zu with backward search",
                      this, DPOS, opos);
                kernelFields->mNextObjectHint = opos + 1;
                ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
                return obj;
            }
        }
        ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
              this, DPOS);
    }
    return nullptr;
}
2505 #endif // BINDER_WITH_KERNEL_IPC
2506
// Closes every file descriptor carried by this parcel. Kernel parcels walk
// the object table in reverse and close each BINDER_TYPE_FD handle; RPC
// parcels drop the ancillary fd table (owned unique_fd entries close on
// destruction; borrowed fds are left open).
void Parcel::closeFileDescriptors() {
    if (auto* kernelFields = maybeKernelFields()) {
#ifdef BINDER_WITH_KERNEL_IPC
        size_t i = kernelFields->mObjectsSize;
        if (i > 0) {
            // ALOGI("Closing file descriptors for %zu objects...", i);
        }
        while (i > 0) {
            i--;
            const flat_binder_object* flat =
                    reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
            if (flat->hdr.type == BINDER_TYPE_FD) {
                // ALOGI("Closing fd: %ld", flat->handle);
                close(flat->handle);
            }
        }
#else // BINDER_WITH_KERNEL_IPC
        LOG_ALWAYS_FATAL("Binder kernel driver disabled at build time");
#endif // BINDER_WITH_KERNEL_IPC
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mFds.reset();
    }
}
2530
ipcData() const2531 uintptr_t Parcel::ipcData() const
2532 {
2533 return reinterpret_cast<uintptr_t>(mData);
2534 }
2535
ipcDataSize() const2536 size_t Parcel::ipcDataSize() const
2537 {
2538 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2539 }
2540
ipcObjects() const2541 uintptr_t Parcel::ipcObjects() const
2542 {
2543 if (const auto* kernelFields = maybeKernelFields()) {
2544 return reinterpret_cast<uintptr_t>(kernelFields->mObjects);
2545 }
2546 return 0;
2547 }
2548
ipcObjectsCount() const2549 size_t Parcel::ipcObjectsCount() const
2550 {
2551 if (const auto* kernelFields = maybeKernelFields()) {
2552 return kernelFields->mObjectsSize;
2553 }
2554 return 0;
2555 }
2556
// Adopts an externally-owned data buffer and object-offset table (as
// delivered by the binder driver) without copying; |relFunc| is called
// later to release the memory. Object offsets are validated to be
// ascending, non-overlapping, and of supported types; any violation clears
// the object table (the data itself is kept).
void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize, const binder_size_t* objects,
                                 size_t objectsCount, release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    freeData();

    auto* kernelFields = maybeKernelFields();
    LOG_ALWAYS_FATAL_IF(kernelFields == nullptr); // guaranteed by freeData.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    kernelFields->mObjects = const_cast<binder_size_t*>(objects);
    kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsCount;
    mOwner = relFunc;

#ifdef BINDER_WITH_KERNEL_IPC
    binder_size_t minOffset = 0;
    for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
        binder_size_t offset = kernelFields->mObjects[i];
        if (offset < minOffset) {
            // Offsets must be ascending and non-overlapping.
            ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
                  __func__, (uint64_t)offset, (uint64_t)minOffset);
            kernelFields->mObjectsSize = 0;
            break;
        }
        const flat_binder_object* flat
                = reinterpret_cast<const flat_binder_object*>(mData + offset);
        uint32_t type = flat->hdr.type;
        if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
              type == BINDER_TYPE_FD)) {
            // We should never receive other types (eg BINDER_TYPE_FDA) as long as we don't support
            // them in libbinder. If we do receive them, it probably means a kernel bug; try to
            // recover gracefully by clearing out the objects.
            android_errorWriteLog(0x534e4554, "135930648");
            android_errorWriteLog(0x534e4554, "203847542");
            ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
                  __func__, type, (uint64_t)offset);

            // WARNING: callers of ipcSetDataReference need to make sure they
            // don't rely on mObjectsSize in their release_func.
            kernelFields->mObjectsSize = 0;
            break;
        }
        minOffset = offset + sizeof(flat_binder_object);
    }
    scanForFds();
#else // BINDER_WITH_KERNEL_IPC
    LOG_ALWAYS_FATAL_IF(objectsCount != 0,
                        "Non-zero objects count passed to Parcel with kernel driver disabled");
#endif // BINDER_WITH_KERNEL_IPC
}
2609
// Adopts an externally-owned buffer for an RPC (socket) parcel along with
// its object-position table and any ancillary fds received out-of-band.
// On validation failure, |relFunc| is invoked immediately to release the
// buffer and BAD_VALUE is returned.
status_t Parcel::rpcSetDataReference(
        const sp<RpcSession>& session, const uint8_t* data, size_t dataSize,
        const uint32_t* objectTable, size_t objectTableSize,
        std::vector<std::variant<base::unique_fd, base::borrowed_fd>>&& ancillaryFds,
        release_func relFunc) {
    // this code uses 'mOwner == nullptr' to understand whether it owns memory
    LOG_ALWAYS_FATAL_IF(relFunc == nullptr, "must provide cleanup function");

    LOG_ALWAYS_FATAL_IF(session == nullptr);

    // Every object position must have a matching ancillary fd entry.
    if (objectTableSize != ancillaryFds.size()) {
        ALOGE("objectTableSize=%zu ancillaryFds.size=%zu", objectTableSize, ancillaryFds.size());
        relFunc(data, dataSize, nullptr, 0);
        return BAD_VALUE;
    }
    for (size_t i = 0; i < objectTableSize; i++) {
        // Each object's type header must fit inside the buffer; the add is
        // overflow-checked.
        uint32_t minObjectEnd;
        if (__builtin_add_overflow(objectTable[i], sizeof(RpcFields::ObjectType), &minObjectEnd) ||
            minObjectEnd >= dataSize) {
            ALOGE("received out of range object position: %" PRIu32 " (parcel size is %zu)",
                  objectTable[i], dataSize);
            relFunc(data, dataSize, nullptr, 0);
            return BAD_VALUE;
        }
    }

    freeData();
    markForRpc(session);

    auto* rpcFields = maybeRpcFields();
    LOG_ALWAYS_FATAL_IF(rpcFields == nullptr); // guaranteed by markForRpc.

    mData = const_cast<uint8_t*>(data);
    mDataSize = mDataCapacity = dataSize;
    mOwner = relFunc;

    rpcFields->mObjectPositions.reserve(objectTableSize);
    for (size_t i = 0; i < objectTableSize; i++) {
        rpcFields->mObjectPositions.push_back(objectTable[i]);
    }
    if (!ancillaryFds.empty()) {
        rpcFields->mFds = std::make_unique<decltype(rpcFields->mFds)::element_type>();
        *rpcFields->mFds = std::move(ancillaryFds);
    }

    return OK;
}
2657
print(std::ostream & to,uint32_t) const2658 void Parcel::print(std::ostream& to, uint32_t /*flags*/) const {
2659 to << "Parcel(";
2660
2661 if (errorCheck() != NO_ERROR) {
2662 const status_t err = errorCheck();
2663 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2664 } else if (dataSize() > 0) {
2665 const uint8_t* DATA = data();
2666 to << "\t" << HexDump(DATA, dataSize());
2667 #ifdef BINDER_WITH_KERNEL_IPC
2668 if (const auto* kernelFields = maybeKernelFields()) {
2669 const binder_size_t* OBJS = kernelFields->mObjects;
2670 const size_t N = objectsCount();
2671 for (size_t i = 0; i < N; i++) {
2672 const flat_binder_object* flat =
2673 reinterpret_cast<const flat_binder_object*>(DATA + OBJS[i]);
2674 to << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2675 << TypeCode(flat->hdr.type & 0x7f7f7f00) << " = " << flat->binder;
2676 }
2677 }
2678 #endif // BINDER_WITH_KERNEL_IPC
2679 } else {
2680 to << "NULL";
2681 }
2682
2683 to << ")";
2684 }
2685
// Releases the reference held on every flattened binder object in this
// Parcel.  No-op for RPC parcels (no kernel fields) or when the Parcel
// contains no objects.
void Parcel::releaseObjects()
{
    auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    size_t i = kernelFields->mObjectsSize;
    if (i == 0) {
        return;
    }
    sp<ProcessState> proc(ProcessState::self());
    uint8_t* const data = mData;
    binder_size_t* const objects = kernelFields->mObjects;
    // Walk the object table from the end, releasing each flattened object.
    while (i > 0) {
        i--;
        const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
        release_object(proc, *flat, this);
    }
#endif // BINDER_WITH_KERNEL_IPC
}
2708
// Acquires a reference on every flattened binder object in this Parcel;
// mirror image of releaseObjects().  No-op for RPC parcels (no kernel
// fields) or when the Parcel contains no objects.
void Parcel::acquireObjects()
{
    auto* kernelFields = maybeKernelFields();
    if (kernelFields == nullptr) {
        return;
    }

#ifdef BINDER_WITH_KERNEL_IPC
    size_t i = kernelFields->mObjectsSize;
    if (i == 0) {
        return;
    }
    const sp<ProcessState> proc(ProcessState::self());
    uint8_t* const data = mData;
    binder_size_t* const objects = kernelFields->mObjects;
    // Walk the object table from the end, acquiring each flattened object.
    while (i > 0) {
        i--;
        const flat_binder_object* flat = reinterpret_cast<flat_binder_object*>(data + objects[i]);
        acquire_object(proc, *flat, this);
    }
#endif // BINDER_WITH_KERNEL_IPC
}
2731
// Releases all data and objects (closing fds / dropping references as
// needed), then resets this Parcel to its freshly-constructed empty state.
void Parcel::freeData()
{
    freeDataNoInit();
    initState();
}
2737
// Releases the Parcel's data without resetting member state; callers that
// need a usable Parcel afterwards go through freeData(), which also calls
// initState().
void Parcel::freeDataNoInit()
{
    if (mOwner) {
        // The buffer belongs to someone else (set via *SetDataReference);
        // hand it back through the owner's release function.
        LOG_ALLOC("Parcel %p: freeing other owner data", this);
        //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        auto* kernelFields = maybeKernelFields();
        // Close FDs before freeing, otherwise they will leak for kernel binder.
        closeFileDescriptors();
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
    } else {
        // We own the buffer: drop object references, optionally scrub the
        // memory, then free both the data and the object table.
        LOG_ALLOC("Parcel %p: freeing allocated data", this);
        releaseObjects();
        if (mData) {
            LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
            gParcelGlobalAllocSize -= mDataCapacity;
            gParcelGlobalAllocCount--;
            if (mDeallocZero) {
                // Scrub sensitive payloads before returning memory to the heap.
                zeroMemory(mData, mDataSize);
            }
            free(mData);
        }
        auto* kernelFields = maybeKernelFields();
        if (kernelFields && kernelFields->mObjects) free(kernelFields->mObjects);
    }
}
2764
growData(size_t len)2765 status_t Parcel::growData(size_t len)
2766 {
2767 if (len > INT32_MAX) {
2768 // don't accept size_t values which may have come from an
2769 // inadvertent conversion from a negative int.
2770 return BAD_VALUE;
2771 }
2772
2773 if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2774 if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
2775 size_t newSize = ((mDataSize+len)*3)/2;
2776 return (newSize <= mDataSize)
2777 ? (status_t) NO_MEMORY
2778 : continueWrite(std::max(newSize, (size_t) 128));
2779 }
2780
reallocZeroFree(uint8_t * data,size_t oldCapacity,size_t newCapacity,bool zero)2781 static uint8_t* reallocZeroFree(uint8_t* data, size_t oldCapacity, size_t newCapacity, bool zero) {
2782 if (!zero) {
2783 return (uint8_t*)realloc(data, newCapacity);
2784 }
2785 uint8_t* newData = (uint8_t*)malloc(newCapacity);
2786 if (!newData) {
2787 return nullptr;
2788 }
2789
2790 memcpy(newData, data, std::min(oldCapacity, newCapacity));
2791 zeroMemory(data, oldCapacity);
2792 free(data);
2793 return newData;
2794 }
2795
// Resets the Parcel for writing from scratch with capacity 'desired',
// resizing the existing buffer when we own it.  All object bookkeeping is
// cleared and held references are released.
status_t Parcel::restartWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (mOwner) {
        // Someone else's memory: give it back, then allocate our own.
        freeData();
        return continueWrite(desired);
    }

    uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
    // A failed shrink (data == nullptr but desired <= mDataCapacity) is
    // tolerated: we keep using the existing, larger buffer below.
    if (!data && desired > mDataCapacity) {
        mError = NO_MEMORY;
        return NO_MEMORY;
    }

    releaseObjects();

    if (data || desired == 0) {
        LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
        // Keep the global allocation statistics in sync with the resize.
        if (mDataCapacity > desired) {
            gParcelGlobalAllocSize -= (mDataCapacity - desired);
        } else {
            gParcelGlobalAllocSize += (desired - mDataCapacity);
        }

        if (!mData) {
            gParcelGlobalAllocCount++;
        }
        mData = data;
        mDataCapacity = desired;
    }

    mDataSize = mDataPos = 0;
    ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
    ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);

    // Reset per-variant object bookkeeping.
    if (auto* kernelFields = maybeKernelFields()) {
        free(kernelFields->mObjects);
        kernelFields->mObjects = nullptr;
        kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = 0;
        kernelFields->mNextObjectHint = 0;
        kernelFields->mObjectsSorted = false;
        kernelFields->mHasFds = false;
        kernelFields->mFdsKnown = true;
    } else if (auto* rpcFields = maybeRpcFields()) {
        rpcFields->mObjectPositions.clear();
        rpcFields->mFds.reset();
    }
    mAllowFds = true;

    return NO_ERROR;
}
2852
// Changes the Parcel's buffer capacity to 'desired', preserving as much
// existing content as fits.  Three cases are handled: (1) the data is owned
// by someone else — copy into a freshly malloc'd buffer and release the old
// owner; (2) we already own data — realloc in place; (3) no data yet —
// first allocation.  Objects located past the new size are dropped and
// their references released.
status_t Parcel::continueWrite(size_t desired)
{
    if (desired > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    auto* kernelFields = maybeKernelFields();
    auto* rpcFields = maybeRpcFields();

    // If shrinking, first adjust for any objects that appear
    // after the new data size.
    size_t objectsSize =
            kernelFields ? kernelFields->mObjectsSize : rpcFields->mObjectPositions.size();
    if (desired < mDataSize) {
        if (desired == 0) {
            objectsSize = 0;
        } else {
            // Object tables are position-ordered, so trim from the tail
            // until the last entry starts below the new size.
            if (kernelFields) {
                while (objectsSize > 0) {
                    if (kernelFields->mObjects[objectsSize - 1] < desired) break;
                    objectsSize--;
                }
            } else {
                while (objectsSize > 0) {
                    if (rpcFields->mObjectPositions[objectsSize - 1] < desired) break;
                    objectsSize--;
                }
            }
        }
    }

    if (mOwner) {
        // If the size is going to zero, just release the owner's data.
        if (desired == 0) {
            freeData();
            return NO_ERROR;
        }

        // If there is a different owner, we need to take
        // posession.
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }
        binder_size_t* objects = nullptr;

        if (kernelFields && objectsSize) {
            objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
            if (!objects) {
                free(data);

                mError = NO_MEMORY;
                return NO_MEMORY;
            }

            // Little hack to only acquire references on objects
            // we will be keeping.
            size_t oldObjectsSize = kernelFields->mObjectsSize;
            kernelFields->mObjectsSize = objectsSize;
            acquireObjects();
            kernelFields->mObjectsSize = oldObjectsSize;
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                free(data);
                return status;
            }
        }

        if (mData) {
            memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
        }
        if (objects && kernelFields && kernelFields->mObjects) {
            memcpy(objects, kernelFields->mObjects, objectsSize * sizeof(binder_size_t));
        }
        // ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
        if (kernelFields) {
            // TODO(b/239222407): This seems wrong. We should only free FDs when
            // they are in a truncated section of the parcel.
            closeFileDescriptors();
        }
        // Return the original buffer to its owner, then adopt our copy.
        mOwner(mData, mDataSize, kernelFields ? kernelFields->mObjects : nullptr,
               kernelFields ? kernelFields->mObjectsSize : 0);
        mOwner = nullptr;

        LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = (mDataSize < desired) ? mDataSize : desired;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        mDataCapacity = desired;
        if (kernelFields) {
            kernelFields->mObjects = objects;
            kernelFields->mObjectsSize = kernelFields->mObjectsCapacity = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
        }

    } else if (mData) {
        if (kernelFields && objectsSize < kernelFields->mObjectsSize) {
#ifdef BINDER_WITH_KERNEL_IPC
            // Need to release refs on any objects we are dropping.
            const sp<ProcessState> proc(ProcessState::self());
            for (size_t i = objectsSize; i < kernelFields->mObjectsSize; i++) {
                const flat_binder_object* flat =
                        reinterpret_cast<flat_binder_object*>(mData + kernelFields->mObjects[i]);
                if (flat->hdr.type == BINDER_TYPE_FD) {
                    // will need to rescan because we may have lopped off the only FDs
                    kernelFields->mFdsKnown = false;
                }
                release_object(proc, *flat, this);
            }

            if (objectsSize == 0) {
                free(kernelFields->mObjects);
                kernelFields->mObjects = nullptr;
                kernelFields->mObjectsCapacity = 0;
            } else {
                binder_size_t* objects =
                        (binder_size_t*)realloc(kernelFields->mObjects,
                                                objectsSize * sizeof(binder_size_t));
                // A failed shrink is tolerated: keep the larger table.
                if (objects) {
                    kernelFields->mObjects = objects;
                    kernelFields->mObjectsCapacity = objectsSize;
                }
            }
            kernelFields->mObjectsSize = objectsSize;
            kernelFields->mNextObjectHint = 0;
            kernelFields->mObjectsSorted = false;
#else  // BINDER_WITH_KERNEL_IPC
            LOG_ALWAYS_FATAL("Non-zero numObjects for RPC Parcel");
#endif // BINDER_WITH_KERNEL_IPC
        }
        if (rpcFields) {
            if (status_t status = truncateRpcObjects(objectsSize); status != OK) {
                return status;
            }
        }

        // We own the data, so we can just do a realloc().
        if (desired > mDataCapacity) {
            uint8_t* data = reallocZeroFree(mData, mDataCapacity, desired, mDeallocZero);
            if (data) {
                LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
                          desired);
                gParcelGlobalAllocSize += desired;
                gParcelGlobalAllocSize -= mDataCapacity;
                mData = data;
                mDataCapacity = desired;
            } else {
                mError = NO_MEMORY;
                return NO_MEMORY;
            }
        } else {
            // Logical shrink only: keep the allocation, clamp size/position.
            if (mDataSize > desired) {
                mDataSize = desired;
                ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
            }
            if (mDataPos > desired) {
                mDataPos = desired;
                ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
            }
        }

    } else {
        // This is the first data. Easy!
        uint8_t* data = (uint8_t*)malloc(desired);
        if (!data) {
            mError = NO_MEMORY;
            return NO_MEMORY;
        }

        // Sanity-log if we somehow reach "first allocation" with stale state.
        if (!(mDataCapacity == 0 &&
              (kernelFields == nullptr ||
               (kernelFields->mObjects == nullptr && kernelFields->mObjectsCapacity == 0)))) {
            ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity,
                  kernelFields ? kernelFields->mObjects : nullptr,
                  kernelFields ? kernelFields->mObjectsCapacity : 0, desired);
        }

        LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
        gParcelGlobalAllocSize += desired;
        gParcelGlobalAllocCount++;

        mData = data;
        mDataSize = mDataPos = 0;
        ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
        ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
        mDataCapacity = desired;
    }

    return NO_ERROR;
}
3051
// Shrinks the RPC object bookkeeping to the first 'newObjectsSize' entries,
// erasing the fds referenced by any dropped file-descriptor objects.
// Precondition: this is an RPC parcel — maybeRpcFields() is dereferenced
// without a null check, so callers must only invoke this on RPC parcels.
status_t Parcel::truncateRpcObjects(size_t newObjectsSize) {
    auto* rpcFields = maybeRpcFields();
    if (newObjectsSize == 0) {
        rpcFields->mObjectPositions.clear();
        if (rpcFields->mFds) {
            rpcFields->mFds->clear();
        }
        return OK;
    }
    // Pop objects from the tail until only 'newObjectsSize' remain.
    while (rpcFields->mObjectPositions.size() > newObjectsSize) {
        uint32_t pos = rpcFields->mObjectPositions.back();
        rpcFields->mObjectPositions.pop_back();
        const auto type = *reinterpret_cast<const RpcFields::ObjectType*>(mData + pos);
        if (type == RpcFields::TYPE_NATIVE_FILE_DESCRIPTOR) {
            // The object payload holds an index into mFds; validate it
            // before erasing (data may come from an untrusted peer).
            const auto fdIndex =
                    *reinterpret_cast<const int32_t*>(mData + pos + sizeof(RpcFields::ObjectType));
            if (rpcFields->mFds == nullptr || fdIndex < 0 ||
                static_cast<size_t>(fdIndex) >= rpcFields->mFds->size()) {
                ALOGE("RPC Parcel contains invalid file descriptor index. index=%d fd_count=%zu",
                      fdIndex, rpcFields->mFds ? rpcFields->mFds->size() : 0);
                return BAD_VALUE;
            }
            // In practice, this always removes the last element.
            rpcFields->mFds->erase(rpcFields->mFds->begin() + fdIndex);
        }
    }
    return OK;
}
3080
initState()3081 void Parcel::initState()
3082 {
3083 LOG_ALLOC("Parcel %p: initState", this);
3084 mError = NO_ERROR;
3085 mData = nullptr;
3086 mDataSize = 0;
3087 mDataCapacity = 0;
3088 mDataPos = 0;
3089 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
3090 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
3091 mVariantFields.emplace<KernelFields>();
3092 mAllowFds = true;
3093 mDeallocZero = false;
3094 mOwner = nullptr;
3095 mEnforceNoDataAvail = true;
3096 }
3097
scanForFds() const3098 void Parcel::scanForFds() const {
3099 auto* kernelFields = maybeKernelFields();
3100 if (kernelFields == nullptr) {
3101 return;
3102 }
3103 status_t status = hasFileDescriptorsInRange(0, dataSize(), &kernelFields->mHasFds);
3104 ALOGE_IF(status != NO_ERROR, "Error %d calling hasFileDescriptorsInRange()", status);
3105 kernelFields->mFdsKnown = true;
3106 }
3107
3108 #ifdef BINDER_WITH_KERNEL_IPC
// Deprecated alias kept only for ABI stability; see comment below.
size_t Parcel::getBlobAshmemSize() const
{
    // This used to return the size of all blobs that were written to ashmem, now we're returning
    // the ashmem currently referenced by this Parcel, which should be equivalent.
    // TODO(b/202029388): Remove method once ABI can be changed.
    return getOpenAshmemSize();
}
3116
getOpenAshmemSize() const3117 size_t Parcel::getOpenAshmemSize() const
3118 {
3119 auto* kernelFields = maybeKernelFields();
3120 if (kernelFields == nullptr) {
3121 return 0;
3122 }
3123
3124 size_t openAshmemSize = 0;
3125 for (size_t i = 0; i < kernelFields->mObjectsSize; i++) {
3126 const flat_binder_object* flat =
3127 reinterpret_cast<const flat_binder_object*>(mData + kernelFields->mObjects[i]);
3128
3129 // cookie is compared against zero for historical reasons
3130 // > obj.cookie = takeOwnership ? 1 : 0;
3131 if (flat->hdr.type == BINDER_TYPE_FD && flat->cookie != 0 && ashmem_valid(flat->handle)) {
3132 int size = ashmem_get_size_region(flat->handle);
3133 if (__builtin_add_overflow(openAshmemSize, size, &openAshmemSize)) {
3134 ALOGE("Overflow when computing ashmem size.");
3135 return SIZE_MAX;
3136 }
3137 }
3138 }
3139 return openAshmemSize;
3140 }
3141 #endif // BINDER_WITH_KERNEL_IPC
3142
3143 // --- Parcel::Blob ---
3144
// Constructs an empty blob: no fd, no mapping, zero size, immutable.
Parcel::Blob::Blob() :
    mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
}
3148
// Unmaps any mapped region and resets the fields (see release()).
Parcel::Blob::~Blob() {
    release();
}
3152
release()3153 void Parcel::Blob::release() {
3154 if (mFd != -1 && mData) {
3155 ::munmap(mData, mSize);
3156 }
3157 clear();
3158 }
3159
// Adopts an (fd, mapping) pair.  Does not release any existing state, so
// callers are expected to have released the previous mapping first.
void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
    mFd = fd;
    mData = data;
    mSize = size;
    mMutable = isMutable;
}
3166
// Forgets the fd/mapping without unmapping; release() unmaps first and then
// calls this.
void Parcel::Blob::clear() {
    mFd = -1;
    mData = nullptr;
    mSize = 0;
    mMutable = false;
}
3173
3174 } // namespace android
3175