1 /*
2  * Copyright (C) 2005 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19 
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <linux/sched.h>
24 #include <pthread.h>
25 #include <stdint.h>
26 #include <stdio.h>
27 #include <stdlib.h>
28 #include <sys/mman.h>
29 #include <sys/stat.h>
30 #include <sys/types.h>
31 #include <sys/resource.h>
32 #include <unistd.h>
33 
34 #include <binder/Binder.h>
35 #include <binder/BpBinder.h>
36 #include <binder/IPCThreadState.h>
37 #include <binder/Parcel.h>
38 #include <binder/ProcessState.h>
39 #include <binder/Stability.h>
40 #include <binder/Status.h>
41 #include <binder/TextOutput.h>
42 
43 #include <cutils/ashmem.h>
44 #include <utils/Debug.h>
45 #include <utils/Flattenable.h>
46 #include <utils/Log.h>
47 #include <utils/misc.h>
48 #include <utils/String8.h>
49 #include <utils/String16.h>
50 
51 #include <private/binder/binder_module.h>
52 #include "Static.h"
53 
54 #define LOG_REFS(...)
55 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
56 #define LOG_ALLOC(...)
57 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
58 
59 // ---------------------------------------------------------------------------
60 
61 // This macro should never be used at runtime, as a too large value
62 // of s could cause an integer overflow. Instead, you should always
63 // use the wrapper function pad_size()
64 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
65 
66 static size_t pad_size(size_t s) {
67     if (s > (std::numeric_limits<size_t>::max() - 3)) {
68         LOG_ALWAYS_FATAL("pad size too big %zu", s);
69     }
70     return PAD_SIZE_UNSAFE(s);
71 }
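// Example: Parcel data is kept 4-byte aligned, so pad_size(1) == pad_size(4) == 4
// and pad_size(5) == 8; pad_size() only adds an overflow check on top of
// PAD_SIZE_UNSAFE().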
72 
73 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
74 #define STRICT_MODE_PENALTY_GATHER (1 << 31)
75 
76 namespace android {
77 
78 // many things compile this into prebuilts on the stack
79 static_assert(sizeof(Parcel) == 60 || sizeof(Parcel) == 120);
80 
81 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
82 static size_t gParcelGlobalAllocSize = 0;
83 static size_t gParcelGlobalAllocCount = 0;
84 
85 static size_t gMaxFds = 0;
86 
87 // Maximum size of a blob to transfer in-place.
88 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
89 
90 enum {
91     BLOB_INPLACE = 0,
92     BLOB_ASHMEM_IMMUTABLE = 1,
93     BLOB_ASHMEM_MUTABLE = 2,
94 };
95 
96 static void acquire_object(const sp<ProcessState>& proc,
97     const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
98 {
99     switch (obj.hdr.type) {
100         case BINDER_TYPE_BINDER:
101             if (obj.binder) {
102                 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
103                 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
104             }
105             return;
106         case BINDER_TYPE_HANDLE: {
107             const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
108             if (b != nullptr) {
109                 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
110                 b->incStrong(who);
111             }
112             return;
113         }
114         case BINDER_TYPE_FD: {
115             if ((obj.cookie != 0) && (outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
116                 // If we own an ashmem fd, keep track of how much memory it refers to.
117                 int size = ashmem_get_size_region(obj.handle);
118                 if (size > 0) {
119                     *outAshmemSize += size;
120                 }
121             }
122             return;
123         }
124     }
125 
126     ALOGD("Invalid object type 0x%08x", obj.hdr.type);
127 }
128 
129 static void release_object(const sp<ProcessState>& proc,
130     const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
131 {
132     switch (obj.hdr.type) {
133         case BINDER_TYPE_BINDER:
134             if (obj.binder) {
135                 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
136                 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
137             }
138             return;
139         case BINDER_TYPE_HANDLE: {
140             const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
141             if (b != nullptr) {
142                 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
143                 b->decStrong(who);
144             }
145             return;
146         }
147         case BINDER_TYPE_FD: {
148             if (obj.cookie != 0) { // owned
149                 if ((outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
150                     int size = ashmem_get_size_region(obj.handle);
151                     if (size > 0) {
152                         // ashmem size might have changed since last time it was accounted for, e.g.
153                         // in acquire_object(). Value of *outAshmemSize is not critical since we are
154                         // releasing the object anyway. Check for integer overflow condition.
155                         *outAshmemSize -= std::min(*outAshmemSize, static_cast<size_t>(size));
156                     }
157                 }
158 
159                 close(obj.handle);
160             }
161             return;
162         }
163     }
164 
165     ALOGE("Invalid object type 0x%08x", obj.hdr.type);
166 }
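// For BINDER_TYPE_FD entries, a non-zero cookie marks the descriptor as owned by
// this Parcel; that is why release_object() closes the fd (and removes its ashmem
// accounting) only when obj.cookie != 0.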
167 
168 status_t Parcel::finishFlattenBinder(
169     const sp<IBinder>& binder, const flat_binder_object& flat)
170 {
171     status_t status = writeObject(flat, false);
172     if (status != OK) return status;
173 
174     internal::Stability::tryMarkCompilationUnit(binder.get());
175     return writeInt32(internal::Stability::get(binder.get()));
176 }
177 
178 status_t Parcel::finishUnflattenBinder(
179     const sp<IBinder>& binder, sp<IBinder>* out) const
180 {
181     int32_t stability;
182     status_t status = readInt32(&stability);
183     if (status != OK) return status;
184 
185     status = internal::Stability::set(binder.get(), stability, true /*log*/);
186     if (status != OK) return status;
187 
188     *out = binder;
189     return OK;
190 }
191 
192 static constexpr inline int schedPolicyMask(int policy, int priority) {
193     return (priority & FLAT_BINDER_FLAG_PRIORITY_MASK) | ((policy & 3) << FLAT_BINDER_FLAG_SCHED_POLICY_SHIFT);
194 }
195 
196 status_t Parcel::flattenBinder(const sp<IBinder>& binder)
197 {
198     flat_binder_object obj;
199     obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
200 
201     int schedBits = 0;
202     if (!IPCThreadState::self()->backgroundSchedulingDisabled()) {
203         schedBits = schedPolicyMask(SCHED_NORMAL, 19);
204     }
205 
206     if (binder != nullptr) {
207         BBinder *local = binder->localBinder();
208         if (!local) {
209             BpBinder *proxy = binder->remoteBinder();
210             if (proxy == nullptr) {
211                 ALOGE("null proxy");
212             }
213             const int32_t handle = proxy ? proxy->handle() : 0;
214             obj.hdr.type = BINDER_TYPE_HANDLE;
215             obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
216             obj.handle = handle;
217             obj.cookie = 0;
218         } else {
219             int policy = local->getMinSchedulerPolicy();
220             int priority = local->getMinSchedulerPriority();
221 
222             if (policy != 0 || priority != 0) {
223                 // override value, since it is set explicitly
224                 schedBits = schedPolicyMask(policy, priority);
225             }
226             if (local->isRequestingSid()) {
227                 obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
228             }
229             obj.hdr.type = BINDER_TYPE_BINDER;
230             obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
231             obj.cookie = reinterpret_cast<uintptr_t>(local);
232         }
233     } else {
234         obj.hdr.type = BINDER_TYPE_BINDER;
235         obj.binder = 0;
236         obj.cookie = 0;
237     }
238 
239     obj.flags |= schedBits;
240 
241     return finishFlattenBinder(binder, obj);
242 }
243 
244 status_t Parcel::unflattenBinder(sp<IBinder>* out) const
245 {
246     const flat_binder_object* flat = readObject(false);
247 
248     if (flat) {
249         switch (flat->hdr.type) {
250             case BINDER_TYPE_BINDER: {
251                 sp<IBinder> binder = reinterpret_cast<IBinder*>(flat->cookie);
252                 return finishUnflattenBinder(binder, out);
253             }
254             case BINDER_TYPE_HANDLE: {
255                 sp<IBinder> binder =
256                     ProcessState::self()->getStrongProxyForHandle(flat->handle);
257                 return finishUnflattenBinder(binder, out);
258             }
259         }
260     }
261     return BAD_TYPE;
262 }
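// When reading: BINDER_TYPE_BINDER means the object lives in this process and
// flat->cookie is the local IBinder pointer, while BINDER_TYPE_HANDLE means the
// kernel rewrote the object into a handle, for which a proxy is obtained via
// ProcessState::getStrongProxyForHandle().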
263 
264 // ---------------------------------------------------------------------------
265 
266 Parcel::Parcel()
267 {
268     LOG_ALLOC("Parcel %p: constructing", this);
269     initState();
270 }
271 
272 Parcel::~Parcel()
273 {
274     freeDataNoInit();
275     LOG_ALLOC("Parcel %p: destroyed", this);
276 }
277 
278 size_t Parcel::getGlobalAllocSize() {
279     pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
280     size_t size = gParcelGlobalAllocSize;
281     pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
282     return size;
283 }
284 
285 size_t Parcel::getGlobalAllocCount() {
286     pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
287     size_t count = gParcelGlobalAllocCount;
288     pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
289     return count;
290 }
291 
292 const uint8_t* Parcel::data() const
293 {
294     return mData;
295 }
296 
297 size_t Parcel::dataSize() const
298 {
299     return (mDataSize > mDataPos ? mDataSize : mDataPos);
300 }
301 
302 size_t Parcel::dataAvail() const
303 {
304     size_t result = dataSize() - dataPosition();
305     if (result > INT32_MAX) {
306         LOG_ALWAYS_FATAL("result too big: %zu", result);
307     }
308     return result;
309 }
310 
311 size_t Parcel::dataPosition() const
312 {
313     return mDataPos;
314 }
315 
316 size_t Parcel::dataCapacity() const
317 {
318     return mDataCapacity;
319 }
320 
321 status_t Parcel::setDataSize(size_t size)
322 {
323     if (size > INT32_MAX) {
324         // don't accept size_t values which may have come from an
325         // inadvertent conversion from a negative int.
326         return BAD_VALUE;
327     }
328 
329     status_t err;
330     err = continueWrite(size);
331     if (err == NO_ERROR) {
332         mDataSize = size;
333         ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
334     }
335     return err;
336 }
337 
338 void Parcel::setDataPosition(size_t pos) const
339 {
340     if (pos > INT32_MAX) {
341         // don't accept size_t values which may have come from an
342         // inadvertent conversion from a negative int.
343         LOG_ALWAYS_FATAL("pos too big: %zu", pos);
344     }
345 
346     mDataPos = pos;
347     mNextObjectHint = 0;
348     mObjectsSorted = false;
349 }
350 
351 status_t Parcel::setDataCapacity(size_t size)
352 {
353     if (size > INT32_MAX) {
354         // don't accept size_t values which may have come from an
355         // inadvertent conversion from a negative int.
356         return BAD_VALUE;
357     }
358 
359     if (size > mDataCapacity) return continueWrite(size);
360     return NO_ERROR;
361 }
362 
363 status_t Parcel::setData(const uint8_t* buffer, size_t len)
364 {
365     if (len > INT32_MAX) {
366         // don't accept size_t values which may have come from an
367         // inadvertent conversion from a negative int.
368         return BAD_VALUE;
369     }
370 
371     status_t err = restartWrite(len);
372     if (err == NO_ERROR) {
373         memcpy(const_cast<uint8_t*>(data()), buffer, len);
374         mDataSize = len;
375         mFdsKnown = false;
376     }
377     return err;
378 }
379 
380 status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
381 {
382     status_t err;
383     const uint8_t *data = parcel->mData;
384     const binder_size_t *objects = parcel->mObjects;
385     size_t size = parcel->mObjectsSize;
386     int startPos = mDataPos;
387     int firstIndex = -1, lastIndex = -2;
388 
389     if (len == 0) {
390         return NO_ERROR;
391     }
392 
393     if (len > INT32_MAX) {
394         // don't accept size_t values which may have come from an
395         // inadvertent conversion from a negative int.
396         return BAD_VALUE;
397     }
398 
399     // range checks against the source parcel size
400     if ((offset > parcel->mDataSize)
401             || (len > parcel->mDataSize)
402             || (offset + len > parcel->mDataSize)) {
403         return BAD_VALUE;
404     }
405 
406     // Count objects in range
407     for (int i = 0; i < (int) size; i++) {
408         size_t off = objects[i];
409         if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
410             if (firstIndex == -1) {
411                 firstIndex = i;
412             }
413             lastIndex = i;
414         }
415     }
416     int numObjects = lastIndex - firstIndex + 1;
417 
418     if ((mDataSize+len) > mDataCapacity) {
419         // grow data
420         err = growData(len);
421         if (err != NO_ERROR) {
422             return err;
423         }
424     }
425 
426     // append data
427     memcpy(mData + mDataPos, data + offset, len);
428     mDataPos += len;
429     mDataSize += len;
430 
431     err = NO_ERROR;
432 
433     if (numObjects > 0) {
434         const sp<ProcessState> proc(ProcessState::self());
435         // grow objects
436         if (mObjectsCapacity < mObjectsSize + numObjects) {
437             if ((size_t) numObjects > SIZE_MAX - mObjectsSize) return NO_MEMORY; // overflow
438             if (mObjectsSize + numObjects > SIZE_MAX / 3) return NO_MEMORY; // overflow
439             size_t newSize = ((mObjectsSize + numObjects)*3)/2;
440             if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
441             binder_size_t *objects =
442                 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
443             if (objects == (binder_size_t*)nullptr) {
444                 return NO_MEMORY;
445             }
446             mObjects = objects;
447             mObjectsCapacity = newSize;
448         }
449 
450         // append and acquire objects
451         int idx = mObjectsSize;
452         for (int i = firstIndex; i <= lastIndex; i++) {
453             size_t off = objects[i] - offset + startPos;
454             mObjects[idx++] = off;
455             mObjectsSize++;
456 
457             flat_binder_object* flat
458                 = reinterpret_cast<flat_binder_object*>(mData + off);
459             acquire_object(proc, *flat, this, &mOpenAshmemSize);
460 
461             if (flat->hdr.type == BINDER_TYPE_FD) {
462                 // If this is a file descriptor, we need to dup it so the
463                 // new Parcel now owns its own fd, and can declare that we
464                 // officially know we have fds.
465                 flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
466                 flat->cookie = 1;
467                 mHasFds = mFdsKnown = true;
468                 if (!mAllowFds) {
469                     err = FDS_NOT_ALLOWED;
470                 }
471             }
472         }
473     }
474 
475     return err;
476 }
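// Any file descriptors appended from the source Parcel are duplicated with
// F_DUPFD_CLOEXEC and marked owned (cookie = 1), so source and destination can
// each close their copy independently.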
477 
478 int Parcel::compareData(const Parcel& other) {
479     size_t size = dataSize();
480     if (size != other.dataSize()) {
481         return size < other.dataSize() ? -1 : 1;
482     }
483     return memcmp(data(), other.data(), size);
484 }
485 
486 bool Parcel::allowFds() const
487 {
488     return mAllowFds;
489 }
490 
491 bool Parcel::pushAllowFds(bool allowFds)
492 {
493     const bool origValue = mAllowFds;
494     if (!allowFds) {
495         mAllowFds = false;
496     }
497     return origValue;
498 }
499 
500 void Parcel::restoreAllowFds(bool lastValue)
501 {
502     mAllowFds = lastValue;
503 }
504 
505 bool Parcel::hasFileDescriptors() const
506 {
507     if (!mFdsKnown) {
508         scanForFds();
509     }
510     return mHasFds;
511 }
512 
513 void Parcel::updateWorkSourceRequestHeaderPosition() const {
514     // Only update the request headers once. We only want to point
515     // to the first headers read/written.
516     if (!mRequestHeaderPresent) {
517         mWorkSourceRequestHeaderPosition = dataPosition();
518         mRequestHeaderPresent = true;
519     }
520 }
521 
522 #if defined(__ANDROID_VNDK__) && !defined(__ANDROID_APEX__)
523 constexpr int32_t kHeader = B_PACK_CHARS('V', 'N', 'D', 'R');
524 #else
525 constexpr int32_t kHeader = B_PACK_CHARS('S', 'Y', 'S', 'T');
526 #endif
527 
528 // Write RPC headers.  (previously just the interface token)
529 status_t Parcel::writeInterfaceToken(const String16& interface)
530 {
531     const IPCThreadState* threadState = IPCThreadState::self();
532     writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
533     updateWorkSourceRequestHeaderPosition();
534     writeInt32(threadState->shouldPropagateWorkSource() ?
535             threadState->getCallingWorkSourceUid() : IPCThreadState::kUnsetWorkSource);
536     writeInt32(kHeader);
537     // currently the interface identification token is just its name as a string
538     return writeString16(interface);
539 }
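// Header layout produced above (and consumed by enforceInterface()):
//   int32_t   strict-mode policy, with STRICT_MODE_PENALTY_GATHER set
//   int32_t   work source uid, or IPCThreadState::kUnsetWorkSource
//   int32_t   kHeader ('SYST', or 'VNDR' for vendor builds)
//   String16  interface descriptor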
540 
541 bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
542 {
543     if (!mRequestHeaderPresent) {
544         return false;
545     }
546 
547     const size_t initialPosition = dataPosition();
548     setDataPosition(mWorkSourceRequestHeaderPosition);
549     status_t err = writeInt32(uid);
550     setDataPosition(initialPosition);
551     return err == NO_ERROR;
552 }
553 
554 uid_t Parcel::readCallingWorkSourceUid() const
555 {
556     if (!mRequestHeaderPresent) {
557         return IPCThreadState::kUnsetWorkSource;
558     }
559 
560     const size_t initialPosition = dataPosition();
561     setDataPosition(mWorkSourceRequestHeaderPosition);
562     uid_t uid = readInt32();
563     setDataPosition(initialPosition);
564     return uid;
565 }
566 
567 bool Parcel::checkInterface(IBinder* binder) const
568 {
569     return enforceInterface(binder->getInterfaceDescriptor());
570 }
571 
572 bool Parcel::enforceInterface(const String16& interface,
573                               IPCThreadState* threadState) const
574 {
575     return enforceInterface(interface.string(), interface.size(), threadState);
576 }
577 
578 bool Parcel::enforceInterface(const char16_t* interface,
579                               size_t len,
580                               IPCThreadState* threadState) const
581 {
582     // StrictModePolicy.
583     int32_t strictPolicy = readInt32();
584     if (threadState == nullptr) {
585         threadState = IPCThreadState::self();
586     }
587     if ((threadState->getLastTransactionBinderFlags() &
588          IBinder::FLAG_ONEWAY) != 0) {
589       // For one-way calls, the callee is running entirely
590       // disconnected from the caller, so disable StrictMode entirely.
591       // Not only does disk/network usage not impact the caller, but
592       // there's no way to communicate back any violations anyway.
593       threadState->setStrictModePolicy(0);
594     } else {
595       threadState->setStrictModePolicy(strictPolicy);
596     }
597     // WorkSource.
598     updateWorkSourceRequestHeaderPosition();
599     int32_t workSource = readInt32();
600     threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
601     // vendor header
602     int32_t header = readInt32();
603     if (header != kHeader) {
604         ALOGE("Expecting header 0x%x but found 0x%x. Mixing copies of libbinder?", kHeader, header);
605         return false;
606     }
607     // Interface descriptor.
608     size_t parcel_interface_len;
609     const char16_t* parcel_interface = readString16Inplace(&parcel_interface_len);
610     if (len == parcel_interface_len &&
611             (!len || !memcmp(parcel_interface, interface, len * sizeof (char16_t)))) {
612         return true;
613     } else {
614         ALOGW("**** enforceInterface() expected '%s' but read '%s'",
615               String8(interface, len).string(),
616               String8(parcel_interface, parcel_interface_len).string());
617         return false;
618     }
619 }
620 
621 size_t Parcel::objectsCount() const
622 {
623     return mObjectsSize;
624 }
625 
626 status_t Parcel::errorCheck() const
627 {
628     return mError;
629 }
630 
631 void Parcel::setError(status_t err)
632 {
633     mError = err;
634 }
635 
636 status_t Parcel::finishWrite(size_t len)
637 {
638     if (len > INT32_MAX) {
639         // don't accept size_t values which may have come from an
640         // inadvertent conversion from a negative int.
641         return BAD_VALUE;
642     }
643 
644     //printf("Finish write of %d\n", len);
645     mDataPos += len;
646     ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
647     if (mDataPos > mDataSize) {
648         mDataSize = mDataPos;
649         ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
650     }
651     //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
652     return NO_ERROR;
653 }
654 
655 status_t Parcel::writeUnpadded(const void* data, size_t len)
656 {
657     if (len > INT32_MAX) {
658         // don't accept size_t values which may have come from an
659         // inadvertent conversion from a negative int.
660         return BAD_VALUE;
661     }
662 
663     size_t end = mDataPos + len;
664     if (end < mDataPos) {
665         // integer overflow
666         return BAD_VALUE;
667     }
668 
669     if (end <= mDataCapacity) {
670 restart_write:
671         memcpy(mData+mDataPos, data, len);
672         return finishWrite(len);
673     }
674 
675     status_t err = growData(len);
676     if (err == NO_ERROR) goto restart_write;
677     return err;
678 }
679 
680 status_t Parcel::write(const void* data, size_t len)
681 {
682     if (len > INT32_MAX) {
683         // don't accept size_t values which may have come from an
684         // inadvertent conversion from a negative int.
685         return BAD_VALUE;
686     }
687 
688     void* const d = writeInplace(len);
689     if (d) {
690         memcpy(d, data, len);
691         return NO_ERROR;
692     }
693     return mError;
694 }
695 
696 void* Parcel::writeInplace(size_t len)
697 {
698     if (len > INT32_MAX) {
699         // don't accept size_t values which may have come from an
700         // inadvertent conversion from a negative int.
701         return nullptr;
702     }
703 
704     const size_t padded = pad_size(len);
705 
706     // sanity check for integer overflow
707     if (mDataPos+padded < mDataPos) {
708         return nullptr;
709     }
710 
711     if ((mDataPos+padded) <= mDataCapacity) {
712 restart_write:
713         //printf("Writing %ld bytes, padded to %ld\n", len, padded);
714         uint8_t* const data = mData+mDataPos;
715 
716         // Need to pad at end?
717         if (padded != len) {
718 #if BYTE_ORDER == BIG_ENDIAN
719             static const uint32_t mask[4] = {
720                 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
721             };
722 #endif
723 #if BYTE_ORDER == LITTLE_ENDIAN
724             static const uint32_t mask[4] = {
725                 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
726             };
727 #endif
728             //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
729             //    *reinterpret_cast<void**>(data+padded-4));
730             *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
731         }
732 
733         finishWrite(padded);
734         return data;
735     }
736 
737     status_t err = growData(padded);
738     if (err == NO_ERROR) goto restart_write;
739     return nullptr;
740 }
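// Padding example: writing 6 bytes pads to 8; mask[padded - len] == mask[2]
// zeroes the two trailing bytes of the final 32-bit word so that uninitialized
// memory is never carried into the Parcel.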
741 
742 status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
743     const uint8_t* strData = (uint8_t*)str.data();
744     const size_t strLen= str.length();
745     const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
746     if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
747         return BAD_VALUE;
748     }
749 
750     status_t err = writeInt32(utf16Len);
751     if (err) {
752         return err;
753     }
754 
755     // Allocate enough bytes to hold our converted string and its terminating NULL.
756     void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
757     if (!dst) {
758         return NO_MEMORY;
759     }
760 
761     utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);
762 
763     return NO_ERROR;
764 }
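// Wire format: an int32 count of UTF-16 code units followed by the converted
// UTF-16 data and a NUL terminator, the same layout writeString16() produces,
// so the receiver can read it back as an ordinary string.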
765 
766 status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
767   if (!str) {
768     return writeInt32(-1);
769   }
770   return writeUtf8AsUtf16(*str);
771 }
772 
773 status_t Parcel::writeByteVectorInternal(const int8_t* data, size_t size) {
774     if (size > std::numeric_limits<int32_t>::max()) {
775         return BAD_VALUE;
776     }
777 
778     status_t status = writeInt32(size);
779     if (status != OK) {
780         return status;
781     }
782 
783     return write(data, size);
784 }
785 
786 status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
787     return writeByteVectorInternal(val.data(), val.size());
788 }
789 
790 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
791 {
792     if (!val) return writeInt32(-1);
793     return writeByteVectorInternal(val->data(), val->size());
794 }
795 
796 status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
797     return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val.data()), val.size());
798 }
799 
800 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
801 {
802     if (!val) return writeInt32(-1);
803     return writeByteVectorInternal(reinterpret_cast<const int8_t*>(val->data()), val->size());
804 }
805 
806 status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
807 {
808     return writeTypedVector(val, &Parcel::writeInt32);
809 }
810 
811 status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
812 {
813     return writeNullableTypedVector(val, &Parcel::writeInt32);
814 }
815 
816 status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
817 {
818     return writeTypedVector(val, &Parcel::writeInt64);
819 }
820 
821 status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
822 {
823     return writeNullableTypedVector(val, &Parcel::writeInt64);
824 }
825 
826 status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val)
827 {
828     return writeTypedVector(val, &Parcel::writeUint64);
829 }
830 
831 status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val)
832 {
833     return writeNullableTypedVector(val, &Parcel::writeUint64);
834 }
835 
836 status_t Parcel::writeFloatVector(const std::vector<float>& val)
837 {
838     return writeTypedVector(val, &Parcel::writeFloat);
839 }
840 
841 status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
842 {
843     return writeNullableTypedVector(val, &Parcel::writeFloat);
844 }
845 
846 status_t Parcel::writeDoubleVector(const std::vector<double>& val)
847 {
848     return writeTypedVector(val, &Parcel::writeDouble);
849 }
850 
851 status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
852 {
853     return writeNullableTypedVector(val, &Parcel::writeDouble);
854 }
855 
856 status_t Parcel::writeBoolVector(const std::vector<bool>& val)
857 {
858     return writeTypedVector(val, &Parcel::writeBool);
859 }
860 
861 status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
862 {
863     return writeNullableTypedVector(val, &Parcel::writeBool);
864 }
865 
866 status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
867 {
868     return writeTypedVector(val, &Parcel::writeChar);
869 }
870 
871 status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
872 {
873     return writeNullableTypedVector(val, &Parcel::writeChar);
874 }
875 
876 status_t Parcel::writeString16Vector(const std::vector<String16>& val)
877 {
878     return writeTypedVector(val, &Parcel::writeString16);
879 }
880 
881 status_t Parcel::writeString16Vector(
882         const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
883 {
884     return writeNullableTypedVector(val, &Parcel::writeString16);
885 }
886 
887 status_t Parcel::writeUtf8VectorAsUtf16Vector(
888                         const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
889     return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
890 }
891 
892 status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
893     return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
894 }
895 
896 status_t Parcel::writeInt32(int32_t val)
897 {
898     return writeAligned(val);
899 }
900 
901 status_t Parcel::writeUint32(uint32_t val)
902 {
903     return writeAligned(val);
904 }
905 
906 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
907     if (len > INT32_MAX) {
908         // don't accept size_t values which may have come from an
909         // inadvertent conversion from a negative int.
910         return BAD_VALUE;
911     }
912 
913     if (!val) {
914         return writeInt32(-1);
915     }
916     status_t ret = writeInt32(static_cast<uint32_t>(len));
917     if (ret == NO_ERROR) {
918         ret = write(val, len * sizeof(*val));
919     }
920     return ret;
921 }
922 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
923     if (len > INT32_MAX) {
924         // don't accept size_t values which may have come from an
925         // inadvertent conversion from a negative int.
926         return BAD_VALUE;
927     }
928 
929     if (!val) {
930         return writeInt32(-1);
931     }
932     status_t ret = writeInt32(static_cast<uint32_t>(len));
933     if (ret == NO_ERROR) {
934         ret = write(val, len * sizeof(*val));
935     }
936     return ret;
937 }
938 
939 status_t Parcel::writeBool(bool val)
940 {
941     return writeInt32(int32_t(val));
942 }
943 
944 status_t Parcel::writeChar(char16_t val)
945 {
946     return writeInt32(int32_t(val));
947 }
948 
949 status_t Parcel::writeByte(int8_t val)
950 {
951     return writeInt32(int32_t(val));
952 }
953 
954 status_t Parcel::writeInt64(int64_t val)
955 {
956     return writeAligned(val);
957 }
958 
959 status_t Parcel::writeUint64(uint64_t val)
960 {
961     return writeAligned(val);
962 }
963 
964 status_t Parcel::writePointer(uintptr_t val)
965 {
966     return writeAligned<binder_uintptr_t>(val);
967 }
968 
969 status_t Parcel::writeFloat(float val)
970 {
971     return writeAligned(val);
972 }
973 
974 #if defined(__mips__) && defined(__mips_hard_float)
975 
976 status_t Parcel::writeDouble(double val)
977 {
978     union {
979         double d;
980         unsigned long long ll;
981     } u;
982     u.d = val;
983     return writeAligned(u.ll);
984 }
985 
986 #else
987 
988 status_t Parcel::writeDouble(double val)
989 {
990     return writeAligned(val);
991 }
992 
993 #endif
994 
995 status_t Parcel::writeCString(const char* str)
996 {
997     return write(str, strlen(str)+1);
998 }
999 
1000 status_t Parcel::writeString8(const String8& str)
1001 {
1002     return writeString8(str.string(), str.size());
1003 }
1004 
1005 status_t Parcel::writeString8(const char* str, size_t len)
1006 {
1007     if (str == nullptr) return writeInt32(-1);
1008 
1009     status_t err = writeInt32(len);
1010     if (err == NO_ERROR) {
1011         uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char));
1012         if (data) {
1013             memcpy(data, str, len);
1014             *reinterpret_cast<char*>(data+len) = 0;
1015             return NO_ERROR;
1016         }
1017         err = mError;
1018     }
1019     return err;
1020 }
1021 
1022 status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
1023 {
1024     if (!str) {
1025         return writeInt32(-1);
1026     }
1027 
1028     return writeString16(*str);
1029 }
1030 
1031 status_t Parcel::writeString16(const String16& str)
1032 {
1033     return writeString16(str.string(), str.size());
1034 }
1035 
1036 status_t Parcel::writeString16(const char16_t* str, size_t len)
1037 {
1038     if (str == nullptr) return writeInt32(-1);
1039 
1040     status_t err = writeInt32(len);
1041     if (err == NO_ERROR) {
1042         len *= sizeof(char16_t);
1043         uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1044         if (data) {
1045             memcpy(data, str, len);
1046             *reinterpret_cast<char16_t*>(data+len) = 0;
1047             return NO_ERROR;
1048         }
1049         err = mError;
1050     }
1051     return err;
1052 }
1053 
1054 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1055 {
1056     return flattenBinder(val);
1057 }
1058 
1059 status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1060 {
1061     return writeTypedVector(val, &Parcel::writeStrongBinder);
1062 }
1063 
1064 status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1065 {
1066     return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1067 }
1068 
1069 status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1070     return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
1071 }
1072 
1073 status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1074     return readTypedVector(val, &Parcel::readStrongBinder);
1075 }
1076 
1077 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1078     if (!parcelable) {
1079         return writeInt32(0);
1080     }
1081 
1082     return writeParcelable(*parcelable);
1083 }
1084 
1085 status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1086     status_t status = writeInt32(1);  // parcelable is not null.
1087     if (status != OK) {
1088         return status;
1089     }
1090     return parcelable.writeToParcel(this);
1091 }
1092 
1093 status_t Parcel::writeNativeHandle(const native_handle* handle)
1094 {
1095     if (!handle || handle->version != sizeof(native_handle))
1096         return BAD_TYPE;
1097 
1098     status_t err;
1099     err = writeInt32(handle->numFds);
1100     if (err != NO_ERROR) return err;
1101 
1102     err = writeInt32(handle->numInts);
1103     if (err != NO_ERROR) return err;
1104 
1105     for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1106         err = writeDupFileDescriptor(handle->data[i]);
1107 
1108     if (err != NO_ERROR) {
1109         ALOGD("write native handle, write dup fd failed");
1110         return err;
1111     }
1112     err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1113     return err;
1114 }
1115 
1116 status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1117 {
1118     flat_binder_object obj;
1119     obj.hdr.type = BINDER_TYPE_FD;
1120     obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1121     obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1122     obj.handle = fd;
1123     obj.cookie = takeOwnership ? 1 : 0;
1124     return writeObject(obj, true);
1125 }
1126 
1127 status_t Parcel::writeDupFileDescriptor(int fd)
1128 {
1129     int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
1130     if (dupFd < 0) {
1131         return -errno;
1132     }
1133     status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1134     if (err != OK) {
1135         close(dupFd);
1136     }
1137     return err;
1138 }
1139 
1140 status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
1141 {
1142     writeInt32(0);
1143     return writeFileDescriptor(fd, takeOwnership);
1144 }
1145 
1146 status_t Parcel::writeDupParcelFileDescriptor(int fd)
1147 {
1148     int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
1149     if (dupFd < 0) {
1150         return -errno;
1151     }
1152     status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
1153     if (err != OK) {
1154         close(dupFd);
1155     }
1156     return err;
1157 }
1158 
1159 status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
1160     return writeDupFileDescriptor(fd.get());
1161 }
1162 
1163 status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) {
1164     return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1165 }
1166 
1167 status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) {
1168     return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1169 }
1170 
1171 status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1172 {
1173     if (len > INT32_MAX) {
1174         // don't accept size_t values which may have come from an
1175         // inadvertent conversion from a negative int.
1176         return BAD_VALUE;
1177     }
1178 
1179     status_t status;
1180     if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1181         ALOGV("writeBlob: write in place");
1182         status = writeInt32(BLOB_INPLACE);
1183         if (status) return status;
1184 
1185         void* ptr = writeInplace(len);
1186         if (!ptr) return NO_MEMORY;
1187 
1188         outBlob->init(-1, ptr, len, false);
1189         return NO_ERROR;
1190     }
1191 
1192     ALOGV("writeBlob: write to ashmem");
1193     int fd = ashmem_create_region("Parcel Blob", len);
1194     if (fd < 0) return NO_MEMORY;
1195 
1196     int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1197     if (result < 0) {
1198         status = result;
1199     } else {
1200         void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1201         if (ptr == MAP_FAILED) {
1202             status = -errno;
1203         } else {
1204             if (!mutableCopy) {
1205                 result = ashmem_set_prot_region(fd, PROT_READ);
1206             }
1207             if (result < 0) {
1208                 status = result;
1209             } else {
1210                 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1211                 if (!status) {
1212                     status = writeFileDescriptor(fd, true /*takeOwnership*/);
1213                     if (!status) {
1214                         outBlob->init(fd, ptr, len, mutableCopy);
1215                         return NO_ERROR;
1216                     }
1217                 }
1218             }
1219         }
1220         ::munmap(ptr, len);
1221     }
1222     ::close(fd);
1223     return status;
1224 }
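// Summary: blobs no larger than BLOB_INPLACE_LIMIT (16 KiB), or any blob when
// fds are not allowed, are copied inline as BLOB_INPLACE; larger blobs are
// placed in an ashmem region and only its fd is written, tagged
// BLOB_ASHMEM_MUTABLE or BLOB_ASHMEM_IMMUTABLE.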
1225 
1226 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1227 {
1228     // Must match up with what's done in writeBlob.
1229     if (!mAllowFds) return FDS_NOT_ALLOWED;
1230     status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1231     if (status) return status;
1232     return writeDupFileDescriptor(fd);
1233 }
1234 
1235 status_t Parcel::write(const FlattenableHelperInterface& val)
1236 {
1237     status_t err;
1238 
1239     // size if needed
1240     const size_t len = val.getFlattenedSize();
1241     const size_t fd_count = val.getFdCount();
1242 
1243     if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
1244         // don't accept size_t values which may have come from an
1245         // inadvertent conversion from a negative int.
1246         return BAD_VALUE;
1247     }
1248 
1249     err = this->writeInt32(len);
1250     if (err) return err;
1251 
1252     err = this->writeInt32(fd_count);
1253     if (err) return err;
1254 
1255     // payload
1256     void* const buf = this->writeInplace(len);
1257     if (buf == nullptr)
1258         return BAD_VALUE;
1259 
1260     int* fds = nullptr;
1261     if (fd_count) {
1262         fds = new (std::nothrow) int[fd_count];
1263         if (fds == nullptr) {
1264             ALOGE("write: failed to allocate requested %zu fds", fd_count);
1265             return BAD_VALUE;
1266         }
1267     }
1268 
1269     err = val.flatten(buf, len, fds, fd_count);
1270     for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1271         err = this->writeDupFileDescriptor( fds[i] );
1272     }
1273 
1274     if (fd_count) {
1275         delete [] fds;
1276     }
1277 
1278     return err;
1279 }
1280 
1281 status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1282 {
1283     const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1284     const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1285     if (enoughData && enoughObjects) {
1286 restart_write:
1287         *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1288 
1289         // remember if it's a file descriptor
1290         if (val.hdr.type == BINDER_TYPE_FD) {
1291             if (!mAllowFds) {
1292                 // fail before modifying our object index
1293                 return FDS_NOT_ALLOWED;
1294             }
1295             mHasFds = mFdsKnown = true;
1296         }
1297 
1298         // Need to write meta-data?
1299         if (nullMetaData || val.binder != 0) {
1300             mObjects[mObjectsSize] = mDataPos;
1301             acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1302             mObjectsSize++;
1303         }
1304 
1305         return finishWrite(sizeof(flat_binder_object));
1306     }
1307 
1308     if (!enoughData) {
1309         const status_t err = growData(sizeof(val));
1310         if (err != NO_ERROR) return err;
1311     }
1312     if (!enoughObjects) {
1313         if (mObjectsSize > SIZE_MAX - 2) return NO_MEMORY; // overflow
1314         if ((mObjectsSize + 2) > SIZE_MAX / 3) return NO_MEMORY; // overflow
1315         size_t newSize = ((mObjectsSize+2)*3)/2;
1316         if (newSize > SIZE_MAX / sizeof(binder_size_t)) return NO_MEMORY; // overflow
1317         binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1318         if (objects == nullptr) return NO_MEMORY;
1319         mObjects = objects;
1320         mObjectsCapacity = newSize;
1321     }
1322 
1323     goto restart_write;
1324 }
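// Each flattened object written here is also recorded by offset in mObjects,
// the offsets table that accompanies the data buffer through the binder driver
// and that validateReadData() uses to keep plain-data reads off object data.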
1325 
1326 status_t Parcel::writeNoException()
1327 {
1328     binder::Status status;
1329     return status.writeToParcel(this);
1330 }
1331 
1332 status_t Parcel::validateReadData(size_t upperBound) const
1333 {
1334     // Don't allow non-object reads on object data
1335     if (mObjectsSorted || mObjectsSize <= 1) {
1336 data_sorted:
1337         // Expect to check only against the next object
1338         if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
1339             // For some reason the current read position is greater than the next object
1340             // hint. Iterate until we find the right object
1341             size_t nextObject = mNextObjectHint;
1342             do {
1343                 if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
1344                     // Requested info overlaps with an object
1345                     ALOGE("Attempt to read from protected data in Parcel %p", this);
1346                     return PERMISSION_DENIED;
1347                 }
1348                 nextObject++;
1349             } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
1350             mNextObjectHint = nextObject;
1351         }
1352         return NO_ERROR;
1353     }
1354     // Quickly determine if mObjects is sorted.
1355     binder_size_t* currObj = mObjects + mObjectsSize - 1;
1356     binder_size_t* prevObj = currObj;
1357     while (currObj > mObjects) {
1358         prevObj--;
1359         if(*prevObj > *currObj) {
1360             goto data_unsorted;
1361         }
1362         currObj--;
1363     }
1364     mObjectsSorted = true;
1365     goto data_sorted;
1366 
1367 data_unsorted:
1368     // Insertion Sort mObjects
1369     // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
1370     // switch to std::sort(mObjects, mObjects + mObjectsSize);
1371     for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
1372         binder_size_t temp = *iter0;
1373         binder_size_t* iter1 = iter0 - 1;
1374         while (iter1 >= mObjects && *iter1 > temp) {
1375             *(iter1 + 1) = *iter1;
1376             iter1--;
1377         }
1378         *(iter1 + 1) = temp;
1379     }
1380     mNextObjectHint = 0;
1381     mObjectsSorted = true;
1382     goto data_sorted;
1383 }
1384 
1385 status_t Parcel::read(void* outData, size_t len) const
1386 {
1387     if (len > INT32_MAX) {
1388         // don't accept size_t values which may have come from an
1389         // inadvertent conversion from a negative int.
1390         return BAD_VALUE;
1391     }
1392 
1393     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1394             && len <= pad_size(len)) {
1395         if (mObjectsSize > 0) {
1396             status_t err = validateReadData(mDataPos + pad_size(len));
1397             if(err != NO_ERROR) {
1398                 // Still increment the data position by the expected length
1399                 mDataPos += pad_size(len);
1400                 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1401                 return err;
1402             }
1403         }
1404         memcpy(outData, mData+mDataPos, len);
1405         mDataPos += pad_size(len);
1406         ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1407         return NO_ERROR;
1408     }
1409     return NOT_ENOUGH_DATA;
1410 }
1411 
1412 const void* Parcel::readInplace(size_t len) const
1413 {
1414     if (len > INT32_MAX) {
1415         // don't accept size_t values which may have come from an
1416         // inadvertent conversion from a negative int.
1417         return nullptr;
1418     }
1419 
1420     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1421             && len <= pad_size(len)) {
1422         if (mObjectsSize > 0) {
1423             status_t err = validateReadData(mDataPos + pad_size(len));
1424             if(err != NO_ERROR) {
1425                 // Still increment the data position by the expected length
1426                 mDataPos += pad_size(len);
1427                 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1428                 return nullptr;
1429             }
1430         }
1431 
1432         const void* data = mData+mDataPos;
1433         mDataPos += pad_size(len);
1434         ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1435         return data;
1436     }
1437     return nullptr;
1438 }
1439 
1440 template<class T>
1441 status_t Parcel::readAligned(T *pArg) const {
1442     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1443 
1444     if ((mDataPos+sizeof(T)) <= mDataSize) {
1445         if (mObjectsSize > 0) {
1446             status_t err = validateReadData(mDataPos + sizeof(T));
1447             if(err != NO_ERROR) {
1448                 // Still increment the data position by the expected length
1449                 mDataPos += sizeof(T);
1450                 return err;
1451             }
1452         }
1453 
1454         const void* data = mData+mDataPos;
1455         mDataPos += sizeof(T);
1456         *pArg =  *reinterpret_cast<const T*>(data);
1457         return NO_ERROR;
1458     } else {
1459         return NOT_ENOUGH_DATA;
1460     }
1461 }
1462 
1463 template<class T>
1464 T Parcel::readAligned() const {
1465     T result;
1466     if (readAligned(&result) != NO_ERROR) {
1467         result = 0;
1468     }
1469 
1470     return result;
1471 }
1472 
1473 template<class T>
1474 status_t Parcel::writeAligned(T val) {
1475     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1476 
1477     if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1478 restart_write:
1479         *reinterpret_cast<T*>(mData+mDataPos) = val;
1480         return finishWrite(sizeof(val));
1481     }
1482 
1483     status_t err = growData(sizeof(val));
1484     if (err == NO_ERROR) goto restart_write;
1485     return err;
1486 }
1487 
1488 status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1489     size_t size;
1490     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1491     return readByteVectorInternal(val, size);
1492 }
1493 
1494 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1495     size_t size;
1496     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1497     return readByteVectorInternal(val, size);
1498 }
1499 
1500 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1501     size_t size;
1502     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1503     if (val->get() == nullptr) {
1504         // reserveOutVector does not create the out vector if size is < 0.
1505         // This occurs when writing a null byte vector.
1506         return OK;
1507     }
1508     return readByteVectorInternal(val->get(), size);
1509 }
1510 
1511 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1512     size_t size;
1513     if (status_t status = reserveOutVector(val, &size); status != OK) return status;
1514     if (val->get() == nullptr) {
1515         // reserveOutVector does not create the out vector if size is < 0.
1516         // This occurs when writing a null byte vector.
1517         return OK;
1518     }
1519     return readByteVectorInternal(val->get(), size);
1520 }
1521 
1522 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1523     return readNullableTypedVector(val, &Parcel::readInt32);
1524 }
1525 
1526 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1527     return readTypedVector(val, &Parcel::readInt32);
1528 }
1529 
1530 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1531     return readNullableTypedVector(val, &Parcel::readInt64);
1532 }
1533 
1534 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1535     return readTypedVector(val, &Parcel::readInt64);
1536 }
1537 
1538 status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const {
1539     return readNullableTypedVector(val, &Parcel::readUint64);
1540 }
1541 
1542 status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const {
1543     return readTypedVector(val, &Parcel::readUint64);
1544 }
1545 
1546 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1547     return readNullableTypedVector(val, &Parcel::readFloat);
1548 }
1549 
1550 status_t Parcel::readFloatVector(std::vector<float>* val) const {
1551     return readTypedVector(val, &Parcel::readFloat);
1552 }
1553 
1554 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1555     return readNullableTypedVector(val, &Parcel::readDouble);
1556 }
1557 
1558 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1559     return readTypedVector(val, &Parcel::readDouble);
1560 }
1561 
1562 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1563     const int32_t start = dataPosition();
1564     int32_t size;
1565     status_t status = readInt32(&size);
1566     val->reset();
1567 
1568     if (status != OK || size < 0) {
1569         return status;
1570     }
1571 
1572     setDataPosition(start);
1573     val->reset(new (std::nothrow) std::vector<bool>());
1574 
1575     status = readBoolVector(val->get());
1576 
1577     if (status != OK) {
1578         val->reset();
1579     }
1580 
1581     return status;
1582 }
1583 
1584 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1585     int32_t size;
1586     status_t status = readInt32(&size);
1587 
1588     if (status != OK) {
1589         return status;
1590     }
1591 
1592     if (size < 0) {
1593         return UNEXPECTED_NULL;
1594     }
1595 
1596     val->resize(size);
1597 
1598     /* C++ bool handling means a vector of bools isn't necessarily addressable
1599      * (we might use individual bits)
1600      */
1601     bool data;
1602     for (int32_t i = 0; i < size; ++i) {
1603         status = readBool(&data);
1604         (*val)[i] = data;
1605 
1606         if (status != OK) {
1607             return status;
1608         }
1609     }
1610 
1611     return OK;
1612 }
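// Illustrative sketch, not part of the original source: on the wire a bool
// vector is a 32-bit element count followed by one 32-bit word per element,
// since readBool()/writeBool() each consume a full int32. A hypothetical
// round-trip (exampleBoolVectorRoundTrip is not a real helper):
#if 0
static void exampleBoolVectorRoundTrip() {
    Parcel p;
    p.writeBoolVector(std::vector<bool>{true, false, true});
    p.setDataPosition(0);
    std::vector<bool> out;
    p.readBoolVector(&out);               // out == {true, false, true}
    // p.dataSize() == 4 (count) + 3 * 4 (elements) == 16 bytes.
}
#endif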
1613 
1614 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1615     return readNullableTypedVector(val, &Parcel::readChar);
1616 }
1617 
1618 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1619     return readTypedVector(val, &Parcel::readChar);
1620 }
1621 
1622 status_t Parcel::readString16Vector(
1623         std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1624     return readNullableTypedVector(val, &Parcel::readString16);
1625 }
1626 
1627 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1628     return readTypedVector(val, &Parcel::readString16);
1629 }
1630 
1631 status_t Parcel::readUtf8VectorFromUtf16Vector(
1632         std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1633     return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1634 }
1635 
1636 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1637     return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1638 }
1639 
1640 status_t Parcel::readInt32(int32_t *pArg) const
1641 {
1642     return readAligned(pArg);
1643 }
1644 
1645 int32_t Parcel::readInt32() const
1646 {
1647     return readAligned<int32_t>();
1648 }
1649 
1650 status_t Parcel::readUint32(uint32_t *pArg) const
1651 {
1652     return readAligned(pArg);
1653 }
1654 
1655 uint32_t Parcel::readUint32() const
1656 {
1657     return readAligned<uint32_t>();
1658 }
1659 
1660 status_t Parcel::readInt64(int64_t *pArg) const
1661 {
1662     return readAligned(pArg);
1663 }
1664 
1665 
1666 int64_t Parcel::readInt64() const
1667 {
1668     return readAligned<int64_t>();
1669 }
1670 
1671 status_t Parcel::readUint64(uint64_t *pArg) const
1672 {
1673     return readAligned(pArg);
1674 }
1675 
1676 uint64_t Parcel::readUint64() const
1677 {
1678     return readAligned<uint64_t>();
1679 }
1680 
1681 status_t Parcel::readPointer(uintptr_t *pArg) const
1682 {
1683     status_t ret;
1684     binder_uintptr_t ptr;
1685     ret = readAligned(&ptr);
1686     if (!ret)
1687         *pArg = ptr;
1688     return ret;
1689 }
1690 
1691 uintptr_t Parcel::readPointer() const
1692 {
1693     return readAligned<binder_uintptr_t>();
1694 }
1695 
1696 
1697 status_t Parcel::readFloat(float *pArg) const
1698 {
1699     return readAligned(pArg);
1700 }
1701 
1702 
1703 float Parcel::readFloat() const
1704 {
1705     return readAligned<float>();
1706 }
1707 
1708 #if defined(__mips__) && defined(__mips_hard_float)
1709 
1710 status_t Parcel::readDouble(double *pArg) const
1711 {
1712     union {
1713       double d;
1714       unsigned long long ll;
1715     } u;
1716     u.d = 0;
1717     status_t status;
1718     status = readAligned(&u.ll);
1719     *pArg = u.d;
1720     return status;
1721 }
1722 
1723 double Parcel::readDouble() const
1724 {
1725     union {
1726       double d;
1727       unsigned long long ll;
1728     } u;
1729     u.ll = readAligned<unsigned long long>();
1730     return u.d;
1731 }
1732 
1733 #else
1734 
1735 status_t Parcel::readDouble(double *pArg) const
1736 {
1737     return readAligned(pArg);
1738 }
1739 
1740 double Parcel::readDouble() const
1741 {
1742     return readAligned<double>();
1743 }
1744 
1745 #endif
1746 
1747 status_t Parcel::readIntPtr(intptr_t *pArg) const
1748 {
1749     return readAligned(pArg);
1750 }
1751 
1752 
1753 intptr_t Parcel::readIntPtr() const
1754 {
1755     return readAligned<intptr_t>();
1756 }
1757 
1758 status_t Parcel::readBool(bool *pArg) const
1759 {
1760     int32_t tmp = 0;
1761     status_t ret = readInt32(&tmp);
1762     *pArg = (tmp != 0);
1763     return ret;
1764 }
1765 
1766 bool Parcel::readBool() const
1767 {
1768     return readInt32() != 0;
1769 }
1770 
1771 status_t Parcel::readChar(char16_t *pArg) const
1772 {
1773     int32_t tmp = 0;
1774     status_t ret = readInt32(&tmp);
1775     *pArg = char16_t(tmp);
1776     return ret;
1777 }
1778 
1779 char16_t Parcel::readChar() const
1780 {
1781     return char16_t(readInt32());
1782 }
1783 
1784 status_t Parcel::readByte(int8_t *pArg) const
1785 {
1786     int32_t tmp = 0;
1787     status_t ret = readInt32(&tmp);
1788     *pArg = int8_t(tmp);
1789     return ret;
1790 }
1791 
1792 int8_t Parcel::readByte() const
1793 {
1794     return int8_t(readInt32());
1795 }
1796 
1797 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1798     size_t utf16Size = 0;
1799     const char16_t* src = readString16Inplace(&utf16Size);
1800     if (!src) {
1801         return UNEXPECTED_NULL;
1802     }
1803 
1804     // Save ourselves the trouble, we're done.
1805     if (utf16Size == 0u) {
1806         str->clear();
1807         return NO_ERROR;
1808     }
1809 
1810     // Allow for closing '\0'
1811     ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
1812     if (utf8Size < 1) {
1813         return BAD_VALUE;
1814     }
1815     // Note that while it is probably safe to assume string::resize keeps a
1816     // spare byte around for the trailing null, we still pass the size including the trailing null
1817     str->resize(utf8Size);
1818     utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
1819     str->resize(utf8Size - 1);
1820     return NO_ERROR;
1821 }
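// Illustrative sketch, not part of the original source: strings travel through
// the parcel as UTF-16; readUtf8FromUtf16() converts back to UTF-8 on the way
// out. A hypothetical round-trip using the matching writer, writeUtf8AsUtf16():
#if 0
static void exampleUtf8RoundTrip() {
    Parcel p;
    p.writeUtf8AsUtf16(std::string("hello, binder"));  // stored as UTF-16
    p.setDataPosition(0);
    std::string out;
    p.readUtf8FromUtf16(&out);                         // out == "hello, binder"
}
#endif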
1822 
1823 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
1824     const int32_t start = dataPosition();
1825     int32_t size;
1826     status_t status = readInt32(&size);
1827     str->reset();
1828 
1829     if (status != OK || size < 0) {
1830         return status;
1831     }
1832 
1833     setDataPosition(start);
1834     str->reset(new (std::nothrow) std::string());
1835     return readUtf8FromUtf16(str->get());
1836 }
1837 
1838 const char* Parcel::readCString() const
1839 {
1840     if (mDataPos < mDataSize) {
1841         const size_t avail = mDataSize-mDataPos;
1842         const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1843         // is the string's trailing NUL within the parcel's valid bounds?
1844         const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1845         if (eos) {
1846             const size_t len = eos - str;
1847             mDataPos += pad_size(len+1);
1848             ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1849             return str;
1850         }
1851     }
1852     return nullptr;
1853 }
1854 
1855 String8 Parcel::readString8() const
1856 {
1857     size_t len;
1858     const char* str = readString8Inplace(&len);
1859     if (str) return String8(str, len);
1860     ALOGE("Reading a NULL string not supported here.");
1861     return String8();
1862 }
1863 
1864 status_t Parcel::readString8(String8* pArg) const
1865 {
1866     size_t len;
1867     const char* str = readString8Inplace(&len);
1868     if (str) {
1869         pArg->setTo(str, len);
1870         return 0;
1871     } else {
1872         *pArg = String8();
1873         return UNEXPECTED_NULL;
1874     }
1875 }
1876 
1877 const char* Parcel::readString8Inplace(size_t* outLen) const
1878 {
1879     int32_t size = readInt32();
1880     // watch for potential int overflow from size+1
1881     if (size >= 0 && size < INT32_MAX) {
1882         *outLen = size;
1883         const char* str = (const char*)readInplace(size+1);
1884         if (str != nullptr) {
1885             if (str[size] == '\0') {
1886                 return str;
1887             }
1888             android_errorWriteLog(0x534e4554, "172655291");
1889         }
1890     }
1891     *outLen = 0;
1892     return nullptr;
1893 }
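// Illustrative worked example, not part of the original source: an 8-bit string
// is laid out as a 32-bit length (excluding the terminator), the bytes
// themselves, a NUL terminator, then padding up to a 4-byte boundary. So "abc"
// is expected to occupy 4 (length) + 4 ("abc\0", already aligned) = 8 bytes of
// parcel data.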
1894 
1895 String16 Parcel::readString16() const
1896 {
1897     size_t len;
1898     const char16_t* str = readString16Inplace(&len);
1899     if (str) return String16(str, len);
1900     ALOGE("Reading a NULL string not supported here.");
1901     return String16();
1902 }
1903 
1904 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1905 {
1906     const int32_t start = dataPosition();
1907     int32_t size;
1908     status_t status = readInt32(&size);
1909     pArg->reset();
1910 
1911     if (status != OK || size < 0) {
1912         return status;
1913     }
1914 
1915     setDataPosition(start);
1916     pArg->reset(new (std::nothrow) String16());
1917 
1918     status = readString16(pArg->get());
1919 
1920     if (status != OK) {
1921         pArg->reset();
1922     }
1923 
1924     return status;
1925 }
1926 
1927 status_t Parcel::readString16(String16* pArg) const
1928 {
1929     size_t len;
1930     const char16_t* str = readString16Inplace(&len);
1931     if (str) {
1932         pArg->setTo(str, len);
1933         return 0;
1934     } else {
1935         *pArg = String16();
1936         return UNEXPECTED_NULL;
1937     }
1938 }
1939 
1940 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1941 {
1942     int32_t size = readInt32();
1943     // watch for potential int overflow from size+1
1944     if (size >= 0 && size < INT32_MAX) {
1945         *outLen = size;
1946         const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1947         if (str != nullptr) {
1948             if (str[size] == u'\0') {
1949                 return str;
1950             }
1951             android_errorWriteLog(0x534e4554, "172655291");
1952         }
1953     }
1954     *outLen = 0;
1955     return nullptr;
1956 }
1957 
1958 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1959 {
1960     status_t status = readNullableStrongBinder(val);
1961     if (status == OK && !val->get()) {
1962         status = UNEXPECTED_NULL;
1963     }
1964     return status;
1965 }
1966 
1967 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
1968 {
1969     return unflattenBinder(val);
1970 }
1971 
1972 sp<IBinder> Parcel::readStrongBinder() const
1973 {
1974     sp<IBinder> val;
1975     // Note that a lot of code in Android reads binders by hand with this
1976     // method, and that code has historically been ok with getting nullptr
1977     // back (while ignoring error codes).
1978     readNullableStrongBinder(&val);
1979     return val;
1980 }
1981 
1982 status_t Parcel::readParcelable(Parcelable* parcelable) const {
1983     int32_t have_parcelable = 0;
1984     status_t status = readInt32(&have_parcelable);
1985     if (status != OK) {
1986         return status;
1987     }
1988     if (!have_parcelable) {
1989         return UNEXPECTED_NULL;
1990     }
1991     return parcelable->readFromParcel(this);
1992 }
1993 
1994 int32_t Parcel::readExceptionCode() const
1995 {
1996     binder::Status status;
1997     status.readFromParcel(*this);
1998     return status.exceptionCode();
1999 }
2000 
2001 native_handle* Parcel::readNativeHandle() const
2002 {
2003     int numFds, numInts;
2004     status_t err;
2005     err = readInt32(&numFds);
2006     if (err != NO_ERROR) return nullptr;
2007     err = readInt32(&numInts);
2008     if (err != NO_ERROR) return nullptr;
2009 
2010     native_handle* h = native_handle_create(numFds, numInts);
2011     if (!h) {
2012         return nullptr;
2013     }
2014 
2015     for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
2016         h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
2017         if (h->data[i] < 0) {
2018             for (int j = 0; j < i; j++) {
2019                 close(h->data[j]);
2020             }
2021             native_handle_delete(h);
2022             return nullptr;
2023         }
2024     }
2025     err = read(h->data + numFds, sizeof(int)*numInts);
2026     if (err != NO_ERROR) {
2027         native_handle_close(h);
2028         native_handle_delete(h);
2029         h = nullptr;
2030     }
2031     return h;
2032 }
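// Illustrative sketch, not part of the original source: the handle returned by
// readNativeHandle() is owned by the caller, who must release both the
// duplicated fds and the struct itself, e.g.:
#if 0
    native_handle* h = parcel.readNativeHandle();
    if (h != nullptr) {
        native_handle_close(h);   // closes the fds dup'ed during the read
        native_handle_delete(h);  // frees the native_handle struct
    }
#endif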
2033 
2034 int Parcel::readFileDescriptor() const
2035 {
2036     const flat_binder_object* flat = readObject(true);
2037 
2038     if (flat && flat->hdr.type == BINDER_TYPE_FD) {
2039         return flat->handle;
2040     }
2041 
2042     return BAD_TYPE;
2043 }
2044 
2045 int Parcel::readParcelFileDescriptor() const
2046 {
2047     int32_t hasComm = readInt32();
2048     int fd = readFileDescriptor();
2049     if (hasComm != 0) {
2050         // detach (owned by the binder driver)
2051         int comm = readFileDescriptor();
2052 
2053         // warning: this must be kept in sync with:
2054         // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
2055         enum ParcelFileDescriptorStatus {
2056             DETACHED = 2,
2057         };
2058 
2059 #if BYTE_ORDER == BIG_ENDIAN
2060         const int32_t message = ParcelFileDescriptorStatus::DETACHED;
2061 #endif
2062 #if BYTE_ORDER == LITTLE_ENDIAN
2063         const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
2064 #endif
2065 
2066         ssize_t written = TEMP_FAILURE_RETRY(
2067             ::write(comm, &message, sizeof(message)));
2068 
2069         if (written == -1 || written != sizeof(message)) {
2070             ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
2071                 written, strerror(errno));
2072             return BAD_TYPE;
2073         }
2074     }
2075     return fd;
2076 }
2077 
2078 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2079 {
2080     int got = readFileDescriptor();
2081 
2082     if (got == BAD_TYPE) {
2083         return BAD_TYPE;
2084     }
2085 
2086     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2087 
2088     if (val->get() < 0) {
2089         return BAD_VALUE;
2090     }
2091 
2092     return OK;
2093 }
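// Illustrative sketch, not part of the original source: readFileDescriptor()
// returns an fd still owned by the Parcel, whereas readUniqueFileDescriptor()
// dup()s it into a caller-owned base::unique_fd. A hypothetical round-trip:
#if 0
static void exampleUniqueFdRoundTrip() {
    Parcel p;
    base::unique_fd in(open("/dev/null", O_RDONLY | O_CLOEXEC));
    p.writeUniqueFileDescriptor(in);   // the parcel keeps its own duplicate
    p.setDataPosition(0);
    base::unique_fd out;
    p.readUniqueFileDescriptor(&out);  // out is an independent duplicate
}
#endif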
2094 
2095 status_t Parcel::readUniqueParcelFileDescriptor(base::unique_fd* val) const
2096 {
2097     int got = readParcelFileDescriptor();
2098 
2099     if (got == BAD_TYPE) {
2100         return BAD_TYPE;
2101     }
2102 
2103     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2104 
2105     if (val->get() < 0) {
2106         return BAD_VALUE;
2107     }
2108 
2109     return OK;
2110 }
2111 
2112 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const {
2113     return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2114 }
2115 
2116 status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const {
2117     return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2118 }
2119 
2120 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2121 {
2122     int32_t blobType;
2123     status_t status = readInt32(&blobType);
2124     if (status) return status;
2125 
2126     if (blobType == BLOB_INPLACE) {
2127         ALOGV("readBlob: read in place");
2128         const void* ptr = readInplace(len);
2129         if (!ptr) return BAD_VALUE;
2130 
2131         outBlob->init(-1, const_cast<void*>(ptr), len, false);
2132         return NO_ERROR;
2133     }
2134 
2135     ALOGV("readBlob: read from ashmem");
2136     bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2137     int fd = readFileDescriptor();
2138     if (fd == int(BAD_TYPE)) return BAD_VALUE;
2139 
2140     if (!ashmem_valid(fd)) {
2141         ALOGE("invalid fd");
2142         return BAD_VALUE;
2143     }
2144     int size = ashmem_get_size_region(fd);
2145     if (size < 0 || size_t(size) < len) {
2146         ALOGE("request size %zu does not match fd size %d", len, size);
2147         return BAD_VALUE;
2148     }
2149     void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2150             MAP_SHARED, fd, 0);
2151     if (ptr == MAP_FAILED) return NO_MEMORY;
2152 
2153     outBlob->init(fd, ptr, len, isMutable);
2154     return NO_ERROR;
2155 }
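// Illustrative sketch, not part of the original source: blobs up to
// BLOB_INPLACE_LIMIT travel inline in the parcel data; larger ones are carried
// in an ashmem region and mapped here with mmap(). A hypothetical round-trip
// (exampleBlobRoundTrip is not a real helper):
#if 0
static void exampleBlobRoundTrip(const void* src, size_t len) {
    Parcel p;
    Parcel::WritableBlob wblob;
    p.writeBlob(len, /*mutableCopy=*/false, &wblob);
    memcpy(wblob.data(), src, len);
    wblob.release();
    p.setDataPosition(0);
    Parcel::ReadableBlob rblob;
    p.readBlob(len, &rblob);           // rblob.data() now mirrors src
    rblob.release();
}
#endif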
2156 
2157 status_t Parcel::read(FlattenableHelperInterface& val) const
2158 {
2159     // size
2160     const size_t len = this->readInt32();
2161     const size_t fd_count = this->readInt32();
2162 
2163     if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2164         // don't accept size_t values which may have come from an
2165         // inadvertent conversion from a negative int.
2166         return BAD_VALUE;
2167     }
2168 
2169     // payload
2170     void const* const buf = this->readInplace(pad_size(len));
2171     if (buf == nullptr)
2172         return BAD_VALUE;
2173 
2174     int* fds = nullptr;
2175     if (fd_count) {
2176         fds = new (std::nothrow) int[fd_count];
2177         if (fds == nullptr) {
2178             ALOGE("read: failed to allocate requested %zu fds", fd_count);
2179             return BAD_VALUE;
2180         }
2181     }
2182 
2183     status_t err = NO_ERROR;
2184     for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2185         int fd = this->readFileDescriptor();
2186         if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2187             err = BAD_VALUE;
2188             ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2189                   i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2190             // Close all the file descriptors that were dup-ed.
2191             for (size_t j=0; j<i ;j++) {
2192                 close(fds[j]);
2193             }
2194         }
2195     }
2196 
2197     if (err == NO_ERROR) {
2198         err = val.unflatten(buf, len, fds, fd_count);
2199     }
2200 
2201     if (fd_count) {
2202         delete [] fds;
2203     }
2204 
2205     return err;
2206 }
2207 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2208 {
2209     const size_t DPOS = mDataPos;
2210     if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2211         const flat_binder_object* obj
2212                 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2213         mDataPos = DPOS + sizeof(flat_binder_object);
2214         if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2215             // When transferring a NULL object, we don't write it into
2216             // the object list, so we don't want to check for it when
2217             // reading.
2218             ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2219             return obj;
2220         }
2221 
2222         // Ensure that this object is valid...
2223         binder_size_t* const OBJS = mObjects;
2224         const size_t N = mObjectsSize;
2225         size_t opos = mNextObjectHint;
2226 
2227         if (N > 0) {
2228             ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2229                  this, DPOS, opos);
2230 
2231             // Start at the current hint position, looking for an object at
2232             // the current data position.
2233             if (opos < N) {
2234                 while (opos < (N-1) && OBJS[opos] < DPOS) {
2235                     opos++;
2236                 }
2237             } else {
2238                 opos = N-1;
2239             }
2240             if (OBJS[opos] == DPOS) {
2241                 // Found it!
2242                 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2243                      this, DPOS, opos);
2244                 mNextObjectHint = opos+1;
2245                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2246                 return obj;
2247             }
2248 
2249             // Look backwards for it...
2250             while (opos > 0 && OBJS[opos] > DPOS) {
2251                 opos--;
2252             }
2253             if (OBJS[opos] == DPOS) {
2254                 // Found it!
2255                 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2256                      this, DPOS, opos);
2257                 mNextObjectHint = opos+1;
2258                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2259                 return obj;
2260             }
2261         }
2262         ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2263              this, DPOS);
2264     }
2265     return nullptr;
2266 }
2267 
2268 void Parcel::closeFileDescriptors()
2269 {
2270     size_t i = mObjectsSize;
2271     if (i > 0) {
2272         //ALOGI("Closing file descriptors for %zu objects...", i);
2273     }
2274     while (i > 0) {
2275         i--;
2276         const flat_binder_object* flat
2277             = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2278         if (flat->hdr.type == BINDER_TYPE_FD) {
2279             //ALOGI("Closing fd: %ld", flat->handle);
2280             close(flat->handle);
2281         }
2282     }
2283 }
2284 
2285 uintptr_t Parcel::ipcData() const
2286 {
2287     return reinterpret_cast<uintptr_t>(mData);
2288 }
2289 
2290 size_t Parcel::ipcDataSize() const
2291 {
2292     return (mDataSize > mDataPos ? mDataSize : mDataPos);
2293 }
2294 
2295 uintptr_t Parcel::ipcObjects() const
2296 {
2297     return reinterpret_cast<uintptr_t>(mObjects);
2298 }
2299 
2300 size_t Parcel::ipcObjectsCount() const
2301 {
2302     return mObjectsSize;
2303 }
2304 
2305 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2306     const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2307 {
2308     binder_size_t minOffset = 0;
2309     freeDataNoInit();
2310     mError = NO_ERROR;
2311     mData = const_cast<uint8_t*>(data);
2312     mDataSize = mDataCapacity = dataSize;
2313     //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2314     mDataPos = 0;
2315     ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2316     mObjects = const_cast<binder_size_t*>(objects);
2317     mObjectsSize = mObjectsCapacity = objectsCount;
2318     mNextObjectHint = 0;
2319     mObjectsSorted = false;
2320     mOwner = relFunc;
2321     mOwnerCookie = relCookie;
2322     for (size_t i = 0; i < mObjectsSize; i++) {
2323         binder_size_t offset = mObjects[i];
2324         if (offset < minOffset) {
2325             ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2326                   __func__, (uint64_t)offset, (uint64_t)minOffset);
2327             mObjectsSize = 0;
2328             break;
2329         }
2330         const flat_binder_object* flat
2331             = reinterpret_cast<const flat_binder_object*>(mData + offset);
2332         uint32_t type = flat->hdr.type;
2333         if (!(type == BINDER_TYPE_BINDER || type == BINDER_TYPE_HANDLE ||
2334               type == BINDER_TYPE_FD)) {
2335             // We should never receive other types (eg BINDER_TYPE_FDA) as long as we don't support
2336             // them in libbinder. If we do receive them, it probably means a kernel bug; try to
2337             // recover gracefully by clearing out the objects, and releasing the objects we do
2338             // know about.
2339             android_errorWriteLog(0x534e4554, "135930648");
2340             ALOGE("%s: unsupported type object (%" PRIu32 ") at offset %" PRIu64 "\n",
2341                   __func__, type, (uint64_t)offset);
2342             releaseObjects();
2343             mObjectsSize = 0;
2344             break;
2345         }
2346         minOffset = offset + sizeof(flat_binder_object);
2347     }
2348     scanForFds();
2349 }
2350 
2351 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2352 {
2353     to << "Parcel(";
2354 
2355     if (errorCheck() != NO_ERROR) {
2356         const status_t err = errorCheck();
2357         to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2358     } else if (dataSize() > 0) {
2359         const uint8_t* DATA = data();
2360         to << indent << HexDump(DATA, dataSize()) << dedent;
2361         const binder_size_t* OBJS = mObjects;
2362         const size_t N = objectsCount();
2363         for (size_t i=0; i<N; i++) {
2364             const flat_binder_object* flat
2365                 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2366             to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2367                 << TypeCode(flat->hdr.type & 0x7f7f7f00)
2368                 << " = " << flat->binder;
2369         }
2370     } else {
2371         to << "NULL";
2372     }
2373 
2374     to << ")";
2375 }
2376 
2377 void Parcel::releaseObjects()
2378 {
2379     size_t i = mObjectsSize;
2380     if (i == 0) {
2381         return;
2382     }
2383     sp<ProcessState> proc(ProcessState::self());
2384     uint8_t* const data = mData;
2385     binder_size_t* const objects = mObjects;
2386     while (i > 0) {
2387         i--;
2388         const flat_binder_object* flat
2389             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2390         release_object(proc, *flat, this, &mOpenAshmemSize);
2391     }
2392 }
2393 
2394 void Parcel::acquireObjects()
2395 {
2396     size_t i = mObjectsSize;
2397     if (i == 0) {
2398         return;
2399     }
2400     const sp<ProcessState> proc(ProcessState::self());
2401     uint8_t* const data = mData;
2402     binder_size_t* const objects = mObjects;
2403     while (i > 0) {
2404         i--;
2405         const flat_binder_object* flat
2406             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2407         acquire_object(proc, *flat, this, &mOpenAshmemSize);
2408     }
2409 }
2410 
2411 void Parcel::freeData()
2412 {
2413     freeDataNoInit();
2414     initState();
2415 }
2416 
2417 void Parcel::freeDataNoInit()
2418 {
2419     if (mOwner) {
2420         LOG_ALLOC("Parcel %p: freeing other owner data", this);
2421         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2422         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2423     } else {
2424         LOG_ALLOC("Parcel %p: freeing allocated data", this);
2425         releaseObjects();
2426         if (mData) {
2427             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2428             pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2429             if (mDataCapacity <= gParcelGlobalAllocSize) {
2430               gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2431             } else {
2432               gParcelGlobalAllocSize = 0;
2433             }
2434             if (gParcelGlobalAllocCount > 0) {
2435               gParcelGlobalAllocCount--;
2436             }
2437             pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2438             free(mData);
2439         }
2440         if (mObjects) free(mObjects);
2441     }
2442 }
2443 
2444 status_t Parcel::growData(size_t len)
2445 {
2446     if (len > INT32_MAX) {
2447         // don't accept size_t values which may have come from an
2448         // inadvertent conversion from a negative int.
2449         return BAD_VALUE;
2450     }
2451 
2452     if (len > SIZE_MAX - mDataSize) return NO_MEMORY; // overflow
2453     if (mDataSize + len > SIZE_MAX / 3) return NO_MEMORY; // overflow
2454     size_t newSize = ((mDataSize+len)*3)/2;
2455     return (newSize <= mDataSize)
2456             ? (status_t) NO_MEMORY
2457             : continueWrite(newSize);
2458 }
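// Illustrative worked example, not part of the original source: growData() asks
// for 1.5x of the space actually needed, so repeated small appends amortize the
// realloc cost. With mDataSize == 100 and len == 4 the request is
// ((100 + 4) * 3) / 2 == 156 bytes of capacity.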
2459 
2460 status_t Parcel::restartWrite(size_t desired)
2461 {
2462     if (desired > INT32_MAX) {
2463         // don't accept size_t values which may have come from an
2464         // inadvertent conversion from a negative int.
2465         return BAD_VALUE;
2466     }
2467 
2468     if (mOwner) {
2469         freeData();
2470         return continueWrite(desired);
2471     }
2472 
2473     uint8_t* data = (uint8_t*)realloc(mData, desired);
2474     if (!data && desired > mDataCapacity) {
2475         mError = NO_MEMORY;
2476         return NO_MEMORY;
2477     }
2478 
2479     releaseObjects();
2480 
2481     if (data || desired == 0) {
2482         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2483         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2484         gParcelGlobalAllocSize += desired;
2485         gParcelGlobalAllocSize -= mDataCapacity;
2486         if (!mData) {
2487             gParcelGlobalAllocCount++;
2488         }
2489         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2490         mData = data;
2491         mDataCapacity = desired;
2492     }
2493 
2494     mDataSize = mDataPos = 0;
2495     ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2496     ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2497 
2498     free(mObjects);
2499     mObjects = nullptr;
2500     mObjectsSize = mObjectsCapacity = 0;
2501     mNextObjectHint = 0;
2502     mObjectsSorted = false;
2503     mHasFds = false;
2504     mFdsKnown = true;
2505     mAllowFds = true;
2506 
2507     return NO_ERROR;
2508 }
2509 
2510 status_t Parcel::continueWrite(size_t desired)
2511 {
2512     if (desired > INT32_MAX) {
2513         // don't accept size_t values which may have come from an
2514         // inadvertent conversion from a negative int.
2515         return BAD_VALUE;
2516     }
2517 
2518     // If shrinking, first adjust for any objects that appear
2519     // after the new data size.
2520     size_t objectsSize = mObjectsSize;
2521     if (desired < mDataSize) {
2522         if (desired == 0) {
2523             objectsSize = 0;
2524         } else {
2525             while (objectsSize > 0) {
2526                 if (mObjects[objectsSize-1] < desired)
2527                     break;
2528                 objectsSize--;
2529             }
2530         }
2531     }
2532 
2533     if (mOwner) {
2534         // If the size is going to zero, just release the owner's data.
2535         if (desired == 0) {
2536             freeData();
2537             return NO_ERROR;
2538         }
2539 
2540         // If there is a different owner, we need to take
2541         // possession.
2542         uint8_t* data = (uint8_t*)malloc(desired);
2543         if (!data) {
2544             mError = NO_MEMORY;
2545             return NO_MEMORY;
2546         }
2547         binder_size_t* objects = nullptr;
2548 
2549         if (objectsSize) {
2550             objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2551             if (!objects) {
2552                 free(data);
2553 
2554                 mError = NO_MEMORY;
2555                 return NO_MEMORY;
2556             }
2557 
2558             // Little hack to only acquire references on objects
2559             // we will be keeping.
2560             size_t oldObjectsSize = mObjectsSize;
2561             mObjectsSize = objectsSize;
2562             acquireObjects();
2563             mObjectsSize = oldObjectsSize;
2564         }
2565 
2566         if (mData) {
2567             memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2568         }
2569         if (objects && mObjects) {
2570             memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2571         }
2572         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2573         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2574         mOwner = nullptr;
2575 
2576         LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2577         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2578         gParcelGlobalAllocSize += desired;
2579         gParcelGlobalAllocCount++;
2580         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2581 
2582         mData = data;
2583         mObjects = objects;
2584         mDataSize = (mDataSize < desired) ? mDataSize : desired;
2585         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2586         mDataCapacity = desired;
2587         mObjectsSize = mObjectsCapacity = objectsSize;
2588         mNextObjectHint = 0;
2589         mObjectsSorted = false;
2590 
2591     } else if (mData) {
2592         if (objectsSize < mObjectsSize) {
2593             // Need to release refs on any objects we are dropping.
2594             const sp<ProcessState> proc(ProcessState::self());
2595             for (size_t i=objectsSize; i<mObjectsSize; i++) {
2596                 const flat_binder_object* flat
2597                     = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2598                 if (flat->hdr.type == BINDER_TYPE_FD) {
2599                     // will need to rescan because we may have lopped off the only FDs
2600                     mFdsKnown = false;
2601                 }
2602                 release_object(proc, *flat, this, &mOpenAshmemSize);
2603             }
2604 
2605             if (objectsSize == 0) {
2606                 free(mObjects);
2607                 mObjects = nullptr;
2608                 mObjectsCapacity = 0;
2609             } else {
2610                 binder_size_t* objects =
2611                     (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2612                 if (objects) {
2613                     mObjects = objects;
2614                     mObjectsCapacity = objectsSize;
2615                 }
2616             }
2617             mObjectsSize = objectsSize;
2618             mNextObjectHint = 0;
2619             mObjectsSorted = false;
2620         }
2621 
2622         // We own the data, so we can just do a realloc().
2623         if (desired > mDataCapacity) {
2624             uint8_t* data = (uint8_t*)realloc(mData, desired);
2625             if (data) {
2626                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2627                         desired);
2628                 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2629                 gParcelGlobalAllocSize += desired;
2630                 gParcelGlobalAllocSize -= mDataCapacity;
2631                 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2632                 mData = data;
2633                 mDataCapacity = desired;
2634             } else {
2635                 mError = NO_MEMORY;
2636                 return NO_MEMORY;
2637             }
2638         } else {
2639             if (mDataSize > desired) {
2640                 mDataSize = desired;
2641                 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2642             }
2643             if (mDataPos > desired) {
2644                 mDataPos = desired;
2645                 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2646             }
2647         }
2648 
2649     } else {
2650         // This is the first data.  Easy!
2651         uint8_t* data = (uint8_t*)malloc(desired);
2652         if (!data) {
2653             mError = NO_MEMORY;
2654             return NO_MEMORY;
2655         }
2656 
2657         if (!(mDataCapacity == 0 && mObjects == nullptr
2658              && mObjectsCapacity == 0)) {
2659             ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2660         }
2661 
2662         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2663         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2664         gParcelGlobalAllocSize += desired;
2665         gParcelGlobalAllocCount++;
2666         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2667 
2668         mData = data;
2669         mDataSize = mDataPos = 0;
2670         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2671         ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2672         mDataCapacity = desired;
2673     }
2674 
2675     return NO_ERROR;
2676 }
2677 
2678 void Parcel::initState()
2679 {
2680     LOG_ALLOC("Parcel %p: initState", this);
2681     mError = NO_ERROR;
2682     mData = nullptr;
2683     mDataSize = 0;
2684     mDataCapacity = 0;
2685     mDataPos = 0;
2686     ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2687     ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2688     mObjects = nullptr;
2689     mObjectsSize = 0;
2690     mObjectsCapacity = 0;
2691     mNextObjectHint = 0;
2692     mObjectsSorted = false;
2693     mHasFds = false;
2694     mFdsKnown = true;
2695     mAllowFds = true;
2696     mOwner = nullptr;
2697     mOpenAshmemSize = 0;
2698     mWorkSourceRequestHeaderPosition = 0;
2699     mRequestHeaderPresent = false;
2700 
2701     // racing multiple inits only leads to multiple identical writes
2702     if (gMaxFds == 0) {
2703         struct rlimit result;
2704         if (!getrlimit(RLIMIT_NOFILE, &result)) {
2705             gMaxFds = (size_t)result.rlim_cur;
2706             //ALOGI("parcel fd limit set to %zu", gMaxFds);
2707         } else {
2708             ALOGW("Unable to getrlimit: %s", strerror(errno));
2709             gMaxFds = 1024;
2710         }
2711     }
2712 }
2713 
2714 void Parcel::scanForFds() const
2715 {
2716     bool hasFds = false;
2717     for (size_t i=0; i<mObjectsSize; i++) {
2718         const flat_binder_object* flat
2719             = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2720         if (flat->hdr.type == BINDER_TYPE_FD) {
2721             hasFds = true;
2722             break;
2723         }
2724     }
2725     mHasFds = hasFds;
2726     mFdsKnown = true;
2727 }
2728 
2729 size_t Parcel::getBlobAshmemSize() const
2730 {
2731     // This used to return the size of all blobs that were written to ashmem, now we're returning
2732     // the ashmem currently referenced by this Parcel, which should be equivalent.
2733     // TODO: Remove method once ABI can be changed.
2734     return mOpenAshmemSize;
2735 }
2736 
2737 size_t Parcel::getOpenAshmemSize() const
2738 {
2739     return mOpenAshmemSize;
2740 }
2741 
2742 // --- Parcel::Blob ---
2743 
2744 Parcel::Blob::Blob() :
2745         mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
2746 }
2747 
2748 Parcel::Blob::~Blob() {
2749     release();
2750 }
2751 
2752 void Parcel::Blob::release() {
2753     if (mFd != -1 && mData) {
2754         ::munmap(mData, mSize);
2755     }
2756     clear();
2757 }
2758 
2759 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2760     mFd = fd;
2761     mData = data;
2762     mSize = size;
2763     mMutable = isMutable;
2764 }
2765 
2766 void Parcel::Blob::clear() {
2767     mFd = -1;
2768     mData = nullptr;
2769     mSize = 0;
2770     mMutable = false;
2771 }
2772 
2773 } // namespace android
2774