/*
 * Copyright (C) 2005 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#define LOG_TAG "Parcel"
//#define LOG_NDEBUG 0

#include <errno.h>
#include <fcntl.h>
#include <inttypes.h>
#include <pthread.h>
#include <stdint.h>
#include <stdio.h>
#include <stdlib.h>
#include <sys/mman.h>
#include <sys/stat.h>
#include <sys/types.h>
#include <sys/resource.h>
#include <unistd.h>

#include <algorithm>
#include <limits>
#include <memory>
#include <string>
#include <vector>

#include <binder/Binder.h>
#include <binder/BpBinder.h>
#include <binder/IPCThreadState.h>
#include <binder/Parcel.h>
#include <binder/ProcessState.h>
#include <binder/Status.h>
#include <binder/TextOutput.h>
#include <binder/Value.h>

#include <cutils/ashmem.h>
#include <utils/Debug.h>
#include <utils/Flattenable.h>
#include <utils/Log.h>
#include <utils/misc.h>
#include <utils/String8.h>
#include <utils/String16.h>

#include <private/binder/binder_module.h>
#include <private/binder/Static.h>

#ifndef INT32_MAX
#define INT32_MAX ((int32_t)(2147483647))
#endif

#define LOG_REFS(...)
//#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
#define LOG_ALLOC(...)
//#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)

// ---------------------------------------------------------------------------

// This macro should never be used at runtime, because a too-large value
// of s could cause an integer overflow. Instead, you should always
// use the wrapper function pad_size().
#define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)

static size_t pad_size(size_t s) {
    if (s > (SIZE_T_MAX - 3)) {
        abort();
    }
    return PAD_SIZE_UNSAFE(s);
}
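
// Example (illustrative): pad_size() rounds a length up to the next multiple
// of four bytes, so pad_size(1) == 4, pad_size(4) == 4 and pad_size(5) == 8.
// All Parcel payloads below are written and read at 4-byte-aligned offsets.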

// Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
#define STRICT_MODE_PENALTY_GATHER (1 << 31)

namespace android {

static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
static size_t gParcelGlobalAllocSize = 0;
static size_t gParcelGlobalAllocCount = 0;

static size_t gMaxFds = 0;

// Maximum size of a blob to transfer in-place.
static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;

enum {
    BLOB_INPLACE = 0,
    BLOB_ASHMEM_IMMUTABLE = 1,
    BLOB_ASHMEM_MUTABLE = 2,
};

void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
                b->incStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != nullptr) b.get_refs()->incWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            if ((obj.cookie != 0) && (outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
                // If we own an ashmem fd, keep track of how much memory it refers to.
                int size = ashmem_get_size_region(obj.handle);
                if (size > 0) {
                    *outAshmemSize += size;
                }
            }
            return;
        }
    }

    ALOGD("Invalid object type 0x%08x", obj.hdr.type);
}

void acquire_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    acquire_object(proc, obj, who, nullptr);
}

static void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
{
    switch (obj.hdr.type) {
        case BINDER_TYPE_BINDER:
            if (obj.binder) {
                LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
                reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
            }
            return;
        case BINDER_TYPE_WEAK_BINDER:
            if (obj.binder)
                reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
            return;
        case BINDER_TYPE_HANDLE: {
            const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
            if (b != nullptr) {
                LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
                b->decStrong(who);
            }
            return;
        }
        case BINDER_TYPE_WEAK_HANDLE: {
            const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
            if (b != nullptr) b.get_refs()->decWeak(who);
            return;
        }
        case BINDER_TYPE_FD: {
            if (obj.cookie != 0) { // owned
                if ((outAshmemSize != nullptr) && ashmem_valid(obj.handle)) {
                    int size = ashmem_get_size_region(obj.handle);
                    if (size > 0) {
                        // ashmem size might have changed since last time it was accounted for, e.g.
                        // in acquire_object(). Value of *outAshmemSize is not critical since we are
                        // releasing the object anyway. Check for integer overflow condition.
                        *outAshmemSize -= std::min(*outAshmemSize, static_cast<size_t>(size));
                    }
                }

                close(obj.handle);
            }
            return;
        }
    }

    ALOGE("Invalid object type 0x%08x", obj.hdr.type);
}

void release_object(const sp<ProcessState>& proc,
    const flat_binder_object& obj, const void* who)
{
    release_object(proc, obj, who, nullptr);
}
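
// Note: acquire_object() and release_object() are expected to run in matched
// pairs for every flat_binder_object a Parcel owns; the code below acquires
// when objects are appended (appendFrom(), writeObject()) and releases again
// when the Parcel later drops its object table.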

inline static status_t finish_flatten_binder(
    const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
{
    return out->writeObject(flat, false);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const sp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    if (IPCThreadState::self()->backgroundSchedulingDisabled()) {
        /* minimum priority for all nodes is nice 0 */
        obj.flags = FLAT_BINDER_FLAG_ACCEPTS_FDS;
    } else {
        /* minimum priority for all nodes is MAX_NICE(19) */
        obj.flags = 0x13 | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    }

    if (binder != nullptr) {
        BBinder *local = binder->localBinder();
        if (!local) {
            BpBinder *proxy = binder->remoteBinder();
            if (proxy == nullptr) {
                ALOGE("null proxy");
            }
            const int32_t handle = proxy ? proxy->handle() : 0;
            obj.hdr.type = BINDER_TYPE_HANDLE;
            obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
            obj.handle = handle;
            obj.cookie = 0;
        } else {
            if (local->isRequestingSid()) {
                obj.flags |= FLAT_BINDER_FLAG_TXN_SECURITY_CTX;
            }
            obj.hdr.type = BINDER_TYPE_BINDER;
            obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
            obj.cookie = reinterpret_cast<uintptr_t>(local);
        }
    } else {
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
    }

    return finish_flatten_binder(binder, obj, out);
}

status_t flatten_binder(const sp<ProcessState>& /*proc*/,
    const wp<IBinder>& binder, Parcel* out)
{
    flat_binder_object obj;

    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    if (binder != nullptr) {
        sp<IBinder> real = binder.promote();
        if (real != nullptr) {
            IBinder *local = real->localBinder();
            if (!local) {
                BpBinder *proxy = real->remoteBinder();
                if (proxy == nullptr) {
                    ALOGE("null proxy");
                }
                const int32_t handle = proxy ? proxy->handle() : 0;
                obj.hdr.type = BINDER_TYPE_WEAK_HANDLE;
                obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
                obj.handle = handle;
                obj.cookie = 0;
            } else {
                obj.hdr.type = BINDER_TYPE_WEAK_BINDER;
                obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
                obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
            }
            return finish_flatten_binder(real, obj, out);
        }

        // XXX How to deal?  In order to flatten the given binder,
        // we need to probe it for information, which requires a primary
        // reference...  but we don't have one.
        //
        // The OpenBinder implementation uses a dynamic_cast<> here,
        // but we can't do that with the different reference counting
        // implementation we are using.
        ALOGE("Unable to unflatten Binder weak reference!");
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(nullptr, obj, out);

    } else {
        obj.hdr.type = BINDER_TYPE_BINDER;
        obj.binder = 0;
        obj.cookie = 0;
        return finish_flatten_binder(nullptr, obj, out);
    }
}

inline static status_t finish_unflatten_binder(
    BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
    const Parcel& /*in*/)
{
    return NO_ERROR;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, sp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(nullptr, *flat, in);
            case BINDER_TYPE_HANDLE:
                *out = proc->getStrongProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

status_t unflatten_binder(const sp<ProcessState>& proc,
    const Parcel& in, wp<IBinder>* out)
{
    const flat_binder_object* flat = in.readObject(false);

    if (flat) {
        switch (flat->hdr.type) {
            case BINDER_TYPE_BINDER:
                *out = reinterpret_cast<IBinder*>(flat->cookie);
                return finish_unflatten_binder(nullptr, *flat, in);
            case BINDER_TYPE_WEAK_BINDER:
                if (flat->binder != 0) {
                    out->set_object_and_refs(
                        reinterpret_cast<IBinder*>(flat->cookie),
                        reinterpret_cast<RefBase::weakref_type*>(flat->binder));
                } else {
                    *out = nullptr;
                }
                return finish_unflatten_binder(nullptr, *flat, in);
            case BINDER_TYPE_HANDLE:
            case BINDER_TYPE_WEAK_HANDLE:
                *out = proc->getWeakProxyForHandle(flat->handle);
                return finish_unflatten_binder(
                    static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
        }
    }
    return BAD_TYPE;
}

// ---------------------------------------------------------------------------

Parcel::Parcel()
{
    LOG_ALLOC("Parcel %p: constructing", this);
    initState();
}

Parcel::~Parcel()
{
    freeDataNoInit();
    LOG_ALLOC("Parcel %p: destroyed", this);
}

size_t Parcel::getGlobalAllocSize() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t size = gParcelGlobalAllocSize;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return size;
}

size_t Parcel::getGlobalAllocCount() {
    pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
    size_t count = gParcelGlobalAllocCount;
    pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
    return count;
}

const uint8_t* Parcel::data() const
{
    return mData;
}

size_t Parcel::dataSize() const
{
    return (mDataSize > mDataPos ? mDataSize : mDataPos);
}

size_t Parcel::dataAvail() const
{
    size_t result = dataSize() - dataPosition();
    if (result > INT32_MAX) {
        abort();
    }
    return result;
}

size_t Parcel::dataPosition() const
{
    return mDataPos;
}

size_t Parcel::dataCapacity() const
{
    return mDataCapacity;
}

status_t Parcel::setDataSize(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err;
    err = continueWrite(size);
    if (err == NO_ERROR) {
        mDataSize = size;
        ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
    }
    return err;
}

void Parcel::setDataPosition(size_t pos) const
{
    if (pos > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        abort();
    }

    mDataPos = pos;
    mNextObjectHint = 0;
    mObjectsSorted = false;
}

status_t Parcel::setDataCapacity(size_t size)
{
    if (size > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (size > mDataCapacity) return continueWrite(size);
    return NO_ERROR;
}

status_t Parcel::setData(const uint8_t* buffer, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t err = restartWrite(len);
    if (err == NO_ERROR) {
        memcpy(const_cast<uint8_t*>(data()), buffer, len);
        mDataSize = len;
        mFdsKnown = false;
    }
    return err;
}

status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
{
    status_t err;
    const uint8_t *data = parcel->mData;
    const binder_size_t *objects = parcel->mObjects;
    size_t size = parcel->mObjectsSize;
    int startPos = mDataPos;
    int firstIndex = -1, lastIndex = -2;

    if (len == 0) {
        return NO_ERROR;
    }

    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    // range checks against the source parcel size
    if ((offset > parcel->mDataSize)
            || (len > parcel->mDataSize)
            || (offset + len > parcel->mDataSize)) {
        return BAD_VALUE;
    }

    // Count objects in range
    for (int i = 0; i < (int) size; i++) {
        size_t off = objects[i];
        if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
            if (firstIndex == -1) {
                firstIndex = i;
            }
            lastIndex = i;
        }
    }
    int numObjects = lastIndex - firstIndex + 1;

    if ((mDataSize+len) > mDataCapacity) {
        // grow data
        err = growData(len);
        if (err != NO_ERROR) {
            return err;
        }
    }

    // append data
    memcpy(mData + mDataPos, data + offset, len);
    mDataPos += len;
    mDataSize += len;

    err = NO_ERROR;

    if (numObjects > 0) {
        const sp<ProcessState> proc(ProcessState::self());
        // grow objects
        if (mObjectsCapacity < mObjectsSize + numObjects) {
            size_t newSize = ((mObjectsSize + numObjects)*3)/2;
            if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
            binder_size_t *objects =
                (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
            if (objects == (binder_size_t*)nullptr) {
                return NO_MEMORY;
            }
            mObjects = objects;
            mObjectsCapacity = newSize;
        }

        // append and acquire objects
        int idx = mObjectsSize;
        for (int i = firstIndex; i <= lastIndex; i++) {
            size_t off = objects[i] - offset + startPos;
            mObjects[idx++] = off;
            mObjectsSize++;

            flat_binder_object* flat
                = reinterpret_cast<flat_binder_object*>(mData + off);
            acquire_object(proc, *flat, this, &mOpenAshmemSize);

            if (flat->hdr.type == BINDER_TYPE_FD) {
                // If this is a file descriptor, we need to dup it so the
                // new Parcel now owns its own fd, and can declare that we
                // officially know we have fds.
                flat->handle = fcntl(flat->handle, F_DUPFD_CLOEXEC, 0);
                flat->cookie = 1;
                mHasFds = mFdsKnown = true;
                if (!mAllowFds) {
                    err = FDS_NOT_ALLOWED;
                }
            }
        }
    }

    return err;
}

int Parcel::compareData(const Parcel& other) {
    size_t size = dataSize();
    if (size != other.dataSize()) {
        return size < other.dataSize() ? -1 : 1;
    }
    return memcmp(data(), other.data(), size);
}

bool Parcel::allowFds() const
{
    return mAllowFds;
}

bool Parcel::pushAllowFds(bool allowFds)
{
    const bool origValue = mAllowFds;
    if (!allowFds) {
        mAllowFds = false;
    }
    return origValue;
}

void Parcel::restoreAllowFds(bool lastValue)
{
    mAllowFds = lastValue;
}

bool Parcel::hasFileDescriptors() const
{
    if (!mFdsKnown) {
        scanForFds();
    }
    return mHasFds;
}

void Parcel::updateWorkSourceRequestHeaderPosition() const {
    // Only update the request headers once. We only want to point
    // to the first headers read/written.
    if (!mRequestHeaderPresent) {
        mWorkSourceRequestHeaderPosition = dataPosition();
        mRequestHeaderPresent = true;
    }
}

// Write RPC headers.  (previously just the interface token)
status_t Parcel::writeInterfaceToken(const String16& interface)
{
    const IPCThreadState* threadState = IPCThreadState::self();
    writeInt32(threadState->getStrictModePolicy() | STRICT_MODE_PENALTY_GATHER);
    updateWorkSourceRequestHeaderPosition();
    writeInt32(threadState->shouldPropagateWorkSource() ?
            threadState->getCallingWorkSourceUid() : IPCThreadState::kUnsetWorkSource);
    // currently the interface identification token is just its name as a string
    return writeString16(interface);
}
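
// Illustrative call sequence (the interface name is an example): a proxy
// writes the RPC header ahead of its arguments, and the remote side checks
// it, typically through the CHECK_INTERFACE() macro:
//
//   Parcel data;
//   data.writeInterfaceToken(String16("com.example.IFoo"));
//   data.writeInt32(arg);
//   ...
//   // in the stub's onTransact():
//   if (!data.enforceInterface(String16("com.example.IFoo"))) return BAD_TYPE;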

bool Parcel::replaceCallingWorkSourceUid(uid_t uid)
{
    if (!mRequestHeaderPresent) {
        return false;
    }

    const size_t initialPosition = dataPosition();
    setDataPosition(mWorkSourceRequestHeaderPosition);
    status_t err = writeInt32(uid);
    setDataPosition(initialPosition);
    return err == NO_ERROR;
}

uid_t Parcel::readCallingWorkSourceUid()
{
    if (!mRequestHeaderPresent) {
        return IPCThreadState::kUnsetWorkSource;
    }

    const size_t initialPosition = dataPosition();
    setDataPosition(mWorkSourceRequestHeaderPosition);
    uid_t uid = readInt32();
    setDataPosition(initialPosition);
    return uid;
}

bool Parcel::checkInterface(IBinder* binder) const
{
    return enforceInterface(binder->getInterfaceDescriptor());
}

bool Parcel::enforceInterface(const String16& interface,
                              IPCThreadState* threadState) const
{
    // StrictModePolicy.
    int32_t strictPolicy = readInt32();
    if (threadState == nullptr) {
        threadState = IPCThreadState::self();
    }
    if ((threadState->getLastTransactionBinderFlags() &
         IBinder::FLAG_ONEWAY) != 0) {
      // For one-way calls, the callee is running entirely
      // disconnected from the caller, so disable StrictMode entirely.
      // Not only does disk/network usage not impact the caller, but
      // there's no way to communicate back any violations anyway.
      threadState->setStrictModePolicy(0);
    } else {
      threadState->setStrictModePolicy(strictPolicy);
    }
    // WorkSource.
    updateWorkSourceRequestHeaderPosition();
    int32_t workSource = readInt32();
    threadState->setCallingWorkSourceUidWithoutPropagation(workSource);
    // Interface descriptor.
    const String16 str(readString16());
    if (str == interface) {
        return true;
    } else {
        ALOGW("**** enforceInterface() expected '%s' but read '%s'",
                String8(interface).string(), String8(str).string());
        return false;
    }
}

const binder_size_t* Parcel::objects() const
{
    return mObjects;
}

size_t Parcel::objectsCount() const
{
    return mObjectsSize;
}

status_t Parcel::errorCheck() const
{
    return mError;
}

void Parcel::setError(status_t err)
{
    mError = err;
}

status_t Parcel::finishWrite(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    //printf("Finish write of %d\n", len);
    mDataPos += len;
    ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
    if (mDataPos > mDataSize) {
        mDataSize = mDataPos;
        ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
    }
    //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
    return NO_ERROR;
}

status_t Parcel::writeUnpadded(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    size_t end = mDataPos + len;
    if (end < mDataPos) {
        // integer overflow
        return BAD_VALUE;
    }

    if (end <= mDataCapacity) {
restart_write:
        memcpy(mData+mDataPos, data, len);
        return finishWrite(len);
    }

    status_t err = growData(len);
    if (err == NO_ERROR) goto restart_write;
    return err;
}

status_t Parcel::write(const void* data, size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    void* const d = writeInplace(len);
    if (d) {
        memcpy(d, data, len);
        return NO_ERROR;
    }
    return mError;
}

void* Parcel::writeInplace(size_t len)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return nullptr;
    }

    const size_t padded = pad_size(len);

    // sanity check for integer overflow
    if (mDataPos+padded < mDataPos) {
        return nullptr;
    }

    if ((mDataPos+padded) <= mDataCapacity) {
restart_write:
        //printf("Writing %ld bytes, padded to %ld\n", len, padded);
        uint8_t* const data = mData+mDataPos;

        // Need to pad at end?
        if (padded != len) {
#if BYTE_ORDER == BIG_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0xffffff00, 0xffff0000, 0xff000000
            };
#endif
#if BYTE_ORDER == LITTLE_ENDIAN
            static const uint32_t mask[4] = {
                0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
            };
#endif
            //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
            //    *reinterpret_cast<void**>(data+padded-4));
            *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
        }

        finishWrite(padded);
        return data;
    }

    status_t err = growData(padded);
    if (err == NO_ERROR) goto restart_write;
    return nullptr;
}
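
// Note: writeInplace() reserves pad_size(len) bytes at the current write
// position and hands back a pointer into the Parcel's own buffer for the
// caller to fill (see write() above). When len is not a multiple of four,
// the mask table zeroes the trailing padding bytes of the last word so that
// uninitialized heap contents never leak into the transaction.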

status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
    const uint8_t* strData = (uint8_t*)str.data();
    const size_t strLen = str.length();
    const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
    if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
        return BAD_VALUE;
    }

    status_t err = writeInt32(utf16Len);
    if (err) {
        return err;
    }

    // Allocate enough bytes to hold our converted string and its terminating NULL.
    void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
    if (!dst) {
        return NO_MEMORY;
    }

    utf8_to_utf16(strData, strLen, (char16_t*)dst, (size_t) utf16Len + 1);

    return NO_ERROR;
}

status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
  if (!str) {
    return writeInt32(-1);
  }
  return writeUtf8AsUtf16(*str);
}

namespace {

template<typename T>
status_t writeByteVectorInternal(Parcel* parcel, const std::vector<T>& val)
{
    status_t status;
    if (val.size() > std::numeric_limits<int32_t>::max()) {
        status = BAD_VALUE;
        return status;
    }

    status = parcel->writeInt32(val.size());
    if (status != OK) {
        return status;
    }

    void* data = parcel->writeInplace(val.size());
    if (!data) {
        status = BAD_VALUE;
        return status;
    }

    memcpy(data, val.data(), val.size());
    return status;
}

template<typename T>
status_t writeByteVectorInternalPtr(Parcel* parcel,
                                    const std::unique_ptr<std::vector<T>>& val)
{
    if (!val) {
        return parcel->writeInt32(-1);
    }

    return writeByteVectorInternal(parcel, *val);
}

}  // namespace

status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
    return writeByteVectorInternal(this, val);
}

status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
{
    return writeByteVectorInternalPtr(this, val);
}

status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
    return writeByteVectorInternal(this, val);
}

status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
{
    return writeByteVectorInternalPtr(this, val);
}

status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
{
    return writeTypedVector(val, &Parcel::writeInt32);
}

status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeInt32);
}

status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
{
    return writeTypedVector(val, &Parcel::writeInt64);
}

status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeInt64);
}

status_t Parcel::writeUint64Vector(const std::vector<uint64_t>& val)
{
    return writeTypedVector(val, &Parcel::writeUint64);
}

status_t Parcel::writeUint64Vector(const std::unique_ptr<std::vector<uint64_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeUint64);
}

status_t Parcel::writeFloatVector(const std::vector<float>& val)
{
    return writeTypedVector(val, &Parcel::writeFloat);
}

status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeFloat);
}

status_t Parcel::writeDoubleVector(const std::vector<double>& val)
{
    return writeTypedVector(val, &Parcel::writeDouble);
}

status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeDouble);
}

status_t Parcel::writeBoolVector(const std::vector<bool>& val)
{
    return writeTypedVector(val, &Parcel::writeBool);
}

status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeBool);
}

status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
{
    return writeTypedVector(val, &Parcel::writeChar);
}

status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeChar);
}

status_t Parcel::writeString16Vector(const std::vector<String16>& val)
{
    return writeTypedVector(val, &Parcel::writeString16);
}

status_t Parcel::writeString16Vector(
        const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeString16);
}

status_t Parcel::writeUtf8VectorAsUtf16Vector(
                        const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
    return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
}

status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
    return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
}

status_t Parcel::writeInt32(int32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint32(uint32_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}

status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    if (!val) {
        return writeInt32(-1);
    }
    status_t ret = writeInt32(static_cast<uint32_t>(len));
    if (ret == NO_ERROR) {
        ret = write(val, len * sizeof(*val));
    }
    return ret;
}
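
// Note: the raw-array writers above use a simple length-prefixed layout: an
// int32 count of -1 denotes a null array; otherwise the element count is
// written followed by the raw element bytes (write() pads the payload to the
// next 4-byte boundary).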

status_t Parcel::writeBool(bool val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeChar(char16_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeByte(int8_t val)
{
    return writeInt32(int32_t(val));
}

status_t Parcel::writeInt64(int64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writeUint64(uint64_t val)
{
    return writeAligned(val);
}

status_t Parcel::writePointer(uintptr_t val)
{
    return writeAligned<binder_uintptr_t>(val);
}

status_t Parcel::writeFloat(float val)
{
    return writeAligned(val);
}

#if defined(__mips__) && defined(__mips_hard_float)

status_t Parcel::writeDouble(double val)
{
    union {
        double d;
        unsigned long long ll;
    } u;
    u.d = val;
    return writeAligned(u.ll);
}

#else

status_t Parcel::writeDouble(double val)
{
    return writeAligned(val);
}

#endif

status_t Parcel::writeCString(const char* str)
{
    return write(str, strlen(str)+1);
}

status_t Parcel::writeString8(const String8& str)
{
    status_t err = writeInt32(str.bytes());
    // only write string if its length is more than zero characters,
    // as readString8 will only read if the length field is non-zero.
    // this is slightly different from how writeString16 works.
    if (str.bytes() > 0 && err == NO_ERROR) {
        err = write(str.string(), str.bytes()+1);
    }
    return err;
}

status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
{
    if (!str) {
        return writeInt32(-1);
    }

    return writeString16(*str);
}

status_t Parcel::writeString16(const String16& str)
{
    return writeString16(str.string(), str.size());
}

status_t Parcel::writeString16(const char16_t* str, size_t len)
{
    if (str == nullptr) return writeInt32(-1);

    status_t err = writeInt32(len);
    if (err == NO_ERROR) {
        len *= sizeof(char16_t);
        uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
        if (data) {
            memcpy(data, str, len);
            *reinterpret_cast<char16_t*>(data+len) = 0;
            return NO_ERROR;
        }
        err = mError;
    }
    return err;
}
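
// Wire format produced above: an int32 UTF-16 code-unit count (or -1 for a
// null string), the UTF-16 data itself, a char16_t NUL terminator, and any
// padding writeInplace() adds to reach the next 4-byte boundary.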

status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
{
    return writeTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
{
    return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
    return readNullableTypedVector(val, &Parcel::readNullableStrongBinder);
}

status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
    return readTypedVector(val, &Parcel::readStrongBinder);
}

status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
{
    return flatten_binder(ProcessState::self(), val, this);
}

status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
    if (!parcelable) {
        return writeInt32(0);
    }

    return writeParcelable(*parcelable);
}

status_t Parcel::writeParcelable(const Parcelable& parcelable) {
    status_t status = writeInt32(1);  // parcelable is not null.
    if (status != OK) {
        return status;
    }
    return parcelable.writeToParcel(this);
}

status_t Parcel::writeValue(const binder::Value& value) {
    return value.writeToParcel(this);
}

status_t Parcel::writeNativeHandle(const native_handle* handle)
{
    if (!handle || handle->version != sizeof(native_handle))
        return BAD_TYPE;

    status_t err;
    err = writeInt32(handle->numFds);
    if (err != NO_ERROR) return err;

    err = writeInt32(handle->numInts);
    if (err != NO_ERROR) return err;

    for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
        err = writeDupFileDescriptor(handle->data[i]);

    if (err != NO_ERROR) {
        ALOGD("write native handle, write dup fd failed");
        return err;
    }
    err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
    return err;
}

status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
{
    flat_binder_object obj;
    obj.hdr.type = BINDER_TYPE_FD;
    obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
    obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
    obj.handle = fd;
    obj.cookie = takeOwnership ? 1 : 0;
    return writeObject(obj, true);
}

status_t Parcel::writeDupFileDescriptor(int fd)
{
    int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeParcelFileDescriptor(int fd, bool takeOwnership)
{
    writeInt32(0);
    return writeFileDescriptor(fd, takeOwnership);
}

status_t Parcel::writeDupParcelFileDescriptor(int fd)
{
    int dupFd = fcntl(fd, F_DUPFD_CLOEXEC, 0);
    if (dupFd < 0) {
        return -errno;
    }
    status_t err = writeParcelFileDescriptor(dupFd, true /*takeOwnership*/);
    if (err != OK) {
        close(dupFd);
    }
    return err;
}

status_t Parcel::writeUniqueFileDescriptor(const base::unique_fd& fd) {
    return writeDupFileDescriptor(fd.get());
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<base::unique_fd>& val) {
    return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<base::unique_fd>>& val) {
    return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
}

status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
{
    if (len > INT32_MAX) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    status_t status;
    if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
        ALOGV("writeBlob: write in place");
        status = writeInt32(BLOB_INPLACE);
        if (status) return status;

        void* ptr = writeInplace(len);
        if (!ptr) return NO_MEMORY;

        outBlob->init(-1, ptr, len, false);
        return NO_ERROR;
    }

    ALOGV("writeBlob: write to ashmem");
    int fd = ashmem_create_region("Parcel Blob", len);
    if (fd < 0) return NO_MEMORY;

    int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
    if (result < 0) {
        status = result;
    } else {
        void* ptr = ::mmap(nullptr, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        if (ptr == MAP_FAILED) {
            status = -errno;
        } else {
            if (!mutableCopy) {
                result = ashmem_set_prot_region(fd, PROT_READ);
            }
            if (result < 0) {
                status = result;
            } else {
                status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
                if (!status) {
                    status = writeFileDescriptor(fd, true /*takeOwnership*/);
                    if (!status) {
                        outBlob->init(fd, ptr, len, mutableCopy);
                        return NO_ERROR;
                    }
                }
            }
        }
        ::munmap(ptr, len);
    }
    ::close(fd);
    return status;
}
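
// Illustrative usage (buffer and length names are examples): blobs up to
// BLOB_INPLACE_LIMIT bytes are copied straight into the Parcel, larger ones
// travel as an anonymous ashmem region whose fd is written into the Parcel:
//
//   Parcel::WritableBlob blob;
//   if (parcel.writeBlob(srcLen, false /*mutableCopy*/, &blob) == NO_ERROR) {
//       memcpy(blob.data(), srcBuf, srcLen);
//       blob.release();
//   }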

status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
{
    // Must match up with what's done in writeBlob.
    if (!mAllowFds) return FDS_NOT_ALLOWED;
    status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
    if (status) return status;
    return writeDupFileDescriptor(fd);
}

status_t Parcel::write(const FlattenableHelperInterface& val)
{
    status_t err;

    // size if needed
    const size_t len = val.getFlattenedSize();
    const size_t fd_count = val.getFdCount();

    if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
        // don't accept size_t values which may have come from an
        // inadvertent conversion from a negative int.
        return BAD_VALUE;
    }

    err = this->writeInt32(len);
    if (err) return err;

    err = this->writeInt32(fd_count);
    if (err) return err;

    // payload
    void* const buf = this->writeInplace(len);
    if (buf == nullptr)
        return BAD_VALUE;

    int* fds = nullptr;
    if (fd_count) {
        fds = new (std::nothrow) int[fd_count];
        if (fds == nullptr) {
            ALOGE("write: failed to allocate requested %zu fds", fd_count);
            return BAD_VALUE;
        }
    }

    err = val.flatten(buf, len, fds, fd_count);
    for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
        err = this->writeDupFileDescriptor( fds[i] );
    }

    if (fd_count) {
        delete [] fds;
    }

    return err;
}

status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
{
    const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
    const bool enoughObjects = mObjectsSize < mObjectsCapacity;
    if (enoughData && enoughObjects) {
restart_write:
        *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;

        // remember if it's a file descriptor
        if (val.hdr.type == BINDER_TYPE_FD) {
            if (!mAllowFds) {
                // fail before modifying our object index
                return FDS_NOT_ALLOWED;
            }
            mHasFds = mFdsKnown = true;
        }

        // Need to write meta-data?
        if (nullMetaData || val.binder != 0) {
            mObjects[mObjectsSize] = mDataPos;
            acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
            mObjectsSize++;
        }

        return finishWrite(sizeof(flat_binder_object));
    }

    if (!enoughData) {
        const status_t err = growData(sizeof(val));
        if (err != NO_ERROR) return err;
    }
    if (!enoughObjects) {
        size_t newSize = ((mObjectsSize+2)*3)/2;
        if (newSize*sizeof(binder_size_t) < mObjectsSize) return NO_MEMORY;   // overflow
        binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
        if (objects == nullptr) return NO_MEMORY;
        mObjects = objects;
        mObjectsCapacity = newSize;
    }

    goto restart_write;
}
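
// Note: writeObject() is the single point where binder references and file
// descriptors enter a Parcel: the flattened object is copied into the data
// buffer and, when it carries a reference (or nullMetaData is requested), its
// offset is recorded in mObjects so the binder driver can translate it when
// the transaction crosses the process boundary.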

status_t Parcel::writeNoException()
{
    binder::Status status;
    return status.writeToParcel(this);
}

status_t Parcel::writeMap(const ::android::binder::Map& map_in)
{
    using ::std::map;
    using ::android::binder::Value;
    using ::android::binder::Map;

    Map::const_iterator iter;
    status_t ret;

    ret = writeInt32(map_in.size());

    if (ret != NO_ERROR) {
        return ret;
    }

    for (iter = map_in.begin(); iter != map_in.end(); ++iter) {
        ret = writeValue(Value(iter->first));
        if (ret != NO_ERROR) {
            return ret;
        }

        ret = writeValue(iter->second);
        if (ret != NO_ERROR) {
            return ret;
        }
    }

    return ret;
}
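
// Wire format used by writeMap()/readMap(): an int32 entry count followed by
// alternating key/value pairs, each encoded as a binder::Value; keys must be
// string Values, and readMap() rejects anything else.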

status_t Parcel::writeNullableMap(const std::unique_ptr<binder::Map>& map)
{
    if (map == nullptr) {
        return writeInt32(-1);
    }

    return writeMap(*map.get());
}

status_t Parcel::readMap(::android::binder::Map* map_out) const
{
    using ::std::map;
    using ::android::String16;
    using ::android::String8;
    using ::android::binder::Value;
    using ::android::binder::Map;

    status_t ret = NO_ERROR;
    int32_t count;

    ret = readInt32(&count);
    if (ret != NO_ERROR) {
        return ret;
    }

    if (count < 0) {
        ALOGE("readMap: Unexpected count: %d", count);
        return (count == -1)
            ? UNEXPECTED_NULL
            : BAD_VALUE;
    }

    map_out->clear();

    while (count--) {
        Map::key_type key;
        Value value;

        ret = readValue(&value);
        if (ret != NO_ERROR) {
            return ret;
        }

        if (!value.getString(&key)) {
            ALOGE("readMap: Key type not a string (parcelType = %d)", value.parcelType());
            return BAD_VALUE;
        }

        ret = readValue(&value);
        if (ret != NO_ERROR) {
            return ret;
        }

        (*map_out)[key] = value;
    }

    return ret;
}

status_t Parcel::readNullableMap(std::unique_ptr<binder::Map>* map) const
{
    const size_t start = dataPosition();
    int32_t count;
    status_t status = readInt32(&count);
    map->reset();

    if (status != OK || count == -1) {
        return status;
    }

    setDataPosition(start);
    map->reset(new binder::Map());

    status = readMap(map->get());

    if (status != OK) {
        map->reset();
    }

    return status;
}


void Parcel::remove(size_t /*start*/, size_t /*amt*/)
{
    LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
}

status_t Parcel::validateReadData(size_t upperBound) const
{
    // Don't allow non-object reads on object data
    if (mObjectsSorted || mObjectsSize <= 1) {
data_sorted:
        // Expect to check only against the next object
        if (mNextObjectHint < mObjectsSize && upperBound > mObjects[mNextObjectHint]) {
            // For some reason the current read position is greater than the next object
            // hint. Iterate until we find the right object
            size_t nextObject = mNextObjectHint;
            do {
                if (mDataPos < mObjects[nextObject] + sizeof(flat_binder_object)) {
                    // Requested info overlaps with an object
                    ALOGE("Attempt to read from protected data in Parcel %p", this);
                    return PERMISSION_DENIED;
                }
                nextObject++;
            } while (nextObject < mObjectsSize && upperBound > mObjects[nextObject]);
            mNextObjectHint = nextObject;
        }
        return NO_ERROR;
    }
    // Quickly determine if mObjects is sorted.
    binder_size_t* currObj = mObjects + mObjectsSize - 1;
    binder_size_t* prevObj = currObj;
    while (currObj > mObjects) {
        prevObj--;
        if(*prevObj > *currObj) {
            goto data_unsorted;
        }
        currObj--;
    }
    mObjectsSorted = true;
    goto data_sorted;

data_unsorted:
    // Insertion Sort mObjects
    // Great for mostly sorted lists. If randomly sorted or reverse ordered mObjects become common,
    // switch to std::sort(mObjects, mObjects + mObjectsSize);
    for (binder_size_t* iter0 = mObjects + 1; iter0 < mObjects + mObjectsSize; iter0++) {
        binder_size_t temp = *iter0;
        binder_size_t* iter1 = iter0 - 1;
        while (iter1 >= mObjects && *iter1 > temp) {
            *(iter1 + 1) = *iter1;
            iter1--;
        }
        *(iter1 + 1) = temp;
    }
    mNextObjectHint = 0;
    mObjectsSorted = true;
    goto data_sorted;
}
1590 
1591 status_t Parcel::read(void* outData, size_t len) const
1592 {
1593     if (len > INT32_MAX) {
1594         // don't accept size_t values which may have come from an
1595         // inadvertent conversion from a negative int.
1596         return BAD_VALUE;
1597     }
1598 
1599     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1600             && len <= pad_size(len)) {
1601         if (mObjectsSize > 0) {
1602             status_t err = validateReadData(mDataPos + pad_size(len));
1603             if(err != NO_ERROR) {
1604                 // Still increment the data position by the expected length
1605                 mDataPos += pad_size(len);
1606                 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1607                 return err;
1608             }
1609         }
1610         memcpy(outData, mData+mDataPos, len);
1611         mDataPos += pad_size(len);
1612         ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1613         return NO_ERROR;
1614     }
1615     return NOT_ENOUGH_DATA;
1616 }
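// Illustrative note (not part of the original file): read() copies exactly
// "len" bytes but always advances mDataPos by pad_size(len), mirroring the
// 4-byte alignment applied on the write side. A hypothetical caller:
//
//   uint8_t buf[5];
//   status_t err = parcel.read(buf, sizeof(buf));  // copies 5 bytes
//   // on success mDataPos has advanced by pad_size(5) == 8, not 5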
1617 
1618 const void* Parcel::readInplace(size_t len) const
1619 {
1620     if (len > INT32_MAX) {
1621         // don't accept size_t values which may have come from an
1622         // inadvertent conversion from a negative int.
1623         return nullptr;
1624     }
1625 
1626     if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1627             && len <= pad_size(len)) {
1628         if (mObjectsSize > 0) {
1629             status_t err = validateReadData(mDataPos + pad_size(len));
1630             if(err != NO_ERROR) {
1631                 // Still increment the data position by the expected length
1632                 mDataPos += pad_size(len);
1633                 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1634                 return nullptr;
1635             }
1636         }
1637 
1638         const void* data = mData+mDataPos;
1639         mDataPos += pad_size(len);
1640         ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1641         return data;
1642     }
1643     return nullptr;
1644 }
1645 
1646 template<class T>
1647 status_t Parcel::readAligned(T *pArg) const {
1648     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1649 
1650     if ((mDataPos+sizeof(T)) <= mDataSize) {
1651         if (mObjectsSize > 0) {
1652             status_t err = validateReadData(mDataPos + sizeof(T));
1653             if(err != NO_ERROR) {
1654                 // Still increment the data position by the expected length
1655                 mDataPos += sizeof(T);
1656                 return err;
1657             }
1658         }
1659 
1660         const void* data = mData+mDataPos;
1661         mDataPos += sizeof(T);
1662         *pArg =  *reinterpret_cast<const T*>(data);
1663         return NO_ERROR;
1664     } else {
1665         return NOT_ENOUGH_DATA;
1666     }
1667 }
1668 
1669 template<class T>
1670 T Parcel::readAligned() const {
1671     T result;
1672     if (readAligned(&result) != NO_ERROR) {
1673         result = 0;
1674     }
1675 
1676     return result;
1677 }
1678 
1679 template<class T>
1680 status_t Parcel::writeAligned(T val) {
1681     COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1682 
1683     if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1684 restart_write:
1685         *reinterpret_cast<T*>(mData+mDataPos) = val;
1686         return finishWrite(sizeof(val));
1687     }
1688 
1689     status_t err = growData(sizeof(val));
1690     if (err == NO_ERROR) goto restart_write;
1691     return err;
1692 }
1693 
1694 namespace {
1695 
1696 template<typename T>
1697 status_t readByteVectorInternal(const Parcel* parcel,
1698                                 std::vector<T>* val) {
1699     val->clear();
1700 
1701     int32_t size;
1702     status_t status = parcel->readInt32(&size);
1703 
1704     if (status != OK) {
1705         return status;
1706     }
1707 
1708     if (size < 0) {
1709         status = UNEXPECTED_NULL;
1710         return status;
1711     }
1712     if (size_t(size) > parcel->dataAvail()) {
1713         status = BAD_VALUE;
1714         return status;
1715     }
1716 
1717     T* data = const_cast<T*>(reinterpret_cast<const T*>(parcel->readInplace(size)));
1718     if (!data) {
1719         status = BAD_VALUE;
1720         return status;
1721     }
1722     val->reserve(size);
1723     val->insert(val->end(), data, data + size);
1724 
1725     return status;
1726 }
1727 
1728 template<typename T>
1729 status_t readByteVectorInternalPtr(
1730         const Parcel* parcel,
1731         std::unique_ptr<std::vector<T>>* val) {
1732     const int32_t start = parcel->dataPosition();
1733     int32_t size;
1734     status_t status = parcel->readInt32(&size);
1735     val->reset();
1736 
1737     if (status != OK || size < 0) {
1738         return status;
1739     }
1740 
1741     parcel->setDataPosition(start);
1742     val->reset(new (std::nothrow) std::vector<T>());
1743 
1744     status = readByteVectorInternal(parcel, val->get());
1745 
1746     if (status != OK) {
1747         val->reset();
1748     }
1749 
1750     return status;
1751 }
1752 
1753 }  // namespace
1754 
1755 status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1756     return readByteVectorInternal(this, val);
1757 }
1758 
1759 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1760     return readByteVectorInternal(this, val);
1761 }
1762 
1763 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1764     return readByteVectorInternalPtr(this, val);
1765 }
1766 
1767 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1768     return readByteVectorInternalPtr(this, val);
1769 }
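// Illustrative sketch (not part of the original file): byte vectors travel
// as an int32 count followed by the packed bytes (padded to 4), which is why
// readByteVectorInternal() can bulk-copy them via readInplace(). Assuming the
// writer used the matching writeByteVector() overload:
//
//   std::vector<uint8_t> bytes;
//   if (parcel.readByteVector(&bytes) == OK) {
//       // bytes.size() equals the count written by the sender
//   }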
1770 
1771 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1772     return readNullableTypedVector(val, &Parcel::readInt32);
1773 }
1774 
1775 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1776     return readTypedVector(val, &Parcel::readInt32);
1777 }
1778 
1779 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1780     return readNullableTypedVector(val, &Parcel::readInt64);
1781 }
1782 
1783 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1784     return readTypedVector(val, &Parcel::readInt64);
1785 }
1786 
1787 status_t Parcel::readUint64Vector(std::unique_ptr<std::vector<uint64_t>>* val) const {
1788     return readNullableTypedVector(val, &Parcel::readUint64);
1789 }
1790 
1791 status_t Parcel::readUint64Vector(std::vector<uint64_t>* val) const {
1792     return readTypedVector(val, &Parcel::readUint64);
1793 }
1794 
1795 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1796     return readNullableTypedVector(val, &Parcel::readFloat);
1797 }
1798 
1799 status_t Parcel::readFloatVector(std::vector<float>* val) const {
1800     return readTypedVector(val, &Parcel::readFloat);
1801 }
1802 
1803 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1804     return readNullableTypedVector(val, &Parcel::readDouble);
1805 }
1806 
1807 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1808     return readTypedVector(val, &Parcel::readDouble);
1809 }
1810 
1811 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1812     const int32_t start = dataPosition();
1813     int32_t size;
1814     status_t status = readInt32(&size);
1815     val->reset();
1816 
1817     if (status != OK || size < 0) {
1818         return status;
1819     }
1820 
1821     setDataPosition(start);
1822     val->reset(new (std::nothrow) std::vector<bool>());
1823 
1824     status = readBoolVector(val->get());
1825 
1826     if (status != OK) {
1827         val->reset();
1828     }
1829 
1830     return status;
1831 }
1832 
1833 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1834     int32_t size;
1835     status_t status = readInt32(&size);
1836 
1837     if (status != OK) {
1838         return status;
1839     }
1840 
1841     if (size < 0) {
1842         return UNEXPECTED_NULL;
1843     }
1844 
1845     val->resize(size);
1846 
1847     /* C++ bool handling means a vector of bools isn't necessarily addressable
1848      * (we might use individual bits)
1849      */
1850     bool data;
1851     for (int32_t i = 0; i < size; ++i) {
1852         status = readBool(&data);
1853         (*val)[i] = data;
1854 
1855         if (status != OK) {
1856             return status;
1857         }
1858     }
1859 
1860     return OK;
1861 }
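// Illustrative note (not part of the original file): unlike byte vectors,
// each bool is stored as a full int32 on the wire (readBool() reads an
// int32), so a vector of N bools occupies 4 + 4*N bytes. Assuming the writer
// used writeBoolVector():
//
//   std::vector<bool> flags;
//   status_t err = parcel.readBoolVector(&flags);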
1862 
1863 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1864     return readNullableTypedVector(val, &Parcel::readChar);
1865 }
1866 
1867 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1868     return readTypedVector(val, &Parcel::readChar);
1869 }
1870 
1871 status_t Parcel::readString16Vector(
1872         std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1873     return readNullableTypedVector(val, &Parcel::readString16);
1874 }
1875 
1876 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1877     return readTypedVector(val, &Parcel::readString16);
1878 }
1879 
1880 status_t Parcel::readUtf8VectorFromUtf16Vector(
1881         std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1882     return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1883 }
1884 
1885 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1886     return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1887 }
1888 
1889 status_t Parcel::readInt32(int32_t *pArg) const
1890 {
1891     return readAligned(pArg);
1892 }
1893 
1894 int32_t Parcel::readInt32() const
1895 {
1896     return readAligned<int32_t>();
1897 }
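// Illustrative sketch (not part of the original file): the readAligned<T>()
// helpers above back all of the fixed-width primitive reads. A minimal round
// trip on a locally owned Parcel:
//
//   Parcel p;
//   p.writeInt32(42);
//   p.setDataPosition(0);        // rewind before reading back
//   int32_t v = p.readInt32();   // v == 42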
1898 
1899 status_t Parcel::readUint32(uint32_t *pArg) const
1900 {
1901     return readAligned(pArg);
1902 }
1903 
1904 uint32_t Parcel::readUint32() const
1905 {
1906     return readAligned<uint32_t>();
1907 }
1908 
1909 status_t Parcel::readInt64(int64_t *pArg) const
1910 {
1911     return readAligned(pArg);
1912 }
1913 
1914 
1915 int64_t Parcel::readInt64() const
1916 {
1917     return readAligned<int64_t>();
1918 }
1919 
1920 status_t Parcel::readUint64(uint64_t *pArg) const
1921 {
1922     return readAligned(pArg);
1923 }
1924 
1925 uint64_t Parcel::readUint64() const
1926 {
1927     return readAligned<uint64_t>();
1928 }
1929 
1930 status_t Parcel::readPointer(uintptr_t *pArg) const
1931 {
1932     status_t ret;
1933     binder_uintptr_t ptr;
1934     ret = readAligned(&ptr);
1935     if (!ret)
1936         *pArg = ptr;
1937     return ret;
1938 }
1939 
1940 uintptr_t Parcel::readPointer() const
1941 {
1942     return readAligned<binder_uintptr_t>();
1943 }
1944 
1945 
1946 status_t Parcel::readFloat(float *pArg) const
1947 {
1948     return readAligned(pArg);
1949 }
1950 
1951 
1952 float Parcel::readFloat() const
1953 {
1954     return readAligned<float>();
1955 }
1956 
1957 #if defined(__mips__) && defined(__mips_hard_float)
1958 
1959 status_t Parcel::readDouble(double *pArg) const
1960 {
1961     union {
1962       double d;
1963       unsigned long long ll;
1964     } u;
1965     u.d = 0;
1966     status_t status;
1967     status = readAligned(&u.ll);
1968     *pArg = u.d;
1969     return status;
1970 }
1971 
1972 double Parcel::readDouble() const
1973 {
1974     union {
1975       double d;
1976       unsigned long long ll;
1977     } u;
1978     u.ll = readAligned<unsigned long long>();
1979     return u.d;
1980 }
1981 
1982 #else
1983 
1984 status_t Parcel::readDouble(double *pArg) const
1985 {
1986     return readAligned(pArg);
1987 }
1988 
1989 double Parcel::readDouble() const
1990 {
1991     return readAligned<double>();
1992 }
1993 
1994 #endif
1995 
1996 status_t Parcel::readIntPtr(intptr_t *pArg) const
1997 {
1998     return readAligned(pArg);
1999 }
2000 
2001 
2002 intptr_t Parcel::readIntPtr() const
2003 {
2004     return readAligned<intptr_t>();
2005 }
2006 
2007 status_t Parcel::readBool(bool *pArg) const
2008 {
2009     int32_t tmp = 0;
2010     status_t ret = readInt32(&tmp);
2011     *pArg = (tmp != 0);
2012     return ret;
2013 }
2014 
2015 bool Parcel::readBool() const
2016 {
2017     return readInt32() != 0;
2018 }
2019 
2020 status_t Parcel::readChar(char16_t *pArg) const
2021 {
2022     int32_t tmp = 0;
2023     status_t ret = readInt32(&tmp);
2024     *pArg = char16_t(tmp);
2025     return ret;
2026 }
2027 
2028 char16_t Parcel::readChar() const
2029 {
2030     return char16_t(readInt32());
2031 }
2032 
2033 status_t Parcel::readByte(int8_t *pArg) const
2034 {
2035     int32_t tmp = 0;
2036     status_t ret = readInt32(&tmp);
2037     *pArg = int8_t(tmp);
2038     return ret;
2039 }
2040 
2041 int8_t Parcel::readByte() const
2042 {
2043     return int8_t(readInt32());
2044 }
2045 
2046 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
2047     size_t utf16Size = 0;
2048     const char16_t* src = readString16Inplace(&utf16Size);
2049     if (!src) {
2050         return UNEXPECTED_NULL;
2051     }
2052 
2053     // Save ourselves the trouble, we're done.
2054     if (utf16Size == 0u) {
2055         str->clear();
2056         return NO_ERROR;
2057     }
2058 
2059     // Allow for closing '\0'
2060     ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
2061     if (utf8Size < 1) {
2062         return BAD_VALUE;
2063     }
2064     // Note that while it is probably safe to assume string::resize keeps a
2065     // spare byte around for the trailing null, we still pass the size including the trailing null
2066     str->resize(utf8Size);
2067     utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
2068     str->resize(utf8Size - 1);
2069     return NO_ERROR;
2070 }
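// Illustrative sketch (not part of the original file): strings cross the
// wire as UTF-16 (Java's native string format); readUtf8FromUtf16() converts
// back to UTF-8 for std::string callers. Assuming the writer used
// writeUtf8AsUtf16():
//
//   std::string name;
//   status_t err = parcel.readUtf8FromUtf16(&name);
//   if (err == UNEXPECTED_NULL) {
//       // the writer sent a null string
//   }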
2071 
2072 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
2073     const int32_t start = dataPosition();
2074     int32_t size;
2075     status_t status = readInt32(&size);
2076     str->reset();
2077 
2078     if (status != OK || size < 0) {
2079         return status;
2080     }
2081 
2082     setDataPosition(start);
2083     str->reset(new (std::nothrow) std::string());
2084     return readUtf8FromUtf16(str->get());
2085 }
2086 
2087 const char* Parcel::readCString() const
2088 {
2089     if (mDataPos < mDataSize) {
2090         const size_t avail = mDataSize-mDataPos;
2091         const char* str = reinterpret_cast<const char*>(mData+mDataPos);
2092         // is the string's trailing NUL within the parcel's valid bounds?
2093         const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
2094         if (eos) {
2095             const size_t len = eos - str;
2096             mDataPos += pad_size(len+1);
2097             ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
2098             return str;
2099         }
2100     }
2101     return nullptr;
2102 }
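// Illustrative note (not part of the original file): writeCString() stores
// the characters plus the trailing NUL, padded to 4 bytes, so readCString()
// scans for the NUL within the remaining data and advances by
// pad_size(len + 1); e.g. "abc" consumes pad_size(4) == 4 bytes. The returned
// pointer aliases the Parcel's own buffer and is only valid while that
// buffer is alive.
//
//   const char* s = parcel.readCString();   // nullptr if no NUL in range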
2103 
2104 String8 Parcel::readString8() const
2105 {
2106     String8 retString;
2107     status_t status = readString8(&retString);
2108     if (status != OK) {
2109         // We don't care about errors here, so just return an empty string.
2110         return String8();
2111     }
2112     return retString;
2113 }
2114 
2115 status_t Parcel::readString8(String8* pArg) const
2116 {
2117     int32_t size;
2118     status_t status = readInt32(&size);
2119     if (status != OK) {
2120         return status;
2121     }
2122     // watch for potential int overflow from size+1
2123     if (size < 0 || size >= INT32_MAX) {
2124         return BAD_VALUE;
2125     }
2126     // |writeString8| writes nothing for empty string.
2127     if (size == 0) {
2128         *pArg = String8();
2129         return OK;
2130     }
2131     const char* str = (const char*)readInplace(size + 1);
2132     if (str == nullptr) {
2133         return BAD_VALUE;
2134     }
2135     pArg->setTo(str, size);
2136     return OK;
2137 }
2138 
2139 String16 Parcel::readString16() const
2140 {
2141     size_t len;
2142     const char16_t* str = readString16Inplace(&len);
2143     if (str) return String16(str, len);
2144     ALOGE("Reading a NULL string not supported here.");
2145     return String16();
2146 }
2147 
2148 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
2149 {
2150     const int32_t start = dataPosition();
2151     int32_t size;
2152     status_t status = readInt32(&size);
2153     pArg->reset();
2154 
2155     if (status != OK || size < 0) {
2156         return status;
2157     }
2158 
2159     setDataPosition(start);
2160     pArg->reset(new (std::nothrow) String16());
2161 
2162     status = readString16(pArg->get());
2163 
2164     if (status != OK) {
2165         pArg->reset();
2166     }
2167 
2168     return status;
2169 }
2170 
2171 status_t Parcel::readString16(String16* pArg) const
2172 {
2173     size_t len;
2174     const char16_t* str = readString16Inplace(&len);
2175     if (str) {
2176         pArg->setTo(str, len);
2177         return 0;
2178     } else {
2179         *pArg = String16();
2180         return UNEXPECTED_NULL;
2181     }
2182 }
2183 
2184 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
2185 {
2186     int32_t size = readInt32();
2187     // watch for potential int overflow from size+1
2188     if (size >= 0 && size < INT32_MAX) {
2189         *outLen = size;
2190         const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
2191         if (str != nullptr) {
2192             return str;
2193         }
2194     }
2195     *outLen = 0;
2196     return nullptr;
2197 }
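// Illustrative note (not part of the original file): writeString16() stores
// an int32 character count, then (count + 1) char16_t values including the
// terminating NUL, padded to 4 bytes. For the 2-character string "hi" that
// is 4 + pad_size(3 * 2) == 4 + 8 == 12 bytes, which is exactly what
// readString16Inplace() consumes via readInplace((size+1)*sizeof(char16_t)).
//
//   size_t len;
//   const char16_t* chars = parcel.readString16Inplace(&len);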
2198 
2199 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
2200 {
2201     status_t status = readNullableStrongBinder(val);
2202     if (status == OK && !val->get()) {
2203         status = UNEXPECTED_NULL;
2204     }
2205     return status;
2206 }
2207 
2208 status_t Parcel::readNullableStrongBinder(sp<IBinder>* val) const
2209 {
2210     return unflatten_binder(ProcessState::self(), *this, val);
2211 }
2212 
2213 sp<IBinder> Parcel::readStrongBinder() const
2214 {
2215     sp<IBinder> val;
2216     // Note that a lot of code in Android reads binders by hand with this
2217     // method, and that code has historically been ok with getting nullptr
2218     // back (while ignoring error codes).
2219     readNullableStrongBinder(&val);
2220     return val;
2221 }
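// Illustrative sketch (not part of the original file): callers that cannot
// tolerate a null binder should prefer the error-returning overload, which
// maps a null object to UNEXPECTED_NULL instead of silently returning it:
//
//   sp<IBinder> binder;
//   status_t err = reply.readStrongBinder(&binder);   // "reply" is assumed
//   if (err == OK) {
//       // binder is guaranteed non-null here
//   }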
2222 
2223 wp<IBinder> Parcel::readWeakBinder() const
2224 {
2225     wp<IBinder> val;
2226     unflatten_binder(ProcessState::self(), *this, &val);
2227     return val;
2228 }
2229 
2230 status_t Parcel::readParcelable(Parcelable* parcelable) const {
2231     int32_t have_parcelable = 0;
2232     status_t status = readInt32(&have_parcelable);
2233     if (status != OK) {
2234         return status;
2235     }
2236     if (!have_parcelable) {
2237         return UNEXPECTED_NULL;
2238     }
2239     return parcelable->readFromParcel(this);
2240 }
2241 
2242 status_t Parcel::readValue(binder::Value* value) const {
2243     return value->readFromParcel(this);
2244 }
2245 
2246 int32_t Parcel::readExceptionCode() const
2247 {
2248     binder::Status status;
2249     status.readFromParcel(*this);
2250     return status.exceptionCode();
2251 }
2252 
2253 native_handle* Parcel::readNativeHandle() const
2254 {
2255     int numFds, numInts;
2256     status_t err;
2257     err = readInt32(&numFds);
2258     if (err != NO_ERROR) return nullptr;
2259     err = readInt32(&numInts);
2260     if (err != NO_ERROR) return nullptr;
2261 
2262     native_handle* h = native_handle_create(numFds, numInts);
2263     if (!h) {
2264         return nullptr;
2265     }
2266 
2267     for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
2268         h->data[i] = fcntl(readFileDescriptor(), F_DUPFD_CLOEXEC, 0);
2269         if (h->data[i] < 0) {
2270             for (int j = 0; j < i; j++) {
2271                 close(h->data[j]);
2272             }
2273             native_handle_delete(h);
2274             return nullptr;
2275         }
2276     }
2277     err = read(h->data + numFds, sizeof(int)*numInts);
2278     if (err != NO_ERROR) {
2279         native_handle_close(h);
2280         native_handle_delete(h);
2281         h = nullptr;
2282     }
2283     return h;
2284 }
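// Illustrative sketch (not part of the original file): readNativeHandle()
// dups every fd out of the Parcel, so the caller owns the returned handle
// and must close and free it. Assuming the writer used writeNativeHandle():
//
//   native_handle* h = parcel.readNativeHandle();
//   if (h != nullptr) {
//       // ... use h->data[0 .. h->numFds-1] ...
//       native_handle_close(h);    // closes the dup'ed fds
//       native_handle_delete(h);   // frees the handle itself
//   }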
2285 
2286 int Parcel::readFileDescriptor() const
2287 {
2288     const flat_binder_object* flat = readObject(true);
2289 
2290     if (flat && flat->hdr.type == BINDER_TYPE_FD) {
2291         return flat->handle;
2292     }
2293 
2294     return BAD_TYPE;
2295 }
2296 
2297 int Parcel::readParcelFileDescriptor() const
2298 {
2299     int32_t hasComm = readInt32();
2300     int fd = readFileDescriptor();
2301     if (hasComm != 0) {
2302         // detach (owned by the binder driver)
2303         int comm = readFileDescriptor();
2304 
2305         // warning: this must be kept in sync with:
2306         // frameworks/base/core/java/android/os/ParcelFileDescriptor.java
2307         enum ParcelFileDescriptorStatus {
2308             DETACHED = 2,
2309         };
2310 
2311 #if BYTE_ORDER == BIG_ENDIAN
2312         const int32_t message = ParcelFileDescriptorStatus::DETACHED;
2313 #endif
2314 #if BYTE_ORDER == LITTLE_ENDIAN
2315         const int32_t message = __builtin_bswap32(ParcelFileDescriptorStatus::DETACHED);
2316 #endif
2317 
2318         ssize_t written = TEMP_FAILURE_RETRY(
2319             ::write(comm, &message, sizeof(message)));
2320 
2321         if (written == -1 || written != sizeof(message)) {
2322             ALOGW("Failed to detach ParcelFileDescriptor written: %zd err: %s",
2323                 written, strerror(errno));
2324             return BAD_TYPE;
2325         }
2326     }
2327     return fd;
2328 }
2329 
2330 status_t Parcel::readUniqueFileDescriptor(base::unique_fd* val) const
2331 {
2332     int got = readFileDescriptor();
2333 
2334     if (got == BAD_TYPE) {
2335         return BAD_TYPE;
2336     }
2337 
2338     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2339 
2340     if (val->get() < 0) {
2341         return BAD_VALUE;
2342     }
2343 
2344     return OK;
2345 }
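// Illustrative sketch (not part of the original file):
// readUniqueFileDescriptor() hands out an owned duplicate, so the fd inside
// the Parcel (closed when the Parcel is destroyed) and the caller's copy
// have independent lifetimes. Assuming the writer used
// writeUniqueFileDescriptor():
//
//   base::unique_fd fd;
//   if (parcel.readUniqueFileDescriptor(&fd) == OK) {
//       // fd closes itself automatically when it goes out of scope
//   }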
2346 
2347 status_t Parcel::readUniqueParcelFileDescriptor(base::unique_fd* val) const
2348 {
2349     int got = readParcelFileDescriptor();
2350 
2351     if (got == BAD_TYPE) {
2352         return BAD_TYPE;
2353     }
2354 
2355     val->reset(fcntl(got, F_DUPFD_CLOEXEC, 0));
2356 
2357     if (val->get() < 0) {
2358         return BAD_VALUE;
2359     }
2360 
2361     return OK;
2362 }
2363 
2364 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<base::unique_fd>>* val) const {
2365     return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2366 }
2367 
2368 status_t Parcel::readUniqueFileDescriptorVector(std::vector<base::unique_fd>* val) const {
2369     return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2370 }
2371 
2372 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2373 {
2374     int32_t blobType;
2375     status_t status = readInt32(&blobType);
2376     if (status) return status;
2377 
2378     if (blobType == BLOB_INPLACE) {
2379         ALOGV("readBlob: read in place");
2380         const void* ptr = readInplace(len);
2381         if (!ptr) return BAD_VALUE;
2382 
2383         outBlob->init(-1, const_cast<void*>(ptr), len, false);
2384         return NO_ERROR;
2385     }
2386 
2387     ALOGV("readBlob: read from ashmem");
2388     bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2389     int fd = readFileDescriptor();
2390     if (fd == int(BAD_TYPE)) return BAD_VALUE;
2391 
2392     if (!ashmem_valid(fd)) {
2393         ALOGE("invalid fd");
2394         return BAD_VALUE;
2395     }
2396     int size = ashmem_get_size_region(fd);
2397     if (size < 0 || size_t(size) < len) {
2398         ALOGE("request size %zu does not match fd size %d", len, size);
2399         return BAD_VALUE;
2400     }
2401     void* ptr = ::mmap(nullptr, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2402             MAP_SHARED, fd, 0);
2403     if (ptr == MAP_FAILED) return NO_MEMORY;
2404 
2405     outBlob->init(fd, ptr, len, isMutable);
2406     return NO_ERROR;
2407 }
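// Illustrative sketch (not part of the original file): the write side keeps
// payloads up to BLOB_INPLACE_LIMIT (16 KiB) inline and moves larger ones
// into ashmem, so readBlob() may hand back either a pointer into the Parcel
// or an mmap()ed region; release() undoes the mapping in the latter case.
// The length is assumed to have been exchanged separately (e.g. as an int32)
// by the caller:
//
//   Parcel::ReadableBlob blob;
//   if (parcel.readBlob(length, &blob) == NO_ERROR) {
//       // copy out of blob.data() before releasing
//       blob.release();
//   }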
2408 
2409 status_t Parcel::read(FlattenableHelperInterface& val) const
2410 {
2411     // size
2412     const size_t len = this->readInt32();
2413     const size_t fd_count = this->readInt32();
2414 
2415     if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2416         // don't accept size_t values which may have come from an
2417         // inadvertent conversion from a negative int.
2418         return BAD_VALUE;
2419     }
2420 
2421     // payload
2422     void const* const buf = this->readInplace(pad_size(len));
2423     if (buf == nullptr)
2424         return BAD_VALUE;
2425 
2426     int* fds = nullptr;
2427     if (fd_count) {
2428         fds = new (std::nothrow) int[fd_count];
2429         if (fds == nullptr) {
2430             ALOGE("read: failed to allocate requested %zu fds", fd_count);
2431             return BAD_VALUE;
2432         }
2433     }
2434 
2435     status_t err = NO_ERROR;
2436     for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2437         int fd = this->readFileDescriptor();
2438         if (fd < 0 || ((fds[i] = fcntl(fd, F_DUPFD_CLOEXEC, 0)) < 0)) {
2439             err = BAD_VALUE;
2440             ALOGE("fcntl(F_DUPFD_CLOEXEC) failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2441                   i, fds[i], fd_count, strerror(fd < 0 ? -fd : errno));
2442             // Close all the file descriptors that were dup-ed.
2443             for (size_t j=0; j<i ;j++) {
2444                 close(fds[j]);
2445             }
2446         }
2447     }
2448 
2449     if (err == NO_ERROR) {
2450         err = val.unflatten(buf, len, fds, fd_count);
2451     }
2452 
2453     if (fd_count) {
2454         delete [] fds;
2455     }
2456 
2457     return err;
2458 }
2459 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2460 {
2461     const size_t DPOS = mDataPos;
2462     if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2463         const flat_binder_object* obj
2464                 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2465         mDataPos = DPOS + sizeof(flat_binder_object);
2466         if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2467             // When transferring a NULL object, we don't write it into
2468             // the object list, so we don't want to check for it when
2469             // reading.
2470             ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2471             return obj;
2472         }
2473 
2474         // Ensure that this object is valid...
2475         binder_size_t* const OBJS = mObjects;
2476         const size_t N = mObjectsSize;
2477         size_t opos = mNextObjectHint;
2478 
2479         if (N > 0) {
2480             ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2481                  this, DPOS, opos);
2482 
2483             // Start at the current hint position, looking for an object at
2484             // the current data position.
2485             if (opos < N) {
2486                 while (opos < (N-1) && OBJS[opos] < DPOS) {
2487                     opos++;
2488                 }
2489             } else {
2490                 opos = N-1;
2491             }
2492             if (OBJS[opos] == DPOS) {
2493                 // Found it!
2494                 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2495                      this, DPOS, opos);
2496                 mNextObjectHint = opos+1;
2497                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2498                 return obj;
2499             }
2500 
2501             // Look backwards for it...
2502             while (opos > 0 && OBJS[opos] > DPOS) {
2503                 opos--;
2504             }
2505             if (OBJS[opos] == DPOS) {
2506                 // Found it!
2507                 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2508                      this, DPOS, opos);
2509                 mNextObjectHint = opos+1;
2510                 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2511                 return obj;
2512             }
2513         }
2514         ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2515              this, DPOS);
2516     }
2517     return nullptr;
2518 }
2519 
2520 void Parcel::closeFileDescriptors()
2521 {
2522     size_t i = mObjectsSize;
2523     if (i > 0) {
2524         //ALOGI("Closing file descriptors for %zu objects...", i);
2525     }
2526     while (i > 0) {
2527         i--;
2528         const flat_binder_object* flat
2529             = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2530         if (flat->hdr.type == BINDER_TYPE_FD) {
2531             //ALOGI("Closing fd: %ld", flat->handle);
2532             close(flat->handle);
2533         }
2534     }
2535 }
2536 
2537 uintptr_t Parcel::ipcData() const
2538 {
2539     return reinterpret_cast<uintptr_t>(mData);
2540 }
2541 
2542 size_t Parcel::ipcDataSize() const
2543 {
2544     return (mDataSize > mDataPos ? mDataSize : mDataPos);
2545 }
2546 
2547 uintptr_t Parcel::ipcObjects() const
2548 {
2549     return reinterpret_cast<uintptr_t>(mObjects);
2550 }
2551 
2552 size_t Parcel::ipcObjectsCount() const
2553 {
2554     return mObjectsSize;
2555 }
2556 
2557 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2558     const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2559 {
2560     binder_size_t minOffset = 0;
2561     freeDataNoInit();
2562     mError = NO_ERROR;
2563     mData = const_cast<uint8_t*>(data);
2564     mDataSize = mDataCapacity = dataSize;
2565     //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2566     mDataPos = 0;
2567     ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2568     mObjects = const_cast<binder_size_t*>(objects);
2569     mObjectsSize = mObjectsCapacity = objectsCount;
2570     mNextObjectHint = 0;
2571     mObjectsSorted = false;
2572     mOwner = relFunc;
2573     mOwnerCookie = relCookie;
2574     for (size_t i = 0; i < mObjectsSize; i++) {
2575         binder_size_t offset = mObjects[i];
2576         if (offset < minOffset) {
2577             ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2578                   __func__, (uint64_t)offset, (uint64_t)minOffset);
2579             mObjectsSize = 0;
2580             break;
2581         }
2582         minOffset = offset + sizeof(flat_binder_object);
2583     }
2584     scanForFds();
2585 }
2586 
2587 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2588 {
2589     to << "Parcel(";
2590 
2591     if (errorCheck() != NO_ERROR) {
2592         const status_t err = errorCheck();
2593         to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2594     } else if (dataSize() > 0) {
2595         const uint8_t* DATA = data();
2596         to << indent << HexDump(DATA, dataSize()) << dedent;
2597         const binder_size_t* OBJS = objects();
2598         const size_t N = objectsCount();
2599         for (size_t i=0; i<N; i++) {
2600             const flat_binder_object* flat
2601                 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2602             to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2603                 << TypeCode(flat->hdr.type & 0x7f7f7f00)
2604                 << " = " << flat->binder;
2605         }
2606     } else {
2607         to << "NULL";
2608     }
2609 
2610     to << ")";
2611 }
2612 
2613 void Parcel::releaseObjects()
2614 {
2615     size_t i = mObjectsSize;
2616     if (i == 0) {
2617         return;
2618     }
2619     sp<ProcessState> proc(ProcessState::self());
2620     uint8_t* const data = mData;
2621     binder_size_t* const objects = mObjects;
2622     while (i > 0) {
2623         i--;
2624         const flat_binder_object* flat
2625             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2626         release_object(proc, *flat, this, &mOpenAshmemSize);
2627     }
2628 }
2629 
2630 void Parcel::acquireObjects()
2631 {
2632     size_t i = mObjectsSize;
2633     if (i == 0) {
2634         return;
2635     }
2636     const sp<ProcessState> proc(ProcessState::self());
2637     uint8_t* const data = mData;
2638     binder_size_t* const objects = mObjects;
2639     while (i > 0) {
2640         i--;
2641         const flat_binder_object* flat
2642             = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2643         acquire_object(proc, *flat, this, &mOpenAshmemSize);
2644     }
2645 }
2646 
2647 void Parcel::freeData()
2648 {
2649     freeDataNoInit();
2650     initState();
2651 }
2652 
2653 void Parcel::freeDataNoInit()
2654 {
2655     if (mOwner) {
2656         LOG_ALLOC("Parcel %p: freeing other owner data", this);
2657         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2658         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2659     } else {
2660         LOG_ALLOC("Parcel %p: freeing allocated data", this);
2661         releaseObjects();
2662         if (mData) {
2663             LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2664             pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2665             if (mDataCapacity <= gParcelGlobalAllocSize) {
2666               gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2667             } else {
2668               gParcelGlobalAllocSize = 0;
2669             }
2670             if (gParcelGlobalAllocCount > 0) {
2671               gParcelGlobalAllocCount--;
2672             }
2673             pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2674             free(mData);
2675         }
2676         if (mObjects) free(mObjects);
2677     }
2678 }
2679 
2680 status_t Parcel::growData(size_t len)
2681 {
2682     if (len > INT32_MAX) {
2683         // don't accept size_t values which may have come from an
2684         // inadvertent conversion from a negative int.
2685         return BAD_VALUE;
2686     }
2687 
2688     size_t newSize = ((mDataSize+len)*3)/2;
2689     return (newSize <= mDataSize)
2690             ? (status_t) NO_MEMORY
2691             : continueWrite(newSize);
2692 }
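// Illustrative note (not part of the original file): growData() requests a
// buffer 1.5x the data that would result from the pending write; e.g. with
// mDataSize == 100 and len == 8 the new capacity request is
// ((100 + 8) * 3) / 2 == 162 bytes. The "newSize <= mDataSize" check above
// rejects the case where this arithmetic wraps around.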
2693 
2694 status_t Parcel::restartWrite(size_t desired)
2695 {
2696     if (desired > INT32_MAX) {
2697         // don't accept size_t values which may have come from an
2698         // inadvertent conversion from a negative int.
2699         return BAD_VALUE;
2700     }
2701 
2702     if (mOwner) {
2703         freeData();
2704         return continueWrite(desired);
2705     }
2706 
2707     uint8_t* data = (uint8_t*)realloc(mData, desired);
2708     if (!data && desired > mDataCapacity) {
2709         mError = NO_MEMORY;
2710         return NO_MEMORY;
2711     }
2712 
2713     releaseObjects();
2714 
2715     if (data) {
2716         LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2717         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2718         gParcelGlobalAllocSize += desired;
2719         gParcelGlobalAllocSize -= mDataCapacity;
2720         if (!mData) {
2721             gParcelGlobalAllocCount++;
2722         }
2723         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2724         mData = data;
2725         mDataCapacity = desired;
2726     }
2727 
2728     mDataSize = mDataPos = 0;
2729     ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2730     ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2731 
2732     free(mObjects);
2733     mObjects = nullptr;
2734     mObjectsSize = mObjectsCapacity = 0;
2735     mNextObjectHint = 0;
2736     mObjectsSorted = false;
2737     mHasFds = false;
2738     mFdsKnown = true;
2739     mAllowFds = true;
2740 
2741     return NO_ERROR;
2742 }
2743 
2744 status_t Parcel::continueWrite(size_t desired)
2745 {
2746     if (desired > INT32_MAX) {
2747         // don't accept size_t values which may have come from an
2748         // inadvertent conversion from a negative int.
2749         return BAD_VALUE;
2750     }
2751 
2752     // If shrinking, first adjust for any objects that appear
2753     // after the new data size.
2754     size_t objectsSize = mObjectsSize;
2755     if (desired < mDataSize) {
2756         if (desired == 0) {
2757             objectsSize = 0;
2758         } else {
2759             while (objectsSize > 0) {
2760                 if (mObjects[objectsSize-1] < desired)
2761                     break;
2762                 objectsSize--;
2763             }
2764         }
2765     }
2766 
2767     if (mOwner) {
2768         // If the size is going to zero, just release the owner's data.
2769         if (desired == 0) {
2770             freeData();
2771             return NO_ERROR;
2772         }
2773 
2774         // If there is a different owner, we need to take
2775         // possession.
2776         uint8_t* data = (uint8_t*)malloc(desired);
2777         if (!data) {
2778             mError = NO_MEMORY;
2779             return NO_MEMORY;
2780         }
2781         binder_size_t* objects = nullptr;
2782 
2783         if (objectsSize) {
2784             objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2785             if (!objects) {
2786                 free(data);
2787 
2788                 mError = NO_MEMORY;
2789                 return NO_MEMORY;
2790             }
2791 
2792             // Little hack to only acquire references on objects
2793             // we will be keeping.
2794             size_t oldObjectsSize = mObjectsSize;
2795             mObjectsSize = objectsSize;
2796             acquireObjects();
2797             mObjectsSize = oldObjectsSize;
2798         }
2799 
2800         if (mData) {
2801             memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2802         }
2803         if (objects && mObjects) {
2804             memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2805         }
2806         //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2807         mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2808         mOwner = nullptr;
2809 
2810         LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2811         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2812         gParcelGlobalAllocSize += desired;
2813         gParcelGlobalAllocCount++;
2814         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2815 
2816         mData = data;
2817         mObjects = objects;
2818         mDataSize = (mDataSize < desired) ? mDataSize : desired;
2819         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2820         mDataCapacity = desired;
2821         mObjectsSize = mObjectsCapacity = objectsSize;
2822         mNextObjectHint = 0;
2823         mObjectsSorted = false;
2824 
2825     } else if (mData) {
2826         if (objectsSize < mObjectsSize) {
2827             // Need to release refs on any objects we are dropping.
2828             const sp<ProcessState> proc(ProcessState::self());
2829             for (size_t i=objectsSize; i<mObjectsSize; i++) {
2830                 const flat_binder_object* flat
2831                     = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2832                 if (flat->hdr.type == BINDER_TYPE_FD) {
2833                     // will need to rescan because we may have lopped off the only FDs
2834                     mFdsKnown = false;
2835                 }
2836                 release_object(proc, *flat, this, &mOpenAshmemSize);
2837             }
2838 
2839             if (objectsSize == 0) {
2840                 free(mObjects);
2841                 mObjects = nullptr;
2842                 mObjectsCapacity = 0;
2843             } else {
2844                 binder_size_t* objects =
2845                     (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2846                 if (objects) {
2847                     mObjects = objects;
2848                     mObjectsCapacity = objectsSize;
2849                 }
2850             }
2851             mObjectsSize = objectsSize;
2852             mNextObjectHint = 0;
2853             mObjectsSorted = false;
2854         }
2855 
2856         // We own the data, so we can just do a realloc().
2857         if (desired > mDataCapacity) {
2858             uint8_t* data = (uint8_t*)realloc(mData, desired);
2859             if (data) {
2860                 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2861                         desired);
2862                 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2863                 gParcelGlobalAllocSize += desired;
2864                 gParcelGlobalAllocSize -= mDataCapacity;
2865                 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2866                 mData = data;
2867                 mDataCapacity = desired;
2868             } else {
2869                 mError = NO_MEMORY;
2870                 return NO_MEMORY;
2871             }
2872         } else {
2873             if (mDataSize > desired) {
2874                 mDataSize = desired;
2875                 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2876             }
2877             if (mDataPos > desired) {
2878                 mDataPos = desired;
2879                 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2880             }
2881         }
2882 
2883     } else {
2884         // This is the first data.  Easy!
2885         uint8_t* data = (uint8_t*)malloc(desired);
2886         if (!data) {
2887             mError = NO_MEMORY;
2888             return NO_MEMORY;
2889         }
2890 
2891         if(!(mDataCapacity == 0 && mObjects == nullptr
2892              && mObjectsCapacity == 0)) {
2893             ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2894         }
2895 
2896         LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2897         pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2898         gParcelGlobalAllocSize += desired;
2899         gParcelGlobalAllocCount++;
2900         pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2901 
2902         mData = data;
2903         mDataSize = mDataPos = 0;
2904         ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2905         ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2906         mDataCapacity = desired;
2907     }
2908 
2909     return NO_ERROR;
2910 }
2911 
2912 void Parcel::initState()
2913 {
2914     LOG_ALLOC("Parcel %p: initState", this);
2915     mError = NO_ERROR;
2916     mData = nullptr;
2917     mDataSize = 0;
2918     mDataCapacity = 0;
2919     mDataPos = 0;
2920     ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2921     ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2922     mObjects = nullptr;
2923     mObjectsSize = 0;
2924     mObjectsCapacity = 0;
2925     mNextObjectHint = 0;
2926     mObjectsSorted = false;
2927     mHasFds = false;
2928     mFdsKnown = true;
2929     mAllowFds = true;
2930     mOwner = nullptr;
2931     mOpenAshmemSize = 0;
2932     mWorkSourceRequestHeaderPosition = 0;
2933     mRequestHeaderPresent = false;
2934 
2935     // racing multiple init leads only to multiple identical write
2936     if (gMaxFds == 0) {
2937         struct rlimit result;
2938         if (!getrlimit(RLIMIT_NOFILE, &result)) {
2939             gMaxFds = (size_t)result.rlim_cur;
2940             //ALOGI("parcel fd limit set to %zu", gMaxFds);
2941         } else {
2942             ALOGW("Unable to getrlimit: %s", strerror(errno));
2943             gMaxFds = 1024;
2944         }
2945     }
2946 }
2947 
2948 void Parcel::scanForFds() const
2949 {
2950     bool hasFds = false;
2951     for (size_t i=0; i<mObjectsSize; i++) {
2952         const flat_binder_object* flat
2953             = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2954         if (flat->hdr.type == BINDER_TYPE_FD) {
2955             hasFds = true;
2956             break;
2957         }
2958     }
2959     mHasFds = hasFds;
2960     mFdsKnown = true;
2961 }
2962 
2963 size_t Parcel::getBlobAshmemSize() const
2964 {
2965     // This used to return the size of all blobs that were written to ashmem, now we're returning
2966     // the ashmem currently referenced by this Parcel, which should be equivalent.
2967     // TODO: Remove method once ABI can be changed.
2968     return mOpenAshmemSize;
2969 }
2970 
2971 size_t Parcel::getOpenAshmemSize() const
2972 {
2973     return mOpenAshmemSize;
2974 }
2975 
2976 // --- Parcel::Blob ---
2977 
2978 Parcel::Blob::Blob() :
2979         mFd(-1), mData(nullptr), mSize(0), mMutable(false) {
2980 }
2981 
2982 Parcel::Blob::~Blob() {
2983     release();
2984 }
2985 
2986 void Parcel::Blob::release() {
2987     if (mFd != -1 && mData) {
2988         ::munmap(mData, mSize);
2989     }
2990     clear();
2991 }
2992 
2993 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2994     mFd = fd;
2995     mData = data;
2996     mSize = size;
2997     mMutable = isMutable;
2998 }
2999 
3000 void Parcel::Blob::clear() {
3001     mFd = -1;
3002     mData = nullptr;
3003     mSize = 0;
3004     mMutable = false;
3005 }
3006 
3007 }; // namespace android
3008