1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <errno.h>
21 #include <fcntl.h>
22 #include <inttypes.h>
23 #include <pthread.h>
24 #include <stdint.h>
25 #include <stdio.h>
26 #include <stdlib.h>
27 #include <sys/mman.h>
28 #include <sys/stat.h>
29 #include <sys/types.h>
30 #include <sys/resource.h>
31 #include <unistd.h>
32
33 #include <binder/Binder.h>
34 #include <binder/BpBinder.h>
35 #include <binder/IPCThreadState.h>
36 #include <binder/Parcel.h>
37 #include <binder/ProcessState.h>
38 #include <binder/Status.h>
39 #include <binder/TextOutput.h>
40
41 #include <cutils/ashmem.h>
42 #include <utils/Debug.h>
43 #include <utils/Flattenable.h>
44 #include <utils/Log.h>
45 #include <utils/misc.h>
46 #include <utils/String8.h>
47 #include <utils/String16.h>
48
49 #include <private/binder/binder_module.h>
50 #include <private/binder/Static.h>
51
52 #ifndef INT32_MAX
53 #define INT32_MAX ((int32_t)(2147483647))
54 #endif
55
56 #define LOG_REFS(...)
57 //#define LOG_REFS(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
58 #define LOG_ALLOC(...)
59 //#define LOG_ALLOC(...) ALOG(LOG_DEBUG, LOG_TAG, __VA_ARGS__)
60
61 // ---------------------------------------------------------------------------
62
63 // This macro should never be used at runtime, as an overly large value
64 // of s could cause an integer overflow. Instead, always use the
65 // wrapper function pad_size().
66 #define PAD_SIZE_UNSAFE(s) (((s)+3)&~3)
67
68 static size_t pad_size(size_t s) {
69 if (s > (SIZE_T_MAX - 3)) {
70 abort();
71 }
72 return PAD_SIZE_UNSAFE(s);
73 }
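// pad_size() rounds a length up to the next multiple of 4, the alignment
// unit used throughout the Parcel wire format; for example pad_size(1) == 4,
// pad_size(4) == 4, pad_size(5) == 8.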
74
75 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
76 #define STRICT_MODE_PENALTY_GATHER (0x40 << 16)
77
78 // XXX This can be made public if we want to provide
79 // support for typed data.
80 struct small_flat_data
81 {
82 uint32_t type;
83 uint32_t data;
84 };
85
86 namespace android {
87
88 static pthread_mutex_t gParcelGlobalAllocSizeLock = PTHREAD_MUTEX_INITIALIZER;
89 static size_t gParcelGlobalAllocSize = 0;
90 static size_t gParcelGlobalAllocCount = 0;
91
92 static size_t gMaxFds = 0;
93
94 // Maximum size of a blob to transfer in-place.
95 static const size_t BLOB_INPLACE_LIMIT = 16 * 1024;
96
97 enum {
98 BLOB_INPLACE = 0,
99 BLOB_ASHMEM_IMMUTABLE = 1,
100 BLOB_ASHMEM_MUTABLE = 2,
101 };
102
103 static dev_t ashmem_rdev()
104 {
105 static dev_t __ashmem_rdev;
106 static pthread_mutex_t __ashmem_rdev_lock = PTHREAD_MUTEX_INITIALIZER;
107
108 pthread_mutex_lock(&__ashmem_rdev_lock);
109
110 dev_t rdev = __ashmem_rdev;
111 if (!rdev) {
112 int fd = TEMP_FAILURE_RETRY(open("/dev/ashmem", O_RDONLY));
113 if (fd >= 0) {
114 struct stat st;
115
116 int ret = TEMP_FAILURE_RETRY(fstat(fd, &st));
117 close(fd);
118 if ((ret >= 0) && S_ISCHR(st.st_mode)) {
119 rdev = __ashmem_rdev = st.st_rdev;
120 }
121 }
122 }
123
124 pthread_mutex_unlock(&__ashmem_rdev_lock);
125
126 return rdev;
127 }
128
129 void acquire_object(const sp<ProcessState>& proc,
130 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
131 {
132 switch (obj.type) {
133 case BINDER_TYPE_BINDER:
134 if (obj.binder) {
135 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
136 reinterpret_cast<IBinder*>(obj.cookie)->incStrong(who);
137 }
138 return;
139 case BINDER_TYPE_WEAK_BINDER:
140 if (obj.binder)
141 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
142 return;
143 case BINDER_TYPE_HANDLE: {
144 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
145 if (b != NULL) {
146 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
147 b->incStrong(who);
148 }
149 return;
150 }
151 case BINDER_TYPE_WEAK_HANDLE: {
152 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
153 if (b != NULL) b.get_refs()->incWeak(who);
154 return;
155 }
156 case BINDER_TYPE_FD: {
157 if ((obj.cookie != 0) && (outAshmemSize != NULL)) {
158 struct stat st;
159 int ret = fstat(obj.handle, &st);
160 if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
161 // If we own an ashmem fd, keep track of how much memory it refers to.
162 int size = ashmem_get_size_region(obj.handle);
163 if (size > 0) {
164 *outAshmemSize += size;
165 }
166 }
167 }
168 return;
169 }
170 }
171
172 ALOGD("Invalid object type 0x%08x", obj.type);
173 }
174
175 void acquire_object(const sp<ProcessState>& proc,
176 const flat_binder_object& obj, const void* who)
177 {
178 acquire_object(proc, obj, who, NULL);
179 }
180
181 static void release_object(const sp<ProcessState>& proc,
182 const flat_binder_object& obj, const void* who, size_t* outAshmemSize)
183 {
184 switch (obj.type) {
185 case BINDER_TYPE_BINDER:
186 if (obj.binder) {
187 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
188 reinterpret_cast<IBinder*>(obj.cookie)->decStrong(who);
189 }
190 return;
191 case BINDER_TYPE_WEAK_BINDER:
192 if (obj.binder)
193 reinterpret_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
194 return;
195 case BINDER_TYPE_HANDLE: {
196 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
197 if (b != NULL) {
198 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
199 b->decStrong(who);
200 }
201 return;
202 }
203 case BINDER_TYPE_WEAK_HANDLE: {
204 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
205 if (b != NULL) b.get_refs()->decWeak(who);
206 return;
207 }
208 case BINDER_TYPE_FD: {
209 if (obj.cookie != 0) { // owned
210 if (outAshmemSize != NULL) {
211 struct stat st;
212 int ret = fstat(obj.handle, &st);
213 if (!ret && S_ISCHR(st.st_mode) && (st.st_rdev == ashmem_rdev())) {
214 int size = ashmem_get_size_region(obj.handle);
215 if (size > 0) {
216 *outAshmemSize -= size;
217 }
218 }
219 }
220
221 close(obj.handle);
222 }
223 return;
224 }
225 }
226
227 ALOGE("Invalid object type 0x%08x", obj.type);
228 }
229
230 void release_object(const sp<ProcessState>& proc,
231 const flat_binder_object& obj, const void* who)
232 {
233 release_object(proc, obj, who, NULL);
234 }
235
236 inline static status_t finish_flatten_binder(
237 const sp<IBinder>& /*binder*/, const flat_binder_object& flat, Parcel* out)
238 {
239 return out->writeObject(flat, false);
240 }
241
242 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
243 const sp<IBinder>& binder, Parcel* out)
244 {
245 flat_binder_object obj;
246
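// The low byte of flags carries the minimum scheduler priority for the node
// (0x7f here), and FLAT_BINDER_FLAG_ACCEPTS_FDS tells the kernel that this
// node may receive file descriptors.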
247 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
248 if (binder != NULL) {
249 IBinder *local = binder->localBinder();
250 if (!local) {
251 BpBinder *proxy = binder->remoteBinder();
252 if (proxy == NULL) {
253 ALOGE("null proxy");
254 }
255 const int32_t handle = proxy ? proxy->handle() : 0;
256 obj.type = BINDER_TYPE_HANDLE;
257 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
258 obj.handle = handle;
259 obj.cookie = 0;
260 } else {
261 obj.type = BINDER_TYPE_BINDER;
262 obj.binder = reinterpret_cast<uintptr_t>(local->getWeakRefs());
263 obj.cookie = reinterpret_cast<uintptr_t>(local);
264 }
265 } else {
266 obj.type = BINDER_TYPE_BINDER;
267 obj.binder = 0;
268 obj.cookie = 0;
269 }
270
271 return finish_flatten_binder(binder, obj, out);
272 }
273
274 status_t flatten_binder(const sp<ProcessState>& /*proc*/,
275 const wp<IBinder>& binder, Parcel* out)
276 {
277 flat_binder_object obj;
278
279 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
280 if (binder != NULL) {
281 sp<IBinder> real = binder.promote();
282 if (real != NULL) {
283 IBinder *local = real->localBinder();
284 if (!local) {
285 BpBinder *proxy = real->remoteBinder();
286 if (proxy == NULL) {
287 ALOGE("null proxy");
288 }
289 const int32_t handle = proxy ? proxy->handle() : 0;
290 obj.type = BINDER_TYPE_WEAK_HANDLE;
291 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
292 obj.handle = handle;
293 obj.cookie = 0;
294 } else {
295 obj.type = BINDER_TYPE_WEAK_BINDER;
296 obj.binder = reinterpret_cast<uintptr_t>(binder.get_refs());
297 obj.cookie = reinterpret_cast<uintptr_t>(binder.unsafe_get());
298 }
299 return finish_flatten_binder(real, obj, out);
300 }
301
302 // XXX How to deal? In order to flatten the given binder,
303 // we need to probe it for information, which requires a primary
304 // reference... but we don't have one.
305 //
306 // The OpenBinder implementation uses a dynamic_cast<> here,
307 // but we can't do that with the different reference counting
308 // implementation we are using.
309 ALOGE("Unable to unflatten Binder weak reference!");
310 obj.type = BINDER_TYPE_BINDER;
311 obj.binder = 0;
312 obj.cookie = 0;
313 return finish_flatten_binder(NULL, obj, out);
314
315 } else {
316 obj.type = BINDER_TYPE_BINDER;
317 obj.binder = 0;
318 obj.cookie = 0;
319 return finish_flatten_binder(NULL, obj, out);
320 }
321 }
322
323 inline static status_t finish_unflatten_binder(
324 BpBinder* /*proxy*/, const flat_binder_object& /*flat*/,
325 const Parcel& /*in*/)
326 {
327 return NO_ERROR;
328 }
329
330 status_t unflatten_binder(const sp<ProcessState>& proc,
331 const Parcel& in, sp<IBinder>* out)
332 {
333 const flat_binder_object* flat = in.readObject(false);
334
335 if (flat) {
336 switch (flat->type) {
337 case BINDER_TYPE_BINDER:
338 *out = reinterpret_cast<IBinder*>(flat->cookie);
339 return finish_unflatten_binder(NULL, *flat, in);
340 case BINDER_TYPE_HANDLE:
341 *out = proc->getStrongProxyForHandle(flat->handle);
342 return finish_unflatten_binder(
343 static_cast<BpBinder*>(out->get()), *flat, in);
344 }
345 }
346 return BAD_TYPE;
347 }
348
349 status_t unflatten_binder(const sp<ProcessState>& proc,
350 const Parcel& in, wp<IBinder>* out)
351 {
352 const flat_binder_object* flat = in.readObject(false);
353
354 if (flat) {
355 switch (flat->type) {
356 case BINDER_TYPE_BINDER:
357 *out = reinterpret_cast<IBinder*>(flat->cookie);
358 return finish_unflatten_binder(NULL, *flat, in);
359 case BINDER_TYPE_WEAK_BINDER:
360 if (flat->binder != 0) {
361 out->set_object_and_refs(
362 reinterpret_cast<IBinder*>(flat->cookie),
363 reinterpret_cast<RefBase::weakref_type*>(flat->binder));
364 } else {
365 *out = NULL;
366 }
367 return finish_unflatten_binder(NULL, *flat, in);
368 case BINDER_TYPE_HANDLE:
369 case BINDER_TYPE_WEAK_HANDLE:
370 *out = proc->getWeakProxyForHandle(flat->handle);
371 return finish_unflatten_binder(
372 static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
373 }
374 }
375 return BAD_TYPE;
376 }
377
378 // ---------------------------------------------------------------------------
379
380 Parcel::Parcel()
381 {
382 LOG_ALLOC("Parcel %p: constructing", this);
383 initState();
384 }
385
386 Parcel::~Parcel()
387 {
388 freeDataNoInit();
389 LOG_ALLOC("Parcel %p: destroyed", this);
390 }
391
392 size_t Parcel::getGlobalAllocSize() {
393 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
394 size_t size = gParcelGlobalAllocSize;
395 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
396 return size;
397 }
398
399 size_t Parcel::getGlobalAllocCount() {
400 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
401 size_t count = gParcelGlobalAllocCount;
402 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
403 return count;
404 }
405
406 const uint8_t* Parcel::data() const
407 {
408 return mData;
409 }
410
411 size_t Parcel::dataSize() const
412 {
413 return (mDataSize > mDataPos ? mDataSize : mDataPos);
414 }
415
416 size_t Parcel::dataAvail() const
417 {
418 size_t result = dataSize() - dataPosition();
419 if (result > INT32_MAX) {
420 abort();
421 }
422 return result;
423 }
424
425 size_t Parcel::dataPosition() const
426 {
427 return mDataPos;
428 }
429
430 size_t Parcel::dataCapacity() const
431 {
432 return mDataCapacity;
433 }
434
435 status_t Parcel::setDataSize(size_t size)
436 {
437 if (size > INT32_MAX) {
438 // don't accept size_t values which may have come from an
439 // inadvertent conversion from a negative int.
440 return BAD_VALUE;
441 }
442
443 status_t err;
444 err = continueWrite(size);
445 if (err == NO_ERROR) {
446 mDataSize = size;
447 ALOGV("setDataSize Setting data size of %p to %zu", this, mDataSize);
448 }
449 return err;
450 }
451
452 void Parcel::setDataPosition(size_t pos) const
453 {
454 if (pos > INT32_MAX) {
455 // don't accept size_t values which may have come from an
456 // inadvertent conversion from a negative int.
457 abort();
458 }
459
460 mDataPos = pos;
461 mNextObjectHint = 0;
462 }
463
464 status_t Parcel::setDataCapacity(size_t size)
465 {
466 if (size > INT32_MAX) {
467 // don't accept size_t values which may have come from an
468 // inadvertent conversion from a negative int.
469 return BAD_VALUE;
470 }
471
472 if (size > mDataCapacity) return continueWrite(size);
473 return NO_ERROR;
474 }
475
476 status_t Parcel::setData(const uint8_t* buffer, size_t len)
477 {
478 if (len > INT32_MAX) {
479 // don't accept size_t values which may have come from an
480 // inadvertent conversion from a negative int.
481 return BAD_VALUE;
482 }
483
484 status_t err = restartWrite(len);
485 if (err == NO_ERROR) {
486 memcpy(const_cast<uint8_t*>(data()), buffer, len);
487 mDataSize = len;
488 mFdsKnown = false;
489 }
490 return err;
491 }
492
493 status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
494 {
495 const sp<ProcessState> proc(ProcessState::self());
496 status_t err;
497 const uint8_t *data = parcel->mData;
498 const binder_size_t *objects = parcel->mObjects;
499 size_t size = parcel->mObjectsSize;
500 int startPos = mDataPos;
501 int firstIndex = -1, lastIndex = -2;
502
503 if (len == 0) {
504 return NO_ERROR;
505 }
506
507 if (len > INT32_MAX) {
508 // don't accept size_t values which may have come from an
509 // inadvertent conversion from a negative int.
510 return BAD_VALUE;
511 }
512
513 // range checks against the source parcel size
514 if ((offset > parcel->mDataSize)
515 || (len > parcel->mDataSize)
516 || (offset + len > parcel->mDataSize)) {
517 return BAD_VALUE;
518 }
519
520 // Count objects in range
521 for (int i = 0; i < (int) size; i++) {
522 size_t off = objects[i];
523 if ((off >= offset) && (off + sizeof(flat_binder_object) <= offset + len)) {
524 if (firstIndex == -1) {
525 firstIndex = i;
526 }
527 lastIndex = i;
528 }
529 }
530 int numObjects = lastIndex - firstIndex + 1;
531
532 if ((mDataSize+len) > mDataCapacity) {
533 // grow data
534 err = growData(len);
535 if (err != NO_ERROR) {
536 return err;
537 }
538 }
539
540 // append data
541 memcpy(mData + mDataPos, data + offset, len);
542 mDataPos += len;
543 mDataSize += len;
544
545 err = NO_ERROR;
546
547 if (numObjects > 0) {
548 // grow objects
549 if (mObjectsCapacity < mObjectsSize + numObjects) {
550 size_t newSize = ((mObjectsSize + numObjects)*3)/2;
551 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
552 binder_size_t *objects =
553 (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
554 if (objects == (binder_size_t*)0) {
555 return NO_MEMORY;
556 }
557 mObjects = objects;
558 mObjectsCapacity = newSize;
559 }
560
561 // append and acquire objects
562 int idx = mObjectsSize;
563 for (int i = firstIndex; i <= lastIndex; i++) {
564 size_t off = objects[i] - offset + startPos;
565 mObjects[idx++] = off;
566 mObjectsSize++;
567
568 flat_binder_object* flat
569 = reinterpret_cast<flat_binder_object*>(mData + off);
570 acquire_object(proc, *flat, this, &mOpenAshmemSize);
571
572 if (flat->type == BINDER_TYPE_FD) {
573 // If this is a file descriptor, we need to dup it so the
574 // new Parcel now owns its own fd, and can declare that we
575 // officially know we have fds.
576 flat->handle = dup(flat->handle);
577 flat->cookie = 1;
578 mHasFds = mFdsKnown = true;
579 if (!mAllowFds) {
580 err = FDS_NOT_ALLOWED;
581 }
582 }
583 }
584 }
585
586 return err;
587 }
588
589 bool Parcel::allowFds() const
590 {
591 return mAllowFds;
592 }
593
594 bool Parcel::pushAllowFds(bool allowFds)
595 {
596 const bool origValue = mAllowFds;
597 if (!allowFds) {
598 mAllowFds = false;
599 }
600 return origValue;
601 }
602
603 void Parcel::restoreAllowFds(bool lastValue)
604 {
605 mAllowFds = lastValue;
606 }
607
608 bool Parcel::hasFileDescriptors() const
609 {
610 if (!mFdsKnown) {
611 scanForFds();
612 }
613 return mHasFds;
614 }
615
616 // Write RPC headers. (previously just the interface token)
617 status_t Parcel::writeInterfaceToken(const String16& interface)
618 {
619 writeInt32(IPCThreadState::self()->getStrictModePolicy() |
620 STRICT_MODE_PENALTY_GATHER);
621 // currently the interface identification token is just its name as a string
622 return writeString16(interface);
623 }
624
625 bool Parcel::checkInterface(IBinder* binder) const
626 {
627 return enforceInterface(binder->getInterfaceDescriptor());
628 }
629
630 bool Parcel::enforceInterface(const String16& interface,
631 IPCThreadState* threadState) const
632 {
633 int32_t strictPolicy = readInt32();
634 if (threadState == NULL) {
635 threadState = IPCThreadState::self();
636 }
637 if ((threadState->getLastTransactionBinderFlags() &
638 IBinder::FLAG_ONEWAY) != 0) {
639 // For one-way calls, the callee is running entirely
640 // disconnected from the caller, so disable StrictMode entirely.
641 // Not only does disk/network usage not impact the caller, but
642 // there's no way to communicate back any violations anyway.
643 threadState->setStrictModePolicy(0);
644 } else {
645 threadState->setStrictModePolicy(strictPolicy);
646 }
647 const String16 str(readString16());
648 if (str == interface) {
649 return true;
650 } else {
651 ALOGW("**** enforceInterface() expected '%s' but read '%s'",
652 String8(interface).string(), String8(str).string());
653 return false;
654 }
655 }
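// A minimal usage sketch (IMyService and TRANSACTION_foo are hypothetical
// names): the proxy writes the RPC header first, and the stub validates it
// before reading any arguments.
//
//   // proxy side
//   Parcel data, reply;
//   data.writeInterfaceToken(IMyService::getInterfaceDescriptor());
//   data.writeInt32(42);
//   remote()->transact(TRANSACTION_foo, data, &reply);
//
//   // stub side, in onTransact()
//   if (!data.enforceInterface(getInterfaceDescriptor())) {
//       return PERMISSION_DENIED;
//   }
//   int32_t arg = data.readInt32();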
656
657 const binder_size_t* Parcel::objects() const
658 {
659 return mObjects;
660 }
661
662 size_t Parcel::objectsCount() const
663 {
664 return mObjectsSize;
665 }
666
667 status_t Parcel::errorCheck() const
668 {
669 return mError;
670 }
671
672 void Parcel::setError(status_t err)
673 {
674 mError = err;
675 }
676
677 status_t Parcel::finishWrite(size_t len)
678 {
679 if (len > INT32_MAX) {
680 // don't accept size_t values which may have come from an
681 // inadvertent conversion from a negative int.
682 return BAD_VALUE;
683 }
684
685 //printf("Finish write of %d\n", len);
686 mDataPos += len;
687 ALOGV("finishWrite Setting data pos of %p to %zu", this, mDataPos);
688 if (mDataPos > mDataSize) {
689 mDataSize = mDataPos;
690 ALOGV("finishWrite Setting data size of %p to %zu", this, mDataSize);
691 }
692 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
693 return NO_ERROR;
694 }
695
696 status_t Parcel::writeUnpadded(const void* data, size_t len)
697 {
698 if (len > INT32_MAX) {
699 // don't accept size_t values which may have come from an
700 // inadvertent conversion from a negative int.
701 return BAD_VALUE;
702 }
703
704 size_t end = mDataPos + len;
705 if (end < mDataPos) {
706 // integer overflow
707 return BAD_VALUE;
708 }
709
710 if (end <= mDataCapacity) {
711 restart_write:
712 memcpy(mData+mDataPos, data, len);
713 return finishWrite(len);
714 }
715
716 status_t err = growData(len);
717 if (err == NO_ERROR) goto restart_write;
718 return err;
719 }
720
721 status_t Parcel::write(const void* data, size_t len)
722 {
723 if (len > INT32_MAX) {
724 // don't accept size_t values which may have come from an
725 // inadvertent conversion from a negative int.
726 return BAD_VALUE;
727 }
728
729 void* const d = writeInplace(len);
730 if (d) {
731 memcpy(d, data, len);
732 return NO_ERROR;
733 }
734 return mError;
735 }
736
737 void* Parcel::writeInplace(size_t len)
738 {
739 if (len > INT32_MAX) {
740 // don't accept size_t values which may have come from an
741 // inadvertent conversion from a negative int.
742 return NULL;
743 }
744
745 const size_t padded = pad_size(len);
746
747 // sanity check for integer overflow
748 if (mDataPos+padded < mDataPos) {
749 return NULL;
750 }
751
752 if ((mDataPos+padded) <= mDataCapacity) {
753 restart_write:
754 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
755 uint8_t* const data = mData+mDataPos;
756
757 // Need to pad at end?
758 if (padded != len) {
759 #if BYTE_ORDER == BIG_ENDIAN
760 static const uint32_t mask[4] = {
761 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
762 };
763 #endif
764 #if BYTE_ORDER == LITTLE_ENDIAN
765 static const uint32_t mask[4] = {
766 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
767 };
768 #endif
769 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
770 // *reinterpret_cast<void**>(data+padded-4));
771 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
772 }
773
774 finishWrite(padded);
775 return data;
776 }
777
778 status_t err = growData(padded);
779 if (err == NO_ERROR) goto restart_write;
780 return NULL;
781 }
782
783 status_t Parcel::writeUtf8AsUtf16(const std::string& str) {
784 const uint8_t* strData = (uint8_t*)str.data();
785 const size_t strLen = str.length();
786 const ssize_t utf16Len = utf8_to_utf16_length(strData, strLen);
787 if (utf16Len < 0 || utf16Len > std::numeric_limits<int32_t>::max()) {
788 return BAD_VALUE;
789 }
790
791 status_t err = writeInt32(utf16Len);
792 if (err) {
793 return err;
794 }
795
796 // Allocate enough bytes to hold our converted string and its terminating NULL.
797 void* dst = writeInplace((utf16Len + 1) * sizeof(char16_t));
798 if (!dst) {
799 return NO_MEMORY;
800 }
801
802 utf8_to_utf16(strData, strLen, (char16_t*)dst);
803
804 return NO_ERROR;
805 }
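// On the wire this matches writeString16(): a 32-bit length in char16_t units
// (excluding the terminator), followed by the UTF-16 data plus a NUL char16_t,
// padded out to a 4-byte boundary. readUtf8FromUtf16() performs the reverse
// conversion on the reader's side.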
806
807 status_t Parcel::writeUtf8AsUtf16(const std::unique_ptr<std::string>& str) {
808 if (!str) {
809 return writeInt32(-1);
810 }
811 return writeUtf8AsUtf16(*str);
812 }
813
814 namespace {
815
816 template<typename T>
817 status_t writeByteVectorInternal(Parcel* parcel, const std::vector<T>& val)
818 {
819 status_t status;
820 if (val.size() > std::numeric_limits<int32_t>::max()) {
821 status = BAD_VALUE;
822 return status;
823 }
824
825 status = parcel->writeInt32(val.size());
826 if (status != OK) {
827 return status;
828 }
829
830 void* data = parcel->writeInplace(val.size());
831 if (!data) {
832 status = BAD_VALUE;
833 return status;
834 }
835
836 memcpy(data, val.data(), val.size());
837 return status;
838 }
839
840 template<typename T>
841 status_t writeByteVectorInternalPtr(Parcel* parcel,
842 const std::unique_ptr<std::vector<T>>& val)
843 {
844 if (!val) {
845 return parcel->writeInt32(-1);
846 }
847
848 return writeByteVectorInternal(parcel, *val);
849 }
850
851 } // namespace
852
853 status_t Parcel::writeByteVector(const std::vector<int8_t>& val) {
854 return writeByteVectorInternal(this, val);
855 }
856
857 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<int8_t>>& val)
858 {
859 return writeByteVectorInternalPtr(this, val);
860 }
861
862 status_t Parcel::writeByteVector(const std::vector<uint8_t>& val) {
863 return writeByteVectorInternal(this, val);
864 }
865
866 status_t Parcel::writeByteVector(const std::unique_ptr<std::vector<uint8_t>>& val)
867 {
868 return writeByteVectorInternalPtr(this, val);
869 }
870
871 status_t Parcel::writeInt32Vector(const std::vector<int32_t>& val)
872 {
873 return writeTypedVector(val, &Parcel::writeInt32);
874 }
875
876 status_t Parcel::writeInt32Vector(const std::unique_ptr<std::vector<int32_t>>& val)
877 {
878 return writeNullableTypedVector(val, &Parcel::writeInt32);
879 }
880
881 status_t Parcel::writeInt64Vector(const std::vector<int64_t>& val)
882 {
883 return writeTypedVector(val, &Parcel::writeInt64);
884 }
885
886 status_t Parcel::writeInt64Vector(const std::unique_ptr<std::vector<int64_t>>& val)
887 {
888 return writeNullableTypedVector(val, &Parcel::writeInt64);
889 }
890
891 status_t Parcel::writeFloatVector(const std::vector<float>& val)
892 {
893 return writeTypedVector(val, &Parcel::writeFloat);
894 }
895
896 status_t Parcel::writeFloatVector(const std::unique_ptr<std::vector<float>>& val)
897 {
898 return writeNullableTypedVector(val, &Parcel::writeFloat);
899 }
900
901 status_t Parcel::writeDoubleVector(const std::vector<double>& val)
902 {
903 return writeTypedVector(val, &Parcel::writeDouble);
904 }
905
906 status_t Parcel::writeDoubleVector(const std::unique_ptr<std::vector<double>>& val)
907 {
908 return writeNullableTypedVector(val, &Parcel::writeDouble);
909 }
910
911 status_t Parcel::writeBoolVector(const std::vector<bool>& val)
912 {
913 return writeTypedVector(val, &Parcel::writeBool);
914 }
915
916 status_t Parcel::writeBoolVector(const std::unique_ptr<std::vector<bool>>& val)
917 {
918 return writeNullableTypedVector(val, &Parcel::writeBool);
919 }
920
921 status_t Parcel::writeCharVector(const std::vector<char16_t>& val)
922 {
923 return writeTypedVector(val, &Parcel::writeChar);
924 }
925
926 status_t Parcel::writeCharVector(const std::unique_ptr<std::vector<char16_t>>& val)
927 {
928 return writeNullableTypedVector(val, &Parcel::writeChar);
929 }
930
931 status_t Parcel::writeString16Vector(const std::vector<String16>& val)
932 {
933 return writeTypedVector(val, &Parcel::writeString16);
934 }
935
936 status_t Parcel::writeString16Vector(
937 const std::unique_ptr<std::vector<std::unique_ptr<String16>>>& val)
938 {
939 return writeNullableTypedVector(val, &Parcel::writeString16);
940 }
941
942 status_t Parcel::writeUtf8VectorAsUtf16Vector(
943 const std::unique_ptr<std::vector<std::unique_ptr<std::string>>>& val) {
944 return writeNullableTypedVector(val, &Parcel::writeUtf8AsUtf16);
945 }
946
947 status_t Parcel::writeUtf8VectorAsUtf16Vector(const std::vector<std::string>& val) {
948 return writeTypedVector(val, &Parcel::writeUtf8AsUtf16);
949 }
950
951 status_t Parcel::writeInt32(int32_t val)
952 {
953 return writeAligned(val);
954 }
955
956 status_t Parcel::writeUint32(uint32_t val)
957 {
958 return writeAligned(val);
959 }
960
961 status_t Parcel::writeInt32Array(size_t len, const int32_t *val) {
962 if (len > INT32_MAX) {
963 // don't accept size_t values which may have come from an
964 // inadvertent conversion from a negative int.
965 return BAD_VALUE;
966 }
967
968 if (!val) {
969 return writeInt32(-1);
970 }
971 status_t ret = writeInt32(static_cast<uint32_t>(len));
972 if (ret == NO_ERROR) {
973 ret = write(val, len * sizeof(*val));
974 }
975 return ret;
976 }
977 status_t Parcel::writeByteArray(size_t len, const uint8_t *val) {
978 if (len > INT32_MAX) {
979 // don't accept size_t values which may have come from an
980 // inadvertent conversion from a negative int.
981 return BAD_VALUE;
982 }
983
984 if (!val) {
985 return writeInt32(-1);
986 }
987 status_t ret = writeInt32(static_cast<uint32_t>(len));
988 if (ret == NO_ERROR) {
989 ret = write(val, len * sizeof(*val));
990 }
991 return ret;
992 }
993
994 status_t Parcel::writeBool(bool val)
995 {
996 return writeInt32(int32_t(val));
997 }
998
999 status_t Parcel::writeChar(char16_t val)
1000 {
1001 return writeInt32(int32_t(val));
1002 }
1003
1004 status_t Parcel::writeByte(int8_t val)
1005 {
1006 return writeInt32(int32_t(val));
1007 }
1008
1009 status_t Parcel::writeInt64(int64_t val)
1010 {
1011 return writeAligned(val);
1012 }
1013
1014 status_t Parcel::writeUint64(uint64_t val)
1015 {
1016 return writeAligned(val);
1017 }
1018
1019 status_t Parcel::writePointer(uintptr_t val)
1020 {
1021 return writeAligned<binder_uintptr_t>(val);
1022 }
1023
1024 status_t Parcel::writeFloat(float val)
1025 {
1026 return writeAligned(val);
1027 }
1028
1029 #if defined(__mips__) && defined(__mips_hard_float)
1030
1031 status_t Parcel::writeDouble(double val)
1032 {
1033 union {
1034 double d;
1035 unsigned long long ll;
1036 } u;
1037 u.d = val;
1038 return writeAligned(u.ll);
1039 }
1040
1041 #else
1042
1043 status_t Parcel::writeDouble(double val)
1044 {
1045 return writeAligned(val);
1046 }
1047
1048 #endif
1049
1050 status_t Parcel::writeCString(const char* str)
1051 {
1052 return write(str, strlen(str)+1);
1053 }
1054
1055 status_t Parcel::writeString8(const String8& str)
1056 {
1057 status_t err = writeInt32(str.bytes());
1058 // only write string if its length is more than zero characters,
1059 // as readString8 will only read if the length field is non-zero.
1060 // this is slightly different from how writeString16 works.
1061 if (str.bytes() > 0 && err == NO_ERROR) {
1062 err = write(str.string(), str.bytes()+1);
1063 }
1064 return err;
1065 }
1066
1067 status_t Parcel::writeString16(const std::unique_ptr<String16>& str)
1068 {
1069 if (!str) {
1070 return writeInt32(-1);
1071 }
1072
1073 return writeString16(*str);
1074 }
1075
1076 status_t Parcel::writeString16(const String16& str)
1077 {
1078 return writeString16(str.string(), str.size());
1079 }
1080
1081 status_t Parcel::writeString16(const char16_t* str, size_t len)
1082 {
1083 if (str == NULL) return writeInt32(-1);
1084
1085 status_t err = writeInt32(len);
1086 if (err == NO_ERROR) {
1087 len *= sizeof(char16_t);
1088 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
1089 if (data) {
1090 memcpy(data, str, len);
1091 *reinterpret_cast<char16_t*>(data+len) = 0;
1092 return NO_ERROR;
1093 }
1094 err = mError;
1095 }
1096 return err;
1097 }
1098
1099 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
1100 {
1101 return flatten_binder(ProcessState::self(), val, this);
1102 }
1103
1104 status_t Parcel::writeStrongBinderVector(const std::vector<sp<IBinder>>& val)
1105 {
1106 return writeTypedVector(val, &Parcel::writeStrongBinder);
1107 }
1108
1109 status_t Parcel::writeStrongBinderVector(const std::unique_ptr<std::vector<sp<IBinder>>>& val)
1110 {
1111 return writeNullableTypedVector(val, &Parcel::writeStrongBinder);
1112 }
1113
1114 status_t Parcel::readStrongBinderVector(std::unique_ptr<std::vector<sp<IBinder>>>* val) const {
1115 return readNullableTypedVector(val, &Parcel::readStrongBinder);
1116 }
1117
1118 status_t Parcel::readStrongBinderVector(std::vector<sp<IBinder>>* val) const {
1119 return readTypedVector(val, &Parcel::readStrongBinder);
1120 }
1121
1122 status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
1123 {
1124 return flatten_binder(ProcessState::self(), val, this);
1125 }
1126
1127 status_t Parcel::writeRawNullableParcelable(const Parcelable* parcelable) {
1128 if (!parcelable) {
1129 return writeInt32(0);
1130 }
1131
1132 return writeParcelable(*parcelable);
1133 }
1134
1135 status_t Parcel::writeParcelable(const Parcelable& parcelable) {
1136 status_t status = writeInt32(1); // parcelable is not null.
1137 if (status != OK) {
1138 return status;
1139 }
1140 return parcelable.writeToParcel(this);
1141 }
1142
1143 status_t Parcel::writeNativeHandle(const native_handle* handle)
1144 {
1145 if (!handle || handle->version != sizeof(native_handle))
1146 return BAD_TYPE;
1147
1148 status_t err;
1149 err = writeInt32(handle->numFds);
1150 if (err != NO_ERROR) return err;
1151
1152 err = writeInt32(handle->numInts);
1153 if (err != NO_ERROR) return err;
1154
1155 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
1156 err = writeDupFileDescriptor(handle->data[i]);
1157
1158 if (err != NO_ERROR) {
1159 ALOGD("write native handle, write dup fd failed");
1160 return err;
1161 }
1162 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
1163 return err;
1164 }
1165
1166 status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
1167 {
1168 flat_binder_object obj;
1169 obj.type = BINDER_TYPE_FD;
1170 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
1171 obj.binder = 0; /* Don't pass uninitialized stack data to a remote process */
1172 obj.handle = fd;
1173 obj.cookie = takeOwnership ? 1 : 0;
1174 return writeObject(obj, true);
1175 }
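// Ownership note: a cookie of 1 marks the descriptor as owned by the Parcel,
// so release_object() will close() it when the Parcel is freed.
// writeDupFileDescriptor() below dup()s the caller's fd first, leaving the
// original untouched.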
1176
1177 status_t Parcel::writeDupFileDescriptor(int fd)
1178 {
1179 int dupFd = dup(fd);
1180 if (dupFd < 0) {
1181 return -errno;
1182 }
1183 status_t err = writeFileDescriptor(dupFd, true /*takeOwnership*/);
1184 if (err != OK) {
1185 close(dupFd);
1186 }
1187 return err;
1188 }
1189
1190 status_t Parcel::writeUniqueFileDescriptor(const ScopedFd& fd) {
1191 return writeDupFileDescriptor(fd.get());
1192 }
1193
1194 status_t Parcel::writeUniqueFileDescriptorVector(const std::vector<ScopedFd>& val) {
1195 return writeTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1196 }
1197
1198 status_t Parcel::writeUniqueFileDescriptorVector(const std::unique_ptr<std::vector<ScopedFd>>& val) {
1199 return writeNullableTypedVector(val, &Parcel::writeUniqueFileDescriptor);
1200 }
1201
1202 status_t Parcel::writeBlob(size_t len, bool mutableCopy, WritableBlob* outBlob)
1203 {
1204 if (len > INT32_MAX) {
1205 // don't accept size_t values which may have come from an
1206 // inadvertent conversion from a negative int.
1207 return BAD_VALUE;
1208 }
1209
1210 status_t status;
1211 if (!mAllowFds || len <= BLOB_INPLACE_LIMIT) {
1212 ALOGV("writeBlob: write in place");
1213 status = writeInt32(BLOB_INPLACE);
1214 if (status) return status;
1215
1216 void* ptr = writeInplace(len);
1217 if (!ptr) return NO_MEMORY;
1218
1219 outBlob->init(-1, ptr, len, false);
1220 return NO_ERROR;
1221 }
1222
1223 ALOGV("writeBlob: write to ashmem");
1224 int fd = ashmem_create_region("Parcel Blob", len);
1225 if (fd < 0) return NO_MEMORY;
1226
1227 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
1228 if (result < 0) {
1229 status = result;
1230 } else {
1231 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
1232 if (ptr == MAP_FAILED) {
1233 status = -errno;
1234 } else {
1235 if (!mutableCopy) {
1236 result = ashmem_set_prot_region(fd, PROT_READ);
1237 }
1238 if (result < 0) {
1239 status = result;
1240 } else {
1241 status = writeInt32(mutableCopy ? BLOB_ASHMEM_MUTABLE : BLOB_ASHMEM_IMMUTABLE);
1242 if (!status) {
1243 status = writeFileDescriptor(fd, true /*takeOwnership*/);
1244 if (!status) {
1245 outBlob->init(fd, ptr, len, mutableCopy);
1246 return NO_ERROR;
1247 }
1248 }
1249 }
1250 }
1251 ::munmap(ptr, len);
1252 }
1253 ::close(fd);
1254 return status;
1255 }
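// A minimal usage sketch for writeBlob(), assuming the WritableBlob
// data()/release() accessors and hypothetical parcel/payload variables.
// Payloads up to BLOB_INPLACE_LIMIT are copied inline; larger ones are
// carried in an ashmem region whose fd is written into the Parcel:
//
//   Parcel::WritableBlob blob;
//   if (parcel.writeBlob(payloadSize, false /*mutableCopy*/, &blob) == NO_ERROR) {
//       memcpy(blob.data(), payload, payloadSize);
//       blob.release();
//   }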
1256
1257 status_t Parcel::writeDupImmutableBlobFileDescriptor(int fd)
1258 {
1259 // Must match up with what's done in writeBlob.
1260 if (!mAllowFds) return FDS_NOT_ALLOWED;
1261 status_t status = writeInt32(BLOB_ASHMEM_IMMUTABLE);
1262 if (status) return status;
1263 return writeDupFileDescriptor(fd);
1264 }
1265
1266 status_t Parcel::write(const FlattenableHelperInterface& val)
1267 {
1268 status_t err;
1269
1270 // size if needed
1271 const size_t len = val.getFlattenedSize();
1272 const size_t fd_count = val.getFdCount();
1273
1274 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
1275 // don't accept size_t values which may have come from an
1276 // inadvertent conversion from a negative int.
1277 return BAD_VALUE;
1278 }
1279
1280 err = this->writeInt32(len);
1281 if (err) return err;
1282
1283 err = this->writeInt32(fd_count);
1284 if (err) return err;
1285
1286 // payload
1287 void* const buf = this->writeInplace(pad_size(len));
1288 if (buf == NULL)
1289 return BAD_VALUE;
1290
1291 int* fds = NULL;
1292 if (fd_count) {
1293 fds = new (std::nothrow) int[fd_count];
1294 if (fds == nullptr) {
1295 ALOGE("write: failed to allocate requested %zu fds", fd_count);
1296 return BAD_VALUE;
1297 }
1298 }
1299
1300 err = val.flatten(buf, len, fds, fd_count);
1301 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1302 err = this->writeDupFileDescriptor( fds[i] );
1303 }
1304
1305 if (fd_count) {
1306 delete [] fds;
1307 }
1308
1309 return err;
1310 }
1311
1312 status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
1313 {
1314 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
1315 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
1316 if (enoughData && enoughObjects) {
1317 restart_write:
1318 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
1319
1320 // remember if it's a file descriptor
1321 if (val.type == BINDER_TYPE_FD) {
1322 if (!mAllowFds) {
1323 // fail before modifying our object index
1324 return FDS_NOT_ALLOWED;
1325 }
1326 mHasFds = mFdsKnown = true;
1327 }
1328
1329 // Need to write meta-data?
1330 if (nullMetaData || val.binder != 0) {
1331 mObjects[mObjectsSize] = mDataPos;
1332 acquire_object(ProcessState::self(), val, this, &mOpenAshmemSize);
1333 mObjectsSize++;
1334 }
1335
1336 return finishWrite(sizeof(flat_binder_object));
1337 }
1338
1339 if (!enoughData) {
1340 const status_t err = growData(sizeof(val));
1341 if (err != NO_ERROR) return err;
1342 }
1343 if (!enoughObjects) {
1344 size_t newSize = ((mObjectsSize+2)*3)/2;
1345 if (newSize < mObjectsSize) return NO_MEMORY; // overflow
1346 binder_size_t* objects = (binder_size_t*)realloc(mObjects, newSize*sizeof(binder_size_t));
1347 if (objects == NULL) return NO_MEMORY;
1348 mObjects = objects;
1349 mObjectsCapacity = newSize;
1350 }
1351
1352 goto restart_write;
1353 }
1354
1355 status_t Parcel::writeNoException()
1356 {
1357 binder::Status status;
1358 return status.writeToParcel(this);
1359 }
1360
1361 void Parcel::remove(size_t /*start*/, size_t /*amt*/)
1362 {
1363 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
1364 }
1365
1366 status_t Parcel::read(void* outData, size_t len) const
1367 {
1368 if (len > INT32_MAX) {
1369 // don't accept size_t values which may have come from an
1370 // inadvertent conversion from a negative int.
1371 return BAD_VALUE;
1372 }
1373
1374 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1375 && len <= pad_size(len)) {
1376 memcpy(outData, mData+mDataPos, len);
1377 mDataPos += pad_size(len);
1378 ALOGV("read Setting data pos of %p to %zu", this, mDataPos);
1379 return NO_ERROR;
1380 }
1381 return NOT_ENOUGH_DATA;
1382 }
1383
1384 const void* Parcel::readInplace(size_t len) const
1385 {
1386 if (len > INT32_MAX) {
1387 // don't accept size_t values which may have come from an
1388 // inadvertent conversion from a negative int.
1389 return NULL;
1390 }
1391
1392 if ((mDataPos+pad_size(len)) >= mDataPos && (mDataPos+pad_size(len)) <= mDataSize
1393 && len <= pad_size(len)) {
1394 const void* data = mData+mDataPos;
1395 mDataPos += pad_size(len);
1396 ALOGV("readInplace Setting data pos of %p to %zu", this, mDataPos);
1397 return data;
1398 }
1399 return NULL;
1400 }
1401
1402 template<class T>
1403 status_t Parcel::readAligned(T *pArg) const {
1404 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1405
1406 if ((mDataPos+sizeof(T)) <= mDataSize) {
1407 const void* data = mData+mDataPos;
1408 mDataPos += sizeof(T);
1409 *pArg = *reinterpret_cast<const T*>(data);
1410 return NO_ERROR;
1411 } else {
1412 return NOT_ENOUGH_DATA;
1413 }
1414 }
1415
1416 template<class T>
1417 T Parcel::readAligned() const {
1418 T result;
1419 if (readAligned(&result) != NO_ERROR) {
1420 result = 0;
1421 }
1422
1423 return result;
1424 }
1425
1426 template<class T>
1427 status_t Parcel::writeAligned(T val) {
1428 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE_UNSAFE(sizeof(T)) == sizeof(T));
1429
1430 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
1431 restart_write:
1432 *reinterpret_cast<T*>(mData+mDataPos) = val;
1433 return finishWrite(sizeof(val));
1434 }
1435
1436 status_t err = growData(sizeof(val));
1437 if (err == NO_ERROR) goto restart_write;
1438 return err;
1439 }
1440
1441 namespace {
1442
1443 template<typename T>
1444 status_t readByteVectorInternal(const Parcel* parcel,
1445 std::vector<T>* val) {
1446 val->clear();
1447
1448 int32_t size;
1449 status_t status = parcel->readInt32(&size);
1450
1451 if (status != OK) {
1452 return status;
1453 }
1454
1455 if (size < 0) {
1456 status = UNEXPECTED_NULL;
1457 return status;
1458 }
1459 if (size_t(size) > parcel->dataAvail()) {
1460 status = BAD_VALUE;
1461 return status;
1462 }
1463
1464 const void* data = parcel->readInplace(size);
1465 if (!data) {
1466 status = BAD_VALUE;
1467 return status;
1468 }
1469 val->resize(size);
1470 memcpy(val->data(), data, size);
1471
1472 return status;
1473 }
1474
1475 template<typename T>
1476 status_t readByteVectorInternalPtr(
1477 const Parcel* parcel,
1478 std::unique_ptr<std::vector<T>>* val) {
1479 const int32_t start = parcel->dataPosition();
1480 int32_t size;
1481 status_t status = parcel->readInt32(&size);
1482 val->reset();
1483
1484 if (status != OK || size < 0) {
1485 return status;
1486 }
1487
1488 parcel->setDataPosition(start);
1489 val->reset(new (std::nothrow) std::vector<T>());
1490
1491 status = readByteVectorInternal(parcel, val->get());
1492
1493 if (status != OK) {
1494 val->reset();
1495 }
1496
1497 return status;
1498 }
1499
1500 } // namespace
1501
1502 status_t Parcel::readByteVector(std::vector<int8_t>* val) const {
1503 return readByteVectorInternal(this, val);
1504 }
1505
1506 status_t Parcel::readByteVector(std::vector<uint8_t>* val) const {
1507 return readByteVectorInternal(this, val);
1508 }
1509
1510 status_t Parcel::readByteVector(std::unique_ptr<std::vector<int8_t>>* val) const {
1511 return readByteVectorInternalPtr(this, val);
1512 }
1513
1514 status_t Parcel::readByteVector(std::unique_ptr<std::vector<uint8_t>>* val) const {
1515 return readByteVectorInternalPtr(this, val);
1516 }
1517
1518 status_t Parcel::readInt32Vector(std::unique_ptr<std::vector<int32_t>>* val) const {
1519 return readNullableTypedVector(val, &Parcel::readInt32);
1520 }
1521
1522 status_t Parcel::readInt32Vector(std::vector<int32_t>* val) const {
1523 return readTypedVector(val, &Parcel::readInt32);
1524 }
1525
1526 status_t Parcel::readInt64Vector(std::unique_ptr<std::vector<int64_t>>* val) const {
1527 return readNullableTypedVector(val, &Parcel::readInt64);
1528 }
1529
1530 status_t Parcel::readInt64Vector(std::vector<int64_t>* val) const {
1531 return readTypedVector(val, &Parcel::readInt64);
1532 }
1533
1534 status_t Parcel::readFloatVector(std::unique_ptr<std::vector<float>>* val) const {
1535 return readNullableTypedVector(val, &Parcel::readFloat);
1536 }
1537
1538 status_t Parcel::readFloatVector(std::vector<float>* val) const {
1539 return readTypedVector(val, &Parcel::readFloat);
1540 }
1541
1542 status_t Parcel::readDoubleVector(std::unique_ptr<std::vector<double>>* val) const {
1543 return readNullableTypedVector(val, &Parcel::readDouble);
1544 }
1545
1546 status_t Parcel::readDoubleVector(std::vector<double>* val) const {
1547 return readTypedVector(val, &Parcel::readDouble);
1548 }
1549
1550 status_t Parcel::readBoolVector(std::unique_ptr<std::vector<bool>>* val) const {
1551 const int32_t start = dataPosition();
1552 int32_t size;
1553 status_t status = readInt32(&size);
1554 val->reset();
1555
1556 if (status != OK || size < 0) {
1557 return status;
1558 }
1559
1560 setDataPosition(start);
1561 val->reset(new (std::nothrow) std::vector<bool>());
1562
1563 status = readBoolVector(val->get());
1564
1565 if (status != OK) {
1566 val->reset();
1567 }
1568
1569 return status;
1570 }
1571
1572 status_t Parcel::readBoolVector(std::vector<bool>* val) const {
1573 int32_t size;
1574 status_t status = readInt32(&size);
1575
1576 if (status != OK) {
1577 return status;
1578 }
1579
1580 if (size < 0) {
1581 return UNEXPECTED_NULL;
1582 }
1583
1584 val->resize(size);
1585
1586 /* C++ bool handling means a vector of bools isn't necessarily addressable
1587 * (we might use individual bits)
1588 */
1589 bool data;
1590 for (int32_t i = 0; i < size; ++i) {
1591 status = readBool(&data);
1592 (*val)[i] = data;
1593
1594 if (status != OK) {
1595 return status;
1596 }
1597 }
1598
1599 return OK;
1600 }
1601
1602 status_t Parcel::readCharVector(std::unique_ptr<std::vector<char16_t>>* val) const {
1603 return readNullableTypedVector(val, &Parcel::readChar);
1604 }
1605
1606 status_t Parcel::readCharVector(std::vector<char16_t>* val) const {
1607 return readTypedVector(val, &Parcel::readChar);
1608 }
1609
1610 status_t Parcel::readString16Vector(
1611 std::unique_ptr<std::vector<std::unique_ptr<String16>>>* val) const {
1612 return readNullableTypedVector(val, &Parcel::readString16);
1613 }
1614
1615 status_t Parcel::readString16Vector(std::vector<String16>* val) const {
1616 return readTypedVector(val, &Parcel::readString16);
1617 }
1618
1619 status_t Parcel::readUtf8VectorFromUtf16Vector(
1620 std::unique_ptr<std::vector<std::unique_ptr<std::string>>>* val) const {
1621 return readNullableTypedVector(val, &Parcel::readUtf8FromUtf16);
1622 }
1623
1624 status_t Parcel::readUtf8VectorFromUtf16Vector(std::vector<std::string>* val) const {
1625 return readTypedVector(val, &Parcel::readUtf8FromUtf16);
1626 }
1627
1628 status_t Parcel::readInt32(int32_t *pArg) const
1629 {
1630 return readAligned(pArg);
1631 }
1632
1633 int32_t Parcel::readInt32() const
1634 {
1635 return readAligned<int32_t>();
1636 }
1637
1638 status_t Parcel::readUint32(uint32_t *pArg) const
1639 {
1640 return readAligned(pArg);
1641 }
1642
1643 uint32_t Parcel::readUint32() const
1644 {
1645 return readAligned<uint32_t>();
1646 }
1647
1648 status_t Parcel::readInt64(int64_t *pArg) const
1649 {
1650 return readAligned(pArg);
1651 }
1652
1653
1654 int64_t Parcel::readInt64() const
1655 {
1656 return readAligned<int64_t>();
1657 }
1658
1659 status_t Parcel::readUint64(uint64_t *pArg) const
1660 {
1661 return readAligned(pArg);
1662 }
1663
1664 uint64_t Parcel::readUint64() const
1665 {
1666 return readAligned<uint64_t>();
1667 }
1668
1669 status_t Parcel::readPointer(uintptr_t *pArg) const
1670 {
1671 status_t ret;
1672 binder_uintptr_t ptr;
1673 ret = readAligned(&ptr);
1674 if (!ret)
1675 *pArg = ptr;
1676 return ret;
1677 }
1678
1679 uintptr_t Parcel::readPointer() const
1680 {
1681 return readAligned<binder_uintptr_t>();
1682 }
1683
1684
1685 status_t Parcel::readFloat(float *pArg) const
1686 {
1687 return readAligned(pArg);
1688 }
1689
1690
1691 float Parcel::readFloat() const
1692 {
1693 return readAligned<float>();
1694 }
1695
1696 #if defined(__mips__) && defined(__mips_hard_float)
1697
1698 status_t Parcel::readDouble(double *pArg) const
1699 {
1700 union {
1701 double d;
1702 unsigned long long ll;
1703 } u;
1704 u.d = 0;
1705 status_t status;
1706 status = readAligned(&u.ll);
1707 *pArg = u.d;
1708 return status;
1709 }
1710
1711 double Parcel::readDouble() const
1712 {
1713 union {
1714 double d;
1715 unsigned long long ll;
1716 } u;
1717 u.ll = readAligned<unsigned long long>();
1718 return u.d;
1719 }
1720
1721 #else
1722
1723 status_t Parcel::readDouble(double *pArg) const
1724 {
1725 return readAligned(pArg);
1726 }
1727
1728 double Parcel::readDouble() const
1729 {
1730 return readAligned<double>();
1731 }
1732
1733 #endif
1734
1735 status_t Parcel::readIntPtr(intptr_t *pArg) const
1736 {
1737 return readAligned(pArg);
1738 }
1739
1740
1741 intptr_t Parcel::readIntPtr() const
1742 {
1743 return readAligned<intptr_t>();
1744 }
1745
1746 status_t Parcel::readBool(bool *pArg) const
1747 {
1748 int32_t tmp;
1749 status_t ret = readInt32(&tmp);
1750 *pArg = (tmp != 0);
1751 return ret;
1752 }
1753
1754 bool Parcel::readBool() const
1755 {
1756 return readInt32() != 0;
1757 }
1758
1759 status_t Parcel::readChar(char16_t *pArg) const
1760 {
1761 int32_t tmp;
1762 status_t ret = readInt32(&tmp);
1763 *pArg = char16_t(tmp);
1764 return ret;
1765 }
1766
1767 char16_t Parcel::readChar() const
1768 {
1769 return char16_t(readInt32());
1770 }
1771
1772 status_t Parcel::readByte(int8_t *pArg) const
1773 {
1774 int32_t tmp;
1775 status_t ret = readInt32(&tmp);
1776 *pArg = int8_t(tmp);
1777 return ret;
1778 }
1779
1780 int8_t Parcel::readByte() const
1781 {
1782 return int8_t(readInt32());
1783 }
1784
1785 status_t Parcel::readUtf8FromUtf16(std::string* str) const {
1786 size_t utf16Size = 0;
1787 const char16_t* src = readString16Inplace(&utf16Size);
1788 if (!src) {
1789 return UNEXPECTED_NULL;
1790 }
1791
1792 // Save ourselves the trouble, we're done.
1793 if (utf16Size == 0u) {
1794 str->clear();
1795 return NO_ERROR;
1796 }
1797
1798 // Allow for closing '\0'
1799 ssize_t utf8Size = utf16_to_utf8_length(src, utf16Size) + 1;
1800 if (utf8Size < 1) {
1801 return BAD_VALUE;
1802 }
1803 // Note that while it is probably safe to assume string::resize keeps a
1804 // spare byte around for the trailing null, we still pass the size including the trailing null
1805 str->resize(utf8Size);
1806 utf16_to_utf8(src, utf16Size, &((*str)[0]), utf8Size);
1807 str->resize(utf8Size - 1);
1808 return NO_ERROR;
1809 }
1810
1811 status_t Parcel::readUtf8FromUtf16(std::unique_ptr<std::string>* str) const {
1812 const int32_t start = dataPosition();
1813 int32_t size;
1814 status_t status = readInt32(&size);
1815 str->reset();
1816
1817 if (status != OK || size < 0) {
1818 return status;
1819 }
1820
1821 setDataPosition(start);
1822 str->reset(new (std::nothrow) std::string());
1823 return readUtf8FromUtf16(str->get());
1824 }
1825
1826 const char* Parcel::readCString() const
1827 {
1828 const size_t avail = mDataSize-mDataPos;
1829 if (avail > 0) {
1830 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
1831 // is the string's trailing NUL within the parcel's valid bounds?
1832 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
1833 if (eos) {
1834 const size_t len = eos - str;
1835 mDataPos += pad_size(len+1);
1836 ALOGV("readCString Setting data pos of %p to %zu", this, mDataPos);
1837 return str;
1838 }
1839 }
1840 return NULL;
1841 }
1842
1843 String8 Parcel::readString8() const
1844 {
1845 int32_t size = readInt32();
1846 // watch for potential int overflow adding 1 for trailing NUL
1847 if (size > 0 && size < INT32_MAX) {
1848 const char* str = (const char*)readInplace(size+1);
1849 if (str) return String8(str, size);
1850 }
1851 return String8();
1852 }
1853
1854 String16 Parcel::readString16() const
1855 {
1856 size_t len;
1857 const char16_t* str = readString16Inplace(&len);
1858 if (str) return String16(str, len);
1859 ALOGE("Reading a NULL string is not supported here.");
1860 return String16();
1861 }
1862
1863 status_t Parcel::readString16(std::unique_ptr<String16>* pArg) const
1864 {
1865 const int32_t start = dataPosition();
1866 int32_t size;
1867 status_t status = readInt32(&size);
1868 pArg->reset();
1869
1870 if (status != OK || size < 0) {
1871 return status;
1872 }
1873
1874 setDataPosition(start);
1875 pArg->reset(new (std::nothrow) String16());
1876
1877 status = readString16(pArg->get());
1878
1879 if (status != OK) {
1880 pArg->reset();
1881 }
1882
1883 return status;
1884 }
1885
1886 status_t Parcel::readString16(String16* pArg) const
1887 {
1888 size_t len;
1889 const char16_t* str = readString16Inplace(&len);
1890 if (str) {
1891 pArg->setTo(str, len);
1892 return NO_ERROR;
1893 } else {
1894 *pArg = String16();
1895 return UNEXPECTED_NULL;
1896 }
1897 }
1898
1899 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1900 {
1901 int32_t size = readInt32();
1902 // watch for potential int overflow from size+1
1903 if (size >= 0 && size < INT32_MAX) {
1904 *outLen = size;
1905 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1906 if (str != NULL) {
1907 return str;
1908 }
1909 }
1910 *outLen = 0;
1911 return NULL;
1912 }
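
// Wire format consumed by readString16Inplace(): an int32_t character count
// followed by (count + 1) char16_t values including the terminating NUL;
// readInplace() then rounds the read position up to a 4-byte boundary.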
1913
1914 status_t Parcel::readStrongBinder(sp<IBinder>* val) const
1915 {
1916 return unflatten_binder(ProcessState::self(), *this, val);
1917 }
1918
1919 sp<IBinder> Parcel::readStrongBinder() const
1920 {
1921 sp<IBinder> val;
1922 readStrongBinder(&val);
1923 return val;
1924 }
1925
1926 wp<IBinder> Parcel::readWeakBinder() const
1927 {
1928 wp<IBinder> val;
1929 unflatten_binder(ProcessState::self(), *this, &val);
1930 return val;
1931 }
1932
1933 status_t Parcel::readParcelable(Parcelable* parcelable) const {
1934 int32_t have_parcelable = 0;
1935 status_t status = readInt32(&have_parcelable);
1936 if (status != OK) {
1937 return status;
1938 }
1939 if (!have_parcelable) {
1940 return UNEXPECTED_NULL;
1941 }
1942 return parcelable->readFromParcel(this);
1943 }
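
// Illustrative usage sketch (hypothetical type name, not part of the original
// file): unparceling a structured value that the peer wrote with
// writeParcelable().
//
//   MyStatus status;                          // some Parcelable subclass
//   if (parcel.readParcelable(&status) == UNEXPECTED_NULL) {
//       // the peer wrote a null parcelable
//   }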
1944
1945 int32_t Parcel::readExceptionCode() const
1946 {
1947 binder::Status status;
1948 status.readFromParcel(*this);
1949 return status.exceptionCode();
1950 }
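
// Illustrative usage sketch (assumed proxy-side code, not part of the original
// file): after a transaction, the caller checks whether the remote side threw
// a Java-style exception before reading the payload.
//
//   int32_t exception = reply.readExceptionCode();
//   if (exception != 0) {
//       // remote threw; the reply carries no usable payload
//   }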
1951
1952 native_handle* Parcel::readNativeHandle() const
1953 {
1954 int numFds, numInts;
1955 status_t err;
1956 err = readInt32(&numFds);
1957 if (err != NO_ERROR) return 0;
1958 err = readInt32(&numInts);
1959 if (err != NO_ERROR) return 0;
1960
1961 native_handle* h = native_handle_create(numFds, numInts);
1962 if (!h) {
1963 return 0;
1964 }
1965
1966 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1967 h->data[i] = dup(readFileDescriptor());
1968 if (h->data[i] < 0) {
1969 for (int j = 0; j < i; j++) {
1970 close(h->data[j]);
1971 }
1972 native_handle_delete(h);
1973 return 0;
1974 }
1975 }
1976 err = read(h->data + numFds, sizeof(int)*numInts);
1977 if (err != NO_ERROR) {
1978 native_handle_close(h);
1979 native_handle_delete(h);
1980 h = 0;
1981 }
1982 return h;
1983 }
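
// Ownership note: the returned native_handle and its dup()'ed file descriptors
// belong to the caller, who must release them with native_handle_close()
// followed by native_handle_delete() when done.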
1984
1985
1986 int Parcel::readFileDescriptor() const
1987 {
1988 const flat_binder_object* flat = readObject(true);
1989
1990 if (flat && flat->type == BINDER_TYPE_FD) {
1991 return flat->handle;
1992 }
1993
1994 return BAD_TYPE;
1995 }
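
// Ownership note: the descriptor returned here is still owned by the Parcel
// and remains valid only for the Parcel's lifetime; callers that need to keep
// it must dup() it, as readUniqueFileDescriptor() below does.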
1996
1997 status_t Parcel::readUniqueFileDescriptor(ScopedFd* val) const
1998 {
1999 int got = readFileDescriptor();
2000
2001 if (got == BAD_TYPE) {
2002 return BAD_TYPE;
2003 }
2004
2005 val->reset(dup(got));
2006
2007 if (val->get() < 0) {
2008 return BAD_VALUE;
2009 }
2010
2011 return OK;
2012 }
2013
2014
2015 status_t Parcel::readUniqueFileDescriptorVector(std::unique_ptr<std::vector<ScopedFd>>* val) const {
2016 return readNullableTypedVector(val, &Parcel::readUniqueFileDescriptor);
2017 }
2018
2019 status_t Parcel::readUniqueFileDescriptorVector(std::vector<ScopedFd>* val) const {
2020 return readTypedVector(val, &Parcel::readUniqueFileDescriptor);
2021 }
2022
2023 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
2024 {
2025 int32_t blobType;
2026 status_t status = readInt32(&blobType);
2027 if (status) return status;
2028
2029 if (blobType == BLOB_INPLACE) {
2030 ALOGV("readBlob: read in place");
2031 const void* ptr = readInplace(len);
2032 if (!ptr) return BAD_VALUE;
2033
2034 outBlob->init(-1, const_cast<void*>(ptr), len, false);
2035 return NO_ERROR;
2036 }
2037
2038 ALOGV("readBlob: read from ashmem");
2039 bool isMutable = (blobType == BLOB_ASHMEM_MUTABLE);
2040 int fd = readFileDescriptor();
2041 if (fd == int(BAD_TYPE)) return BAD_VALUE;
2042
2043 void* ptr = ::mmap(NULL, len, isMutable ? PROT_READ | PROT_WRITE : PROT_READ,
2044 MAP_SHARED, fd, 0);
2045 if (ptr == MAP_FAILED) return NO_MEMORY;
2046
2047 outBlob->init(fd, ptr, len, isMutable);
2048 return NO_ERROR;
2049 }
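
// Sketch of the blob protocol as read above: a small blob arrives inline
// (BLOB_INPLACE) and aliases the Parcel's own buffer, while a large blob
// arrives as an ashmem file descriptor that is mmap()'ed here and unmapped
// again by Blob::release().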
2050
2051 status_t Parcel::read(FlattenableHelperInterface& val) const
2052 {
2053 // size
2054 const size_t len = this->readInt32();
2055 const size_t fd_count = this->readInt32();
2056
2057 if ((len > INT32_MAX) || (fd_count >= gMaxFds)) {
2058 // don't accept size_t values which may have come from an
2059 // inadvertent conversion from a negative int.
2060 return BAD_VALUE;
2061 }
2062
2063 // payload
2064 void const* const buf = this->readInplace(pad_size(len));
2065 if (buf == NULL)
2066 return BAD_VALUE;
2067
2068 int* fds = NULL;
2069 if (fd_count) {
2070 fds = new (std::nothrow) int[fd_count];
2071 if (fds == nullptr) {
2072 ALOGE("read: failed to allocate requested %zu fds", fd_count);
2073 return BAD_VALUE;
2074 }
2075 }
2076
2077 status_t err = NO_ERROR;
2078 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
2079 fds[i] = dup(this->readFileDescriptor());
2080 if (fds[i] < 0) {
2081 err = BAD_VALUE;
2082 ALOGE("dup() failed in Parcel::read, i is %zu, fds[i] is %d, fd_count is %zu, error: %s",
2083 i, fds[i], fd_count, strerror(errno));
2084 }
2085 }
2086
2087 if (err == NO_ERROR) {
2088 err = val.unflatten(buf, len, fds, fd_count);
2089 }
2090
2091 if (fd_count) {
2092 delete [] fds;
2093 }
2094
2095 return err;
2096 }
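
// Layout consumed above, mirroring the Flattenable write path: a 4-byte
// length, a 4-byte file-descriptor count, the payload padded to a 4-byte
// boundary, and then each file descriptor as its own binder object.
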
2097 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
2098 {
2099 const size_t DPOS = mDataPos;
2100 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
2101 const flat_binder_object* obj
2102 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
2103 mDataPos = DPOS + sizeof(flat_binder_object);
2104 if (!nullMetaData && (obj->cookie == 0 && obj->binder == 0)) {
2105 // When transferring a NULL object, we don't write it into
2106 // the object list, so we don't want to check for it when
2107 // reading.
2108 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2109 return obj;
2110 }
2111
2112 // Ensure that this object is valid...
2113 binder_size_t* const OBJS = mObjects;
2114 const size_t N = mObjectsSize;
2115 size_t opos = mNextObjectHint;
2116
2117 if (N > 0) {
2118 ALOGV("Parcel %p looking for obj at %zu, hint=%zu",
2119 this, DPOS, opos);
2120
2121 // Start at the current hint position, looking for an object at
2122 // the current data position.
2123 if (opos < N) {
2124 while (opos < (N-1) && OBJS[opos] < DPOS) {
2125 opos++;
2126 }
2127 } else {
2128 opos = N-1;
2129 }
2130 if (OBJS[opos] == DPOS) {
2131 // Found it!
2132 ALOGV("Parcel %p found obj %zu at index %zu with forward search",
2133 this, DPOS, opos);
2134 mNextObjectHint = opos+1;
2135 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2136 return obj;
2137 }
2138
2139 // Look backwards for it...
2140 while (opos > 0 && OBJS[opos] > DPOS) {
2141 opos--;
2142 }
2143 if (OBJS[opos] == DPOS) {
2144 // Found it!
2145 ALOGV("Parcel %p found obj %zu at index %zu with backward search",
2146 this, DPOS, opos);
2147 mNextObjectHint = opos+1;
2148 ALOGV("readObject Setting data pos of %p to %zu", this, mDataPos);
2149 return obj;
2150 }
2151 }
2152 ALOGW("Attempt to read object from Parcel %p at offset %zu that is not in the object list",
2153 this, DPOS);
2154 }
2155 return NULL;
2156 }
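
// Search strategy note: mObjects holds the offsets of all binder objects in
// ascending order, and mNextObjectHint remembers where the previous lookup
// ended, so a Parcel that is read front-to-back finds each object with a
// short forward scan instead of a full search.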
2157
2158 void Parcel::closeFileDescriptors()
2159 {
2160 size_t i = mObjectsSize;
2161 if (i > 0) {
2162 //ALOGI("Closing file descriptors for %zu objects...", i);
2163 }
2164 while (i > 0) {
2165 i--;
2166 const flat_binder_object* flat
2167 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2168 if (flat->type == BINDER_TYPE_FD) {
2169 //ALOGI("Closing fd: %ld", flat->handle);
2170 close(flat->handle);
2171 }
2172 }
2173 }
2174
2175 uintptr_t Parcel::ipcData() const
2176 {
2177 return reinterpret_cast<uintptr_t>(mData);
2178 }
2179
2180 size_t Parcel::ipcDataSize() const
2181 {
2182 return (mDataSize > mDataPos ? mDataSize : mDataPos);
2183 }
2184
2185 uintptr_t Parcel::ipcObjects() const
2186 {
2187 return reinterpret_cast<uintptr_t>(mObjects);
2188 }
2189
2190 size_t Parcel::ipcObjectsCount() const
2191 {
2192 return mObjectsSize;
2193 }
2194
2195 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
2196 const binder_size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
2197 {
2198 binder_size_t minOffset = 0;
2199 freeDataNoInit();
2200 mError = NO_ERROR;
2201 mData = const_cast<uint8_t*>(data);
2202 mDataSize = mDataCapacity = dataSize;
2203 //ALOGI("setDataReference Setting data size of %p to %lu (pid=%d)", this, mDataSize, getpid());
2204 mDataPos = 0;
2205 ALOGV("setDataReference Setting data pos of %p to %zu", this, mDataPos);
2206 mObjects = const_cast<binder_size_t*>(objects);
2207 mObjectsSize = mObjectsCapacity = objectsCount;
2208 mNextObjectHint = 0;
2209 mOwner = relFunc;
2210 mOwnerCookie = relCookie;
2211 for (size_t i = 0; i < mObjectsSize; i++) {
2212 binder_size_t offset = mObjects[i];
2213 if (offset < minOffset) {
2214 ALOGE("%s: bad object offset %" PRIu64 " < %" PRIu64 "\n",
2215 __func__, (uint64_t)offset, (uint64_t)minOffset);
2216 mObjectsSize = 0;
2217 break;
2218 }
2219 minOffset = offset + sizeof(flat_binder_object);
2220 }
2221 scanForFds();
2222 }
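
// Validation note: the loop above requires each object offset to be at least
// sizeof(flat_binder_object) past the previous one, i.e. offsets must be
// ascending and non-overlapping; otherwise the object table is discarded by
// setting mObjectsSize to 0.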
2223
2224 void Parcel::print(TextOutput& to, uint32_t /*flags*/) const
2225 {
2226 to << "Parcel(";
2227
2228 if (errorCheck() != NO_ERROR) {
2229 const status_t err = errorCheck();
2230 to << "Error: " << (void*)(intptr_t)err << " \"" << strerror(-err) << "\"";
2231 } else if (dataSize() > 0) {
2232 const uint8_t* DATA = data();
2233 to << indent << HexDump(DATA, dataSize()) << dedent;
2234 const binder_size_t* OBJS = objects();
2235 const size_t N = objectsCount();
2236 for (size_t i=0; i<N; i++) {
2237 const flat_binder_object* flat
2238 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
2239 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
2240 << TypeCode(flat->type & 0x7f7f7f00)
2241 << " = " << flat->binder;
2242 }
2243 } else {
2244 to << "NULL";
2245 }
2246
2247 to << ")";
2248 }
2249
2250 void Parcel::releaseObjects()
2251 {
2252 const sp<ProcessState> proc(ProcessState::self());
2253 size_t i = mObjectsSize;
2254 uint8_t* const data = mData;
2255 binder_size_t* const objects = mObjects;
2256 while (i > 0) {
2257 i--;
2258 const flat_binder_object* flat
2259 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2260 release_object(proc, *flat, this, &mOpenAshmemSize);
2261 }
2262 }
2263
2264 void Parcel::acquireObjects()
2265 {
2266 const sp<ProcessState> proc(ProcessState::self());
2267 size_t i = mObjectsSize;
2268 uint8_t* const data = mData;
2269 binder_size_t* const objects = mObjects;
2270 while (i > 0) {
2271 i--;
2272 const flat_binder_object* flat
2273 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
2274 acquire_object(proc, *flat, this, &mOpenAshmemSize);
2275 }
2276 }
2277
2278 void Parcel::freeData()
2279 {
2280 freeDataNoInit();
2281 initState();
2282 }
2283
2284 void Parcel::freeDataNoInit()
2285 {
2286 if (mOwner) {
2287 LOG_ALLOC("Parcel %p: freeing other owner data", this);
2288 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2289 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2290 } else {
2291 LOG_ALLOC("Parcel %p: freeing allocated data", this);
2292 releaseObjects();
2293 if (mData) {
2294 LOG_ALLOC("Parcel %p: freeing with %zu capacity", this, mDataCapacity);
2295 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2296 if (mDataCapacity <= gParcelGlobalAllocSize) {
2297 gParcelGlobalAllocSize = gParcelGlobalAllocSize - mDataCapacity;
2298 } else {
2299 gParcelGlobalAllocSize = 0;
2300 }
2301 if (gParcelGlobalAllocCount > 0) {
2302 gParcelGlobalAllocCount--;
2303 }
2304 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2305 free(mData);
2306 }
2307 if (mObjects) free(mObjects);
2308 }
2309 }
2310
2311 status_t Parcel::growData(size_t len)
2312 {
2313 if (len > INT32_MAX) {
2314 // don't accept size_t values which may have come from an
2315 // inadvertent conversion from a negative int.
2316 return BAD_VALUE;
2317 }
2318
2319 size_t newSize = ((mDataSize+len)*3)/2;
2320 return (newSize <= mDataSize)
2321 ? (status_t) NO_MEMORY
2322 : continueWrite(newSize);
2323 }
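
// Worked example of the 1.5x growth policy above (illustrative numbers):
// with mDataSize == 100 and len == 8, newSize = ((100 + 8) * 3) / 2 = 162,
// so continueWrite(162) is requested rather than growing by just 8 bytes.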
2324
2325 status_t Parcel::restartWrite(size_t desired)
2326 {
2327 if (desired > INT32_MAX) {
2328 // don't accept size_t values which may have come from an
2329 // inadvertent conversion from a negative int.
2330 return BAD_VALUE;
2331 }
2332
2333 if (mOwner) {
2334 freeData();
2335 return continueWrite(desired);
2336 }
2337
2338 uint8_t* data = (uint8_t*)realloc(mData, desired);
2339 if (!data && desired > mDataCapacity) {
2340 mError = NO_MEMORY;
2341 return NO_MEMORY;
2342 }
2343
2344 releaseObjects();
2345
2346 if (data) {
2347 LOG_ALLOC("Parcel %p: restart from %zu to %zu capacity", this, mDataCapacity, desired);
2348 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2349 gParcelGlobalAllocSize += desired;
2350 gParcelGlobalAllocSize -= mDataCapacity;
2351 if (!mData) {
2352 gParcelGlobalAllocCount++;
2353 }
2354 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2355 mData = data;
2356 mDataCapacity = desired;
2357 }
2358
2359 mDataSize = mDataPos = 0;
2360 ALOGV("restartWrite Setting data size of %p to %zu", this, mDataSize);
2361 ALOGV("restartWrite Setting data pos of %p to %zu", this, mDataPos);
2362
2363 free(mObjects);
2364 mObjects = NULL;
2365 mObjectsSize = mObjectsCapacity = 0;
2366 mNextObjectHint = 0;
2367 mHasFds = false;
2368 mFdsKnown = true;
2369 mAllowFds = true;
2370
2371 return NO_ERROR;
2372 }
2373
2374 status_t Parcel::continueWrite(size_t desired)
2375 {
2376 if (desired > INT32_MAX) {
2377 // don't accept size_t values which may have come from an
2378 // inadvertent conversion from a negative int.
2379 return BAD_VALUE;
2380 }
2381
2382 // If shrinking, first adjust for any objects that appear
2383 // after the new data size.
2384 size_t objectsSize = mObjectsSize;
2385 if (desired < mDataSize) {
2386 if (desired == 0) {
2387 objectsSize = 0;
2388 } else {
2389 while (objectsSize > 0) {
2390 if (mObjects[objectsSize-1] < desired)
2391 break;
2392 objectsSize--;
2393 }
2394 }
2395 }
2396
2397 if (mOwner) {
2398 // If the size is going to zero, just release the owner's data.
2399 if (desired == 0) {
2400 freeData();
2401 return NO_ERROR;
2402 }
2403
2404 // If there is a different owner, we need to take
2405 // possession.
2406 uint8_t* data = (uint8_t*)malloc(desired);
2407 if (!data) {
2408 mError = NO_MEMORY;
2409 return NO_MEMORY;
2410 }
2411 binder_size_t* objects = NULL;
2412
2413 if (objectsSize) {
2414 objects = (binder_size_t*)calloc(objectsSize, sizeof(binder_size_t));
2415 if (!objects) {
2416 free(data);
2417
2418 mError = NO_MEMORY;
2419 return NO_MEMORY;
2420 }
2421
2422 // Little hack to only acquire references on objects
2423 // we will be keeping.
2424 size_t oldObjectsSize = mObjectsSize;
2425 mObjectsSize = objectsSize;
2426 acquireObjects();
2427 mObjectsSize = oldObjectsSize;
2428 }
2429
2430 if (mData) {
2431 memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
2432 }
2433 if (objects && mObjects) {
2434 memcpy(objects, mObjects, objectsSize*sizeof(binder_size_t));
2435 }
2436 //ALOGI("Freeing data ref of %p (pid=%d)", this, getpid());
2437 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
2438 mOwner = NULL;
2439
2440 LOG_ALLOC("Parcel %p: taking ownership of %zu capacity", this, desired);
2441 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2442 gParcelGlobalAllocSize += desired;
2443 gParcelGlobalAllocCount++;
2444 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2445
2446 mData = data;
2447 mObjects = objects;
2448 mDataSize = (mDataSize < desired) ? mDataSize : desired;
2449 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2450 mDataCapacity = desired;
2451 mObjectsSize = mObjectsCapacity = objectsSize;
2452 mNextObjectHint = 0;
2453
2454 } else if (mData) {
2455 if (objectsSize < mObjectsSize) {
2456 // Need to release refs on any objects we are dropping.
2457 const sp<ProcessState> proc(ProcessState::self());
2458 for (size_t i=objectsSize; i<mObjectsSize; i++) {
2459 const flat_binder_object* flat
2460 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
2461 if (flat->type == BINDER_TYPE_FD) {
2462 // will need to rescan because we may have lopped off the only FDs
2463 mFdsKnown = false;
2464 }
2465 release_object(proc, *flat, this, &mOpenAshmemSize);
2466 }
2467 binder_size_t* objects =
2468 (binder_size_t*)realloc(mObjects, objectsSize*sizeof(binder_size_t));
2469 if (objects) {
2470 mObjects = objects;
2471 }
2472 mObjectsSize = objectsSize;
2473 mNextObjectHint = 0;
2474 }
2475
2476 // We own the data, so we can just do a realloc().
2477 if (desired > mDataCapacity) {
2478 uint8_t* data = (uint8_t*)realloc(mData, desired);
2479 if (data) {
2480 LOG_ALLOC("Parcel %p: continue from %zu to %zu capacity", this, mDataCapacity,
2481 desired);
2482 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2483 gParcelGlobalAllocSize += desired;
2484 gParcelGlobalAllocSize -= mDataCapacity;
2485 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2486 mData = data;
2487 mDataCapacity = desired;
2488 } else if (desired > mDataCapacity) {
2489 mError = NO_MEMORY;
2490 return NO_MEMORY;
2491 }
2492 } else {
2493 if (mDataSize > desired) {
2494 mDataSize = desired;
2495 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2496 }
2497 if (mDataPos > desired) {
2498 mDataPos = desired;
2499 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2500 }
2501 }
2502
2503 } else {
2504 // This is the first data. Easy!
2505 uint8_t* data = (uint8_t*)malloc(desired);
2506 if (!data) {
2507 mError = NO_MEMORY;
2508 return NO_MEMORY;
2509 }
2510
2511 if(!(mDataCapacity == 0 && mObjects == NULL
2512 && mObjectsCapacity == 0)) {
2513 ALOGE("continueWrite: %zu/%p/%zu/%zu", mDataCapacity, mObjects, mObjectsCapacity, desired);
2514 }
2515
2516 LOG_ALLOC("Parcel %p: allocating with %zu capacity", this, desired);
2517 pthread_mutex_lock(&gParcelGlobalAllocSizeLock);
2518 gParcelGlobalAllocSize += desired;
2519 gParcelGlobalAllocCount++;
2520 pthread_mutex_unlock(&gParcelGlobalAllocSizeLock);
2521
2522 mData = data;
2523 mDataSize = mDataPos = 0;
2524 ALOGV("continueWrite Setting data size of %p to %zu", this, mDataSize);
2525 ALOGV("continueWrite Setting data pos of %p to %zu", this, mDataPos);
2526 mDataCapacity = desired;
2527 }
2528
2529 return NO_ERROR;
2530 }
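
// Summary of the three branches above: data owned by someone else (mOwner) is
// copied into a fresh allocation before being released back to its owner;
// data we already own is realloc()'ed in place (dropping refs on objects past
// the new size); and a Parcel with no buffer yet simply malloc()s one.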
2531
2532 void Parcel::initState()
2533 {
2534 LOG_ALLOC("Parcel %p: initState", this);
2535 mError = NO_ERROR;
2536 mData = 0;
2537 mDataSize = 0;
2538 mDataCapacity = 0;
2539 mDataPos = 0;
2540 ALOGV("initState Setting data size of %p to %zu", this, mDataSize);
2541 ALOGV("initState Setting data pos of %p to %zu", this, mDataPos);
2542 mObjects = NULL;
2543 mObjectsSize = 0;
2544 mObjectsCapacity = 0;
2545 mNextObjectHint = 0;
2546 mHasFds = false;
2547 mFdsKnown = true;
2548 mAllowFds = true;
2549 mOwner = NULL;
2550 mOpenAshmemSize = 0;
2551
2552 // Racing initializations are harmless here: they only produce multiple identical writes.
2553 if (gMaxFds == 0) {
2554 struct rlimit result;
2555 if (!getrlimit(RLIMIT_NOFILE, &result)) {
2556 gMaxFds = (size_t)result.rlim_cur;
2557 //ALOGI("parcel fd limit set to %zu", gMaxFds);
2558 } else {
2559 ALOGW("Unable to getrlimit: %s", strerror(errno));
2560 gMaxFds = 1024;
2561 }
2562 }
2563 }
2564
2565 void Parcel::scanForFds() const
2566 {
2567 bool hasFds = false;
2568 for (size_t i=0; i<mObjectsSize; i++) {
2569 const flat_binder_object* flat
2570 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
2571 if (flat->type == BINDER_TYPE_FD) {
2572 hasFds = true;
2573 break;
2574 }
2575 }
2576 mHasFds = hasFds;
2577 mFdsKnown = true;
2578 }
2579
2580 size_t Parcel::getBlobAshmemSize() const
2581 {
2582 // This used to return the total size of all blobs that were written to ashmem; it now
2583 // returns the amount of ashmem currently referenced by this Parcel, which should be equivalent.
2584 // TODO: Remove method once ABI can be changed.
2585 return mOpenAshmemSize;
2586 }
2587
2588 size_t Parcel::getOpenAshmemSize() const
2589 {
2590 return mOpenAshmemSize;
2591 }
2592
2593 // --- Parcel::Blob ---
2594
2595 Parcel::Blob::Blob() :
2596 mFd(-1), mData(NULL), mSize(0), mMutable(false) {
2597 }
2598
2599 Parcel::Blob::~Blob() {
2600 release();
2601 }
2602
2603 void Parcel::Blob::release() {
2604 if (mFd != -1 && mData) {
2605 ::munmap(mData, mSize);
2606 }
2607 clear();
2608 }
2609
2610 void Parcel::Blob::init(int fd, void* data, size_t size, bool isMutable) {
2611 mFd = fd;
2612 mData = data;
2613 mSize = size;
2614 mMutable = isMutable;
2615 }
2616
2617 void Parcel::Blob::clear() {
2618 mFd = -1;
2619 mData = NULL;
2620 mSize = 0;
2621 mMutable = false;
2622 }
2623
2624 } // namespace android
2625