1 /*
2 * Copyright (C) 2005 The Android Open Source Project
3 *
4 * Licensed under the Apache License, Version 2.0 (the "License");
5 * you may not use this file except in compliance with the License.
6 * You may obtain a copy of the License at
7 *
8 * http://www.apache.org/licenses/LICENSE-2.0
9 *
10 * Unless required by applicable law or agreed to in writing, software
11 * distributed under the License is distributed on an "AS IS" BASIS,
12 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 * See the License for the specific language governing permissions and
14 * limitations under the License.
15 */
16
17 #define LOG_TAG "Parcel"
18 //#define LOG_NDEBUG 0
19
20 #include <binder/Parcel.h>
21
22 #include <binder/IPCThreadState.h>
23 #include <binder/Binder.h>
24 #include <binder/BpBinder.h>
25 #include <utils/Debug.h>
26 #include <binder/ProcessState.h>
27 #include <utils/Log.h>
28 #include <utils/String8.h>
29 #include <utils/String16.h>
30 #include <utils/TextOutput.h>
31 #include <utils/misc.h>
32 #include <utils/Flattenable.h>
33 #include <cutils/ashmem.h>
34
35 #include <private/binder/binder_module.h>
36
37 #include <stdio.h>
38 #include <stdlib.h>
39 #include <stdint.h>
40 #include <sys/mman.h>
41
42 #ifndef INT32_MAX
43 #define INT32_MAX ((int32_t)(2147483647))
44 #endif
45
46 #define LOG_REFS(...)
47 //#define LOG_REFS(...) LOG(LOG_DEBUG, "Parcel", __VA_ARGS__)
48
49 // ---------------------------------------------------------------------------
50
51 #define PAD_SIZE(s) (((s)+3)&~3)
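// For reference: PAD_SIZE rounds a byte count up to the next multiple of 4,
// matching the 4-byte alignment of everything written into a Parcel, e.g.
// PAD_SIZE(1) == 4, PAD_SIZE(4) == 4, PAD_SIZE(5) == 8.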
52
53 // Note: must be kept in sync with android/os/StrictMode.java's PENALTY_GATHER
54 #define STRICT_MODE_PENALTY_GATHER 0x100
55
56 // Note: must be kept in sync with android/os/Parcel.java's EX_HAS_REPLY_HEADER
57 #define EX_HAS_REPLY_HEADER -128
58
59 // Maximum size of a blob to transfer in-place.
60 static const size_t IN_PLACE_BLOB_LIMIT = 40 * 1024;
61
62 // XXX This can be made public if we want to provide
63 // support for typed data.
64 struct small_flat_data
65 {
66 uint32_t type;
67 uint32_t data;
68 };
69
70 namespace android {
71
72 void acquire_object(const sp<ProcessState>& proc,
73 const flat_binder_object& obj, const void* who)
74 {
75 switch (obj.type) {
76 case BINDER_TYPE_BINDER:
77 if (obj.binder) {
78 LOG_REFS("Parcel %p acquiring reference on local %p", who, obj.cookie);
79 static_cast<IBinder*>(obj.cookie)->incStrong(who);
80 }
81 return;
82 case BINDER_TYPE_WEAK_BINDER:
83 if (obj.binder)
84 static_cast<RefBase::weakref_type*>(obj.binder)->incWeak(who);
85 return;
86 case BINDER_TYPE_HANDLE: {
87 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
88 if (b != NULL) {
89 LOG_REFS("Parcel %p acquiring reference on remote %p", who, b.get());
90 b->incStrong(who);
91 }
92 return;
93 }
94 case BINDER_TYPE_WEAK_HANDLE: {
95 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
96 if (b != NULL) b.get_refs()->incWeak(who);
97 return;
98 }
99 case BINDER_TYPE_FD: {
100 // intentionally blank -- nothing to do to acquire this, but we do
101 // recognize it as a legitimate object type.
102 return;
103 }
104 }
105
106 LOGD("Invalid object type 0x%08lx", obj.type);
107 }
108
109 void release_object(const sp<ProcessState>& proc,
110 const flat_binder_object& obj, const void* who)
111 {
112 switch (obj.type) {
113 case BINDER_TYPE_BINDER:
114 if (obj.binder) {
115 LOG_REFS("Parcel %p releasing reference on local %p", who, obj.cookie);
116 static_cast<IBinder*>(obj.cookie)->decStrong(who);
117 }
118 return;
119 case BINDER_TYPE_WEAK_BINDER:
120 if (obj.binder)
121 static_cast<RefBase::weakref_type*>(obj.binder)->decWeak(who);
122 return;
123 case BINDER_TYPE_HANDLE: {
124 const sp<IBinder> b = proc->getStrongProxyForHandle(obj.handle);
125 if (b != NULL) {
126 LOG_REFS("Parcel %p releasing reference on remote %p", who, b.get());
127 b->decStrong(who);
128 }
129 return;
130 }
131 case BINDER_TYPE_WEAK_HANDLE: {
132 const wp<IBinder> b = proc->getWeakProxyForHandle(obj.handle);
133 if (b != NULL) b.get_refs()->decWeak(who);
134 return;
135 }
136 case BINDER_TYPE_FD: {
137 if (obj.cookie != (void*)0) close(obj.handle);
138 return;
139 }
140 }
141
142 LOGE("Invalid object type 0x%08lx", obj.type);
143 }
144
145 inline static status_t finish_flatten_binder(
146 const sp<IBinder>& binder, const flat_binder_object& flat, Parcel* out)
147 {
148 return out->writeObject(flat, false);
149 }
150
151 status_t flatten_binder(const sp<ProcessState>& proc,
152 const sp<IBinder>& binder, Parcel* out)
153 {
154 flat_binder_object obj;
155
156 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
157 if (binder != NULL) {
158 IBinder *local = binder->localBinder();
159 if (!local) {
160 BpBinder *proxy = binder->remoteBinder();
161 if (proxy == NULL) {
162 LOGE("null proxy");
163 }
164 const int32_t handle = proxy ? proxy->handle() : 0;
165 obj.type = BINDER_TYPE_HANDLE;
166 obj.handle = handle;
167 obj.cookie = NULL;
168 } else {
169 obj.type = BINDER_TYPE_BINDER;
170 obj.binder = local->getWeakRefs();
171 obj.cookie = local;
172 }
173 } else {
174 obj.type = BINDER_TYPE_BINDER;
175 obj.binder = NULL;
176 obj.cookie = NULL;
177 }
178
179 return finish_flatten_binder(binder, obj, out);
180 }
181
182 status_t flatten_binder(const sp<ProcessState>& proc,
183 const wp<IBinder>& binder, Parcel* out)
184 {
185 flat_binder_object obj;
186
187 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
188 if (binder != NULL) {
189 sp<IBinder> real = binder.promote();
190 if (real != NULL) {
191 IBinder *local = real->localBinder();
192 if (!local) {
193 BpBinder *proxy = real->remoteBinder();
194 if (proxy == NULL) {
195 LOGE("null proxy");
196 }
197 const int32_t handle = proxy ? proxy->handle() : 0;
198 obj.type = BINDER_TYPE_WEAK_HANDLE;
199 obj.handle = handle;
200 obj.cookie = NULL;
201 } else {
202 obj.type = BINDER_TYPE_WEAK_BINDER;
203 obj.binder = binder.get_refs();
204 obj.cookie = binder.unsafe_get();
205 }
206 return finish_flatten_binder(real, obj, out);
207 }
208
209 // XXX How to deal? In order to flatten the given binder,
210 // we need to probe it for information, which requires a primary
211 // reference... but we don't have one.
212 //
213 // The OpenBinder implementation uses a dynamic_cast<> here,
214 // but we can't do that with the different reference counting
215 // implementation we are using.
216 LOGE("Unable to unflatten Binder weak reference!");
217 obj.type = BINDER_TYPE_BINDER;
218 obj.binder = NULL;
219 obj.cookie = NULL;
220 return finish_flatten_binder(NULL, obj, out);
221
222 } else {
223 obj.type = BINDER_TYPE_BINDER;
224 obj.binder = NULL;
225 obj.cookie = NULL;
226 return finish_flatten_binder(NULL, obj, out);
227 }
228 }
229
230 inline static status_t finish_unflatten_binder(
231 BpBinder* proxy, const flat_binder_object& flat, const Parcel& in)
232 {
233 return NO_ERROR;
234 }
235
236 status_t unflatten_binder(const sp<ProcessState>& proc,
237 const Parcel& in, sp<IBinder>* out)
238 {
239 const flat_binder_object* flat = in.readObject(false);
240
241 if (flat) {
242 switch (flat->type) {
243 case BINDER_TYPE_BINDER:
244 *out = static_cast<IBinder*>(flat->cookie);
245 return finish_unflatten_binder(NULL, *flat, in);
246 case BINDER_TYPE_HANDLE:
247 *out = proc->getStrongProxyForHandle(flat->handle);
248 return finish_unflatten_binder(
249 static_cast<BpBinder*>(out->get()), *flat, in);
250 }
251 }
252 return BAD_TYPE;
253 }
254
255 status_t unflatten_binder(const sp<ProcessState>& proc,
256 const Parcel& in, wp<IBinder>* out)
257 {
258 const flat_binder_object* flat = in.readObject(false);
259
260 if (flat) {
261 switch (flat->type) {
262 case BINDER_TYPE_BINDER:
263 *out = static_cast<IBinder*>(flat->cookie);
264 return finish_unflatten_binder(NULL, *flat, in);
265 case BINDER_TYPE_WEAK_BINDER:
266 if (flat->binder != NULL) {
267 out->set_object_and_refs(
268 static_cast<IBinder*>(flat->cookie),
269 static_cast<RefBase::weakref_type*>(flat->binder));
270 } else {
271 *out = NULL;
272 }
273 return finish_unflatten_binder(NULL, *flat, in);
274 case BINDER_TYPE_HANDLE:
275 case BINDER_TYPE_WEAK_HANDLE:
276 *out = proc->getWeakProxyForHandle(flat->handle);
277 return finish_unflatten_binder(
278 static_cast<BpBinder*>(out->unsafe_get()), *flat, in);
279 }
280 }
281 return BAD_TYPE;
282 }
283
284 // ---------------------------------------------------------------------------
285
286 Parcel::Parcel()
287 {
288 initState();
289 }
290
291 Parcel::~Parcel()
292 {
293 freeDataNoInit();
294 }
295
296 const uint8_t* Parcel::data() const
297 {
298 return mData;
299 }
300
301 size_t Parcel::dataSize() const
302 {
303 return (mDataSize > mDataPos ? mDataSize : mDataPos);
304 }
305
306 size_t Parcel::dataAvail() const
307 {
308 // TODO: decide what to do about the possibility that this can
309 // report an available-data size that exceeds a Java int's max
310 // positive value, causing havoc. Fortunately this will only
311 // happen if someone constructs a Parcel containing more than two
312 // gigabytes of data, which on typical phone hardware is simply
313 // not possible.
314 return dataSize() - dataPosition();
315 }
316
317 size_t Parcel::dataPosition() const
318 {
319 return mDataPos;
320 }
321
322 size_t Parcel::dataCapacity() const
323 {
324 return mDataCapacity;
325 }
326
327 status_t Parcel::setDataSize(size_t size)
328 {
329 status_t err;
330 err = continueWrite(size);
331 if (err == NO_ERROR) {
332 mDataSize = size;
333 LOGV("setDataSize Setting data size of %p to %d\n", this, mDataSize);
334 }
335 return err;
336 }
337
338 void Parcel::setDataPosition(size_t pos) const
339 {
340 mDataPos = pos;
341 mNextObjectHint = 0;
342 }
343
344 status_t Parcel::setDataCapacity(size_t size)
345 {
346 if (size > mDataCapacity) return continueWrite(size);
347 return NO_ERROR;
348 }
349
350 status_t Parcel::setData(const uint8_t* buffer, size_t len)
351 {
352 status_t err = restartWrite(len);
353 if (err == NO_ERROR) {
354 memcpy(const_cast<uint8_t*>(data()), buffer, len);
355 mDataSize = len;
356 mFdsKnown = false;
357 }
358 return err;
359 }
360
361 status_t Parcel::appendFrom(const Parcel *parcel, size_t offset, size_t len)
362 {
363 const sp<ProcessState> proc(ProcessState::self());
364 status_t err;
365 const uint8_t *data = parcel->mData;
366 const size_t *objects = parcel->mObjects;
367 size_t size = parcel->mObjectsSize;
368 int startPos = mDataPos;
369 int firstIndex = -1, lastIndex = -2;
370
371 if (len == 0) {
372 return NO_ERROR;
373 }
374
375 // range checks against the source parcel size
376 if ((offset > parcel->mDataSize)
377 || (len > parcel->mDataSize)
378 || (offset + len > parcel->mDataSize)) {
379 return BAD_VALUE;
380 }
381
382 // Count objects in range
383 for (int i = 0; i < (int) size; i++) {
384 size_t off = objects[i];
385 if ((off >= offset) && (off < offset + len)) {
386 if (firstIndex == -1) {
387 firstIndex = i;
388 }
389 lastIndex = i;
390 }
391 }
392 int numObjects = lastIndex - firstIndex + 1;
393
394 if ((mDataSize+len) > mDataCapacity) {
395 // grow data
396 err = growData(len);
397 if (err != NO_ERROR) {
398 return err;
399 }
400 }
401
402 // append data
403 memcpy(mData + mDataPos, data + offset, len);
404 mDataPos += len;
405 mDataSize += len;
406
407 err = NO_ERROR;
408
409 if (numObjects > 0) {
410 // grow objects
411 if (mObjectsCapacity < mObjectsSize + numObjects) {
412 int newSize = ((mObjectsSize + numObjects)*3)/2;
413 size_t *objects =
414 (size_t*)realloc(mObjects, newSize*sizeof(size_t));
415 if (objects == (size_t*)0) {
416 return NO_MEMORY;
417 }
418 mObjects = objects;
419 mObjectsCapacity = newSize;
420 }
421
422 // append and acquire objects
423 int idx = mObjectsSize;
424 for (int i = firstIndex; i <= lastIndex; i++) {
425 size_t off = objects[i] - offset + startPos;
426 mObjects[idx++] = off;
427 mObjectsSize++;
428
429 flat_binder_object* flat
430 = reinterpret_cast<flat_binder_object*>(mData + off);
431 acquire_object(proc, *flat, this);
432
433 if (flat->type == BINDER_TYPE_FD) {
434 // If this is a file descriptor, we need to dup it so the
435 // new Parcel now owns its own fd, and can declare that we
436 // officially know we have fds.
437 flat->handle = dup(flat->handle);
438 flat->cookie = (void*)1;
439 mHasFds = mFdsKnown = true;
440 if (!mAllowFds) {
441 err = FDS_NOT_ALLOWED;
442 }
443 }
444 }
445 }
446
447 return err;
448 }
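// Illustrative usage (a sketch, not part of this file): appendFrom() copies a
// byte range out of another Parcel, re-registering and re-acquiring any binder
// objects in that range and dup()ing any file descriptors, e.g.
//
//     Parcel dst;
//     dst.appendFrom(&src, 0, src.dataSize());   // clone src's payload into dst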
449
450 bool Parcel::pushAllowFds(bool allowFds)
451 {
452 const bool origValue = mAllowFds;
453 if (!allowFds) {
454 mAllowFds = false;
455 }
456 return origValue;
457 }
458
459 void Parcel::restoreAllowFds(bool lastValue)
460 {
461 mAllowFds = lastValue;
462 }
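// Typical pattern (a sketch): a caller that must not accept file descriptors
// clears the flag around its writes and then restores the previous value:
//
//     const bool wasAllowed = reply->pushAllowFds(false);
//     // ... marshal the reply ...
//     reply->restoreAllowFds(wasAllowed);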
463
464 bool Parcel::hasFileDescriptors() const
465 {
466 if (!mFdsKnown) {
467 scanForFds();
468 }
469 return mHasFds;
470 }
471
472 // Write RPC headers. (previously just the interface token)
473 status_t Parcel::writeInterfaceToken(const String16& interface)
474 {
475 writeInt32(IPCThreadState::self()->getStrictModePolicy() |
476 STRICT_MODE_PENALTY_GATHER);
477 // currently the interface identification token is just its name as a string
478 return writeString16(interface);
479 }
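// Illustrative proxy-side usage (a sketch; IMyService and TRANSACTION_foo are
// hypothetical names): the RPC header goes in front of the call arguments, and
// the receiving stub validates it with enforceInterface() below.
//
//     Parcel data, reply;
//     data.writeInterfaceToken(IMyService::getInterfaceDescriptor());
//     data.writeInt32(42);
//     remote()->transact(TRANSACTION_foo, data, &reply);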
480
481 bool Parcel::checkInterface(IBinder* binder) const
482 {
483 return enforceInterface(binder->getInterfaceDescriptor());
484 }
485
486 bool Parcel::enforceInterface(const String16& interface,
487 IPCThreadState* threadState) const
488 {
489 int32_t strictPolicy = readInt32();
490 if (threadState == NULL) {
491 threadState = IPCThreadState::self();
492 }
493 if ((threadState->getLastTransactionBinderFlags() &
494 IBinder::FLAG_ONEWAY) != 0) {
495 // For one-way calls, the callee is running entirely
496 // disconnected from the caller, so disable StrictMode entirely.
497 // Not only does disk/network usage not impact the caller, but
498 // there's no way to communicate back any violations anyway.
499 threadState->setStrictModePolicy(0);
500 } else {
501 threadState->setStrictModePolicy(strictPolicy);
502 }
503 const String16 str(readString16());
504 if (str == interface) {
505 return true;
506 } else {
507 LOGW("**** enforceInterface() expected '%s' but read '%s'\n",
508 String8(interface).string(), String8(str).string());
509 return false;
510 }
511 }
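// Illustrative stub-side usage (a sketch mirroring the CHECK_INTERFACE pattern
// in generated code; IMyService is a hypothetical name):
//
//     if (!data.enforceInterface(IMyService::getInterfaceDescriptor())) {
//         return PERMISSION_DENIED;
//     }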
512
513 const size_t* Parcel::objects() const
514 {
515 return mObjects;
516 }
517
518 size_t Parcel::objectsCount() const
519 {
520 return mObjectsSize;
521 }
522
523 status_t Parcel::errorCheck() const
524 {
525 return mError;
526 }
527
528 void Parcel::setError(status_t err)
529 {
530 mError = err;
531 }
532
533 status_t Parcel::finishWrite(size_t len)
534 {
535 //printf("Finish write of %d\n", len);
536 mDataPos += len;
537 LOGV("finishWrite Setting data pos of %p to %d\n", this, mDataPos);
538 if (mDataPos > mDataSize) {
539 mDataSize = mDataPos;
540 LOGV("finishWrite Setting data size of %p to %d\n", this, mDataSize);
541 }
542 //printf("New pos=%d, size=%d\n", mDataPos, mDataSize);
543 return NO_ERROR;
544 }
545
546 status_t Parcel::writeUnpadded(const void* data, size_t len)
547 {
548 size_t end = mDataPos + len;
549 if (end < mDataPos) {
550 // integer overflow
551 return BAD_VALUE;
552 }
553
554 if (end <= mDataCapacity) {
555 restart_write:
556 memcpy(mData+mDataPos, data, len);
557 return finishWrite(len);
558 }
559
560 status_t err = growData(len);
561 if (err == NO_ERROR) goto restart_write;
562 return err;
563 }
564
565 status_t Parcel::write(const void* data, size_t len)
566 {
567 void* const d = writeInplace(len);
568 if (d) {
569 memcpy(d, data, len);
570 return NO_ERROR;
571 }
572 return mError;
573 }
574
575 void* Parcel::writeInplace(size_t len)
576 {
577 const size_t padded = PAD_SIZE(len);
578
579 // sanity check for integer overflow
580 if (mDataPos+padded < mDataPos) {
581 return NULL;
582 }
583
584 if ((mDataPos+padded) <= mDataCapacity) {
585 restart_write:
586 //printf("Writing %ld bytes, padded to %ld\n", len, padded);
587 uint8_t* const data = mData+mDataPos;
588
589 // Need to pad at end?
590 if (padded != len) {
591 #if BYTE_ORDER == BIG_ENDIAN
592 static const uint32_t mask[4] = {
593 0x00000000, 0xffffff00, 0xffff0000, 0xff000000
594 };
595 #endif
596 #if BYTE_ORDER == LITTLE_ENDIAN
597 static const uint32_t mask[4] = {
598 0x00000000, 0x00ffffff, 0x0000ffff, 0x000000ff
599 };
600 #endif
601 //printf("Applying pad mask: %p to %p\n", (void*)mask[padded-len],
602 // *reinterpret_cast<void**>(data+padded-4));
603 *reinterpret_cast<uint32_t*>(data+padded-4) &= mask[padded-len];
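            // Worked example (for reference): with len == 5, padded == 8 and
            // padded-len == 3; on little-endian hardware mask[3] == 0x000000ff
            // keeps only the last payload byte of the final word and zeroes the
            // three pad bytes, so stale heap contents never leak into the parcel.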
604 }
605
606 finishWrite(padded);
607 return data;
608 }
609
610 status_t err = growData(padded);
611 if (err == NO_ERROR) goto restart_write;
612 return NULL;
613 }
614
615 status_t Parcel::writeInt32(int32_t val)
616 {
617 return writeAligned(val);
618 }
619
620 status_t Parcel::writeInt64(int64_t val)
621 {
622 return writeAligned(val);
623 }
624
625 status_t Parcel::writeFloat(float val)
626 {
627 return writeAligned(val);
628 }
629
630 status_t Parcel::writeDouble(double val)
631 {
632 return writeAligned(val);
633 }
634
635 status_t Parcel::writeIntPtr(intptr_t val)
636 {
637 return writeAligned(val);
638 }
639
640 status_t Parcel::writeCString(const char* str)
641 {
642 return write(str, strlen(str)+1);
643 }
644
645 status_t Parcel::writeString8(const String8& str)
646 {
647 status_t err = writeInt32(str.bytes());
648 // Only write the string payload if its length is greater than zero,
649 // since readString8 will only read it when the length field is non-zero.
650 // This is slightly different from how writeString16 works.
651 if (str.bytes() > 0 && err == NO_ERROR) {
652 err = write(str.string(), str.bytes()+1);
653 }
654 return err;
655 }
656
657 status_t Parcel::writeString16(const String16& str)
658 {
659 return writeString16(str.string(), str.size());
660 }
661
662 status_t Parcel::writeString16(const char16_t* str, size_t len)
663 {
664 if (str == NULL) return writeInt32(-1);
665
666 status_t err = writeInt32(len);
667 if (err == NO_ERROR) {
668 len *= sizeof(char16_t);
669 uint8_t* data = (uint8_t*)writeInplace(len+sizeof(char16_t));
670 if (data) {
671 memcpy(data, str, len);
672 *reinterpret_cast<char16_t*>(data+len) = 0;
673 return NO_ERROR;
674 }
675 err = mError;
676 }
677 return err;
678 }
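// For reference, the wire format produced above is: an int32 character count,
// the UTF-16 payload, a char16_t NUL terminator, padded to a 4-byte boundary
// by writeInplace(); a NULL string is encoded as the single length value -1.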
679
680 status_t Parcel::writeStrongBinder(const sp<IBinder>& val)
681 {
682 return flatten_binder(ProcessState::self(), val, this);
683 }
684
685 status_t Parcel::writeWeakBinder(const wp<IBinder>& val)
686 {
687 return flatten_binder(ProcessState::self(), val, this);
688 }
689
690 status_t Parcel::writeNativeHandle(const native_handle* handle)
691 {
692 if (!handle || handle->version != sizeof(native_handle))
693 return BAD_TYPE;
694
695 status_t err;
696 err = writeInt32(handle->numFds);
697 if (err != NO_ERROR) return err;
698
699 err = writeInt32(handle->numInts);
700 if (err != NO_ERROR) return err;
701
702 for (int i=0 ; err==NO_ERROR && i<handle->numFds ; i++)
703 err = writeDupFileDescriptor(handle->data[i]);
704
705 if (err != NO_ERROR) {
706 LOGD("write native handle, write dup fd failed");
707 return err;
708 }
709 err = write(handle->data + handle->numFds, sizeof(int)*handle->numInts);
710 return err;
711 }
712
713 status_t Parcel::writeFileDescriptor(int fd, bool takeOwnership)
714 {
715 flat_binder_object obj;
716 obj.type = BINDER_TYPE_FD;
717 obj.flags = 0x7f | FLAT_BINDER_FLAG_ACCEPTS_FDS;
718 obj.handle = fd;
719 obj.cookie = (void*) (takeOwnership ? 1 : 0);
720 return writeObject(obj, true);
721 }
722
723 status_t Parcel::writeDupFileDescriptor(int fd)
724 {
725 return writeFileDescriptor(dup(fd), true /*takeOwnership*/);
726 }
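// Ownership note (from the cookie handling above and in release_object()):
// with takeOwnership == true the Parcel closes the fd when it releases its
// objects, so writeDupFileDescriptor() hands the Parcel its own duplicate and
// the caller keeps the original; with takeOwnership == false the caller must
// keep fd valid until the Parcel has been transmitted or destroyed.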
727
728 status_t Parcel::writeBlob(size_t len, WritableBlob* outBlob)
729 {
730 status_t status;
731
732 if (!mAllowFds || len <= IN_PLACE_BLOB_LIMIT) {
733 LOGV("writeBlob: write in place");
734 status = writeInt32(0);
735 if (status) return status;
736
737 void* ptr = writeInplace(len);
738 if (!ptr) return NO_MEMORY;
739
740 outBlob->init(false /*mapped*/, ptr, len);
741 return NO_ERROR;
742 }
743
744 LOGV("writeBlob: write to ashmem");
745 int fd = ashmem_create_region("Parcel Blob", len);
746 if (fd < 0) return NO_MEMORY;
747
748 int result = ashmem_set_prot_region(fd, PROT_READ | PROT_WRITE);
749 if (result < 0) {
750 status = result;
751 } else {
752 void* ptr = ::mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
753 if (ptr == MAP_FAILED) {
754 status = -errno;
755 } else {
756 result = ashmem_set_prot_region(fd, PROT_READ);
757 if (result < 0) {
758 status = result;
759 } else {
760 status = writeInt32(1);
761 if (!status) {
762 status = writeFileDescriptor(fd, true /*takeOwnership*/);
763 if (!status) {
764 outBlob->init(true /*mapped*/, ptr, len);
765 return NO_ERROR;
766 }
767 }
768 }
769 }
770 ::munmap(ptr, len);
771 }
772 ::close(fd);
773 return status;
774 }
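// Illustrative usage (a sketch; WritableBlob's data() accessor is declared in
// Parcel.h): small blobs are stored inline, larger ones in an ashmem region
// whose fd is written into the parcel with its protection reduced to read-only.
//
//     Parcel::WritableBlob blob;
//     if (parcel.writeBlob(size, &blob) == NO_ERROR) {
//         memcpy(blob.data(), src, size);
//         blob.release();
//     }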
775
776 status_t Parcel::write(const Flattenable& val)
777 {
778 status_t err;
779
780 // sizes needed for the flattened payload and its file descriptors
781 size_t len = val.getFlattenedSize();
782 size_t fd_count = val.getFdCount();
783
784 err = this->writeInt32(len);
785 if (err) return err;
786
787 err = this->writeInt32(fd_count);
788 if (err) return err;
789
790 // payload
791 void* buf = this->writeInplace(PAD_SIZE(len));
792 if (buf == NULL)
793 return BAD_VALUE;
794
795 int* fds = NULL;
796 if (fd_count) {
797 fds = new int[fd_count];
798 }
799
800 err = val.flatten(buf, len, fds, fd_count);
801 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
802 err = this->writeDupFileDescriptor( fds[i] );
803 }
804
805 if (fd_count) {
806 delete [] fds;
807 }
808
809 return err;
810 }
811
812 status_t Parcel::writeObject(const flat_binder_object& val, bool nullMetaData)
813 {
814 const bool enoughData = (mDataPos+sizeof(val)) <= mDataCapacity;
815 const bool enoughObjects = mObjectsSize < mObjectsCapacity;
816 if (enoughData && enoughObjects) {
817 restart_write:
818 *reinterpret_cast<flat_binder_object*>(mData+mDataPos) = val;
819
820 // Need to write meta-data?
821 if (nullMetaData || val.binder != NULL) {
822 mObjects[mObjectsSize] = mDataPos;
823 acquire_object(ProcessState::self(), val, this);
824 mObjectsSize++;
825 }
826
827 // remember if it's a file descriptor
828 if (val.type == BINDER_TYPE_FD) {
829 if (!mAllowFds) {
830 return FDS_NOT_ALLOWED;
831 }
832 mHasFds = mFdsKnown = true;
833 }
834
835 return finishWrite(sizeof(flat_binder_object));
836 }
837
838 if (!enoughData) {
839 const status_t err = growData(sizeof(val));
840 if (err != NO_ERROR) return err;
841 }
842 if (!enoughObjects) {
843 size_t newSize = ((mObjectsSize+2)*3)/2;
844 size_t* objects = (size_t*)realloc(mObjects, newSize*sizeof(size_t));
845 if (objects == NULL) return NO_MEMORY;
846 mObjects = objects;
847 mObjectsCapacity = newSize;
848 }
849
850 goto restart_write;
851 }
852
853 status_t Parcel::writeNoException()
854 {
855 return writeInt32(0);
856 }
857
858 void Parcel::remove(size_t start, size_t amt)
859 {
860 LOG_ALWAYS_FATAL("Parcel::remove() not yet implemented!");
861 }
862
863 status_t Parcel::read(void* outData, size_t len) const
864 {
865 if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize) {
866 memcpy(outData, mData+mDataPos, len);
867 mDataPos += PAD_SIZE(len);
868 LOGV("read Setting data pos of %p to %d\n", this, mDataPos);
869 return NO_ERROR;
870 }
871 return NOT_ENOUGH_DATA;
872 }
873
874 const void* Parcel::readInplace(size_t len) const
875 {
876 if ((mDataPos+PAD_SIZE(len)) >= mDataPos && (mDataPos+PAD_SIZE(len)) <= mDataSize) {
877 const void* data = mData+mDataPos;
878 mDataPos += PAD_SIZE(len);
879 LOGV("readInplace Setting data pos of %p to %d\n", this, mDataPos);
880 return data;
881 }
882 return NULL;
883 }
884
885 template<class T>
886 status_t Parcel::readAligned(T *pArg) const {
887 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
888
889 if ((mDataPos+sizeof(T)) <= mDataSize) {
890 const void* data = mData+mDataPos;
891 mDataPos += sizeof(T);
892 *pArg = *reinterpret_cast<const T*>(data);
893 return NO_ERROR;
894 } else {
895 return NOT_ENOUGH_DATA;
896 }
897 }
898
899 template<class T>
900 T Parcel::readAligned() const {
901 T result;
902 if (readAligned(&result) != NO_ERROR) {
903 result = 0;
904 }
905
906 return result;
907 }
908
909 template<class T>
910 status_t Parcel::writeAligned(T val) {
911 COMPILE_TIME_ASSERT_FUNCTION_SCOPE(PAD_SIZE(sizeof(T)) == sizeof(T));
912
913 if ((mDataPos+sizeof(val)) <= mDataCapacity) {
914 restart_write:
915 *reinterpret_cast<T*>(mData+mDataPos) = val;
916 return finishWrite(sizeof(val));
917 }
918
919 status_t err = growData(sizeof(val));
920 if (err == NO_ERROR) goto restart_write;
921 return err;
922 }
923
924 status_t Parcel::readInt32(int32_t *pArg) const
925 {
926 return readAligned(pArg);
927 }
928
929 int32_t Parcel::readInt32() const
930 {
931 return readAligned<int32_t>();
932 }
933
934
935 status_t Parcel::readInt64(int64_t *pArg) const
936 {
937 return readAligned(pArg);
938 }
939
940
941 int64_t Parcel::readInt64() const
942 {
943 return readAligned<int64_t>();
944 }
945
946 status_t Parcel::readFloat(float *pArg) const
947 {
948 return readAligned(pArg);
949 }
950
951
952 float Parcel::readFloat() const
953 {
954 return readAligned<float>();
955 }
956
957 status_t Parcel::readDouble(double *pArg) const
958 {
959 return readAligned(pArg);
960 }
961
962
963 double Parcel::readDouble() const
964 {
965 return readAligned<double>();
966 }
967
968 status_t Parcel::readIntPtr(intptr_t *pArg) const
969 {
970 return readAligned(pArg);
971 }
972
973
974 intptr_t Parcel::readIntPtr() const
975 {
976 return readAligned<intptr_t>();
977 }
978
979
980 const char* Parcel::readCString() const
981 {
982 const size_t avail = mDataSize-mDataPos;
983 if (avail > 0) {
984 const char* str = reinterpret_cast<const char*>(mData+mDataPos);
985 // is the string's trailing NUL within the parcel's valid bounds?
986 const char* eos = reinterpret_cast<const char*>(memchr(str, 0, avail));
987 if (eos) {
988 const size_t len = eos - str;
989 mDataPos += PAD_SIZE(len+1);
990 LOGV("readCString Setting data pos of %p to %d\n", this, mDataPos);
991 return str;
992 }
993 }
994 return NULL;
995 }
996
997 String8 Parcel::readString8() const
998 {
999 int32_t size = readInt32();
1000 // watch for potential int overflow adding 1 for trailing NUL
1001 if (size > 0 && size < INT32_MAX) {
1002 const char* str = (const char*)readInplace(size+1);
1003 if (str) return String8(str, size);
1004 }
1005 return String8();
1006 }
1007
1008 String16 Parcel::readString16() const
1009 {
1010 size_t len;
1011 const char16_t* str = readString16Inplace(&len);
1012 if (str) return String16(str, len);
1013 LOGE("Reading a NULL string not supported here.");
1014 return String16();
1015 }
1016
1017 const char16_t* Parcel::readString16Inplace(size_t* outLen) const
1018 {
1019 int32_t size = readInt32();
1020 // watch for potential int overflow from size+1
1021 if (size >= 0 && size < INT32_MAX) {
1022 *outLen = size;
1023 const char16_t* str = (const char16_t*)readInplace((size+1)*sizeof(char16_t));
1024 if (str != NULL) {
1025 return str;
1026 }
1027 }
1028 *outLen = 0;
1029 return NULL;
1030 }
1031
1032 sp<IBinder> Parcel::readStrongBinder() const
1033 {
1034 sp<IBinder> val;
1035 unflatten_binder(ProcessState::self(), *this, &val);
1036 return val;
1037 }
1038
1039 wp<IBinder> Parcel::readWeakBinder() const
1040 {
1041 wp<IBinder> val;
1042 unflatten_binder(ProcessState::self(), *this, &val);
1043 return val;
1044 }
1045
1046 int32_t Parcel::readExceptionCode() const
1047 {
1048 int32_t exception_code = readAligned<int32_t>();
1049 if (exception_code == EX_HAS_REPLY_HEADER) {
1050 int32_t header_size = readAligned<int32_t>();
1051 // Skip over fat response headers. Not used (or propagated) in
1052 // native code
1053 setDataPosition(dataPosition() + header_size);
1054 // And fat response headers are currently only used when there are no
1055 // exceptions, so return no error:
1056 return 0;
1057 }
1058 return exception_code;
1059 }
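// Typical proxy-side pattern (a sketch): the exception code is consumed from
// the front of the reply before any result values are read.
//
//     status_t err = remote()->transact(code, data, &reply);
//     if (err == NO_ERROR) {
//         err = reply.readExceptionCode();   // 0 means the callee threw nothing
//     }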
1060
1061 native_handle* Parcel::readNativeHandle() const
1062 {
1063 int numFds, numInts;
1064 status_t err;
1065 err = readInt32(&numFds);
1066 if (err != NO_ERROR) return 0;
1067 err = readInt32(&numInts);
1068 if (err != NO_ERROR) return 0;
1069
1070 native_handle* h = native_handle_create(numFds, numInts);
1071 for (int i=0 ; err==NO_ERROR && i<numFds ; i++) {
1072 h->data[i] = dup(readFileDescriptor());
1073 if (h->data[i] < 0) err = BAD_VALUE;
1074 }
1075 err = read(h->data + numFds, sizeof(int)*numInts);
1076 if (err != NO_ERROR) {
1077 native_handle_close(h);
1078 native_handle_delete(h);
1079 h = 0;
1080 }
1081 return h;
1082 }
1083
1084
1085 int Parcel::readFileDescriptor() const
1086 {
1087 const flat_binder_object* flat = readObject(true);
1088 if (flat) {
1089 switch (flat->type) {
1090 case BINDER_TYPE_FD:
1091 //LOGI("Returning file descriptor %ld from parcel %p\n", flat->handle, this);
1092 return flat->handle;
1093 }
1094 }
1095 return BAD_TYPE;
1096 }
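// Note (see readNativeHandle() above and read(Flattenable&) below): the
// returned fd is still owned by the Parcel, so callers that need it beyond the
// Parcel's lifetime should dup() it rather than store it directly.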
1097
1098 status_t Parcel::readBlob(size_t len, ReadableBlob* outBlob) const
1099 {
1100 int32_t useAshmem;
1101 status_t status = readInt32(&useAshmem);
1102 if (status) return status;
1103
1104 if (!useAshmem) {
1105 LOGV("readBlob: read in place");
1106 const void* ptr = readInplace(len);
1107 if (!ptr) return BAD_VALUE;
1108
1109 outBlob->init(false /*mapped*/, const_cast<void*>(ptr), len);
1110 return NO_ERROR;
1111 }
1112
1113 LOGV("readBlob: read from ashmem");
1114 int fd = readFileDescriptor();
1115 if (fd == int(BAD_TYPE)) return BAD_VALUE;
1116
1117 void* ptr = ::mmap(NULL, len, PROT_READ, MAP_SHARED, fd, 0);
1118 if (!ptr) return NO_MEMORY;
1119
1120 outBlob->init(true /*mapped*/, ptr, len);
1121 return NO_ERROR;
1122 }
1123
1124 status_t Parcel::read(Flattenable& val) const
1125 {
1126 // size
1127 const size_t len = this->readInt32();
1128 const size_t fd_count = this->readInt32();
1129
1130 // payload
1131 void const* buf = this->readInplace(PAD_SIZE(len));
1132 if (buf == NULL)
1133 return BAD_VALUE;
1134
1135 int* fds = NULL;
1136 if (fd_count) {
1137 fds = new int[fd_count];
1138 }
1139
1140 status_t err = NO_ERROR;
1141 for (size_t i=0 ; i<fd_count && err==NO_ERROR ; i++) {
1142 fds[i] = dup(this->readFileDescriptor());
1143 if (fds[i] < 0) err = BAD_VALUE;
1144 }
1145
1146 if (err == NO_ERROR) {
1147 err = val.unflatten(buf, len, fds, fd_count);
1148 }
1149
1150 if (fd_count) {
1151 delete [] fds;
1152 }
1153
1154 return err;
1155 }
1156 const flat_binder_object* Parcel::readObject(bool nullMetaData) const
1157 {
1158 const size_t DPOS = mDataPos;
1159 if ((DPOS+sizeof(flat_binder_object)) <= mDataSize) {
1160 const flat_binder_object* obj
1161 = reinterpret_cast<const flat_binder_object*>(mData+DPOS);
1162 mDataPos = DPOS + sizeof(flat_binder_object);
1163 if (!nullMetaData && (obj->cookie == NULL && obj->binder == NULL)) {
1164 // When transferring a NULL object, we don't write it into
1165 // the object list, so we don't want to check for it when
1166 // reading.
1167 LOGV("readObject Setting data pos of %p to %d\n", this, mDataPos);
1168 return obj;
1169 }
1170
1171 // Ensure that this object is valid...
1172 size_t* const OBJS = mObjects;
1173 const size_t N = mObjectsSize;
1174 size_t opos = mNextObjectHint;
1175
1176 if (N > 0) {
1177 LOGV("Parcel %p looking for obj at %d, hint=%d\n",
1178 this, DPOS, opos);
1179
1180 // Start at the current hint position, looking for an object at
1181 // the current data position.
1182 if (opos < N) {
1183 while (opos < (N-1) && OBJS[opos] < DPOS) {
1184 opos++;
1185 }
1186 } else {
1187 opos = N-1;
1188 }
1189 if (OBJS[opos] == DPOS) {
1190 // Found it!
1191 LOGV("Parcel found obj %d at index %d with forward search",
1192 this, DPOS, opos);
1193 mNextObjectHint = opos+1;
1194 LOGV("readObject Setting data pos of %p to %d\n", this, mDataPos);
1195 return obj;
1196 }
1197
1198 // Look backwards for it...
1199 while (opos > 0 && OBJS[opos] > DPOS) {
1200 opos--;
1201 }
1202 if (OBJS[opos] == DPOS) {
1203 // Found it!
1204 LOGV("Parcel found obj %d at index %d with backward search",
1205 this, DPOS, opos);
1206 mNextObjectHint = opos+1;
1207 LOGV("readObject Setting data pos of %p to %d\n", this, mDataPos);
1208 return obj;
1209 }
1210 }
1211 LOGW("Attempt to read object from Parcel %p at offset %d that is not in the object list",
1212 this, DPOS);
1213 }
1214 return NULL;
1215 }
1216
1217 void Parcel::closeFileDescriptors()
1218 {
1219 size_t i = mObjectsSize;
1220 if (i > 0) {
1221 //LOGI("Closing file descriptors for %d objects...", mObjectsSize);
1222 }
1223 while (i > 0) {
1224 i--;
1225 const flat_binder_object* flat
1226 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
1227 if (flat->type == BINDER_TYPE_FD) {
1228 //LOGI("Closing fd: %ld\n", flat->handle);
1229 close(flat->handle);
1230 }
1231 }
1232 }
1233
1234 const uint8_t* Parcel::ipcData() const
1235 {
1236 return mData;
1237 }
1238
1239 size_t Parcel::ipcDataSize() const
1240 {
1241 return (mDataSize > mDataPos ? mDataSize : mDataPos);
1242 }
1243
1244 const size_t* Parcel::ipcObjects() const
1245 {
1246 return mObjects;
1247 }
1248
1249 size_t Parcel::ipcObjectsCount() const
1250 {
1251 return mObjectsSize;
1252 }
1253
1254 void Parcel::ipcSetDataReference(const uint8_t* data, size_t dataSize,
1255 const size_t* objects, size_t objectsCount, release_func relFunc, void* relCookie)
1256 {
1257 freeDataNoInit();
1258 mError = NO_ERROR;
1259 mData = const_cast<uint8_t*>(data);
1260 mDataSize = mDataCapacity = dataSize;
1261 //LOGI("setDataReference Setting data size of %p to %lu (pid=%d)\n", this, mDataSize, getpid());
1262 mDataPos = 0;
1263 LOGV("setDataReference Setting data pos of %p to %d\n", this, mDataPos);
1264 mObjects = const_cast<size_t*>(objects);
1265 mObjectsSize = mObjectsCapacity = objectsCount;
1266 mNextObjectHint = 0;
1267 mOwner = relFunc;
1268 mOwnerCookie = relCookie;
1269 scanForFds();
1270 }
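// Note: this adopts an externally owned buffer (for example one handed back by
// the binder driver) without copying it; relFunc is stashed in mOwner and is
// invoked from freeDataNoInit() when the Parcel is destroyed or its data is
// replaced.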
1271
1272 void Parcel::print(TextOutput& to, uint32_t flags) const
1273 {
1274 to << "Parcel(";
1275
1276 if (errorCheck() != NO_ERROR) {
1277 const status_t err = errorCheck();
1278 to << "Error: " << (void*)err << " \"" << strerror(-err) << "\"";
1279 } else if (dataSize() > 0) {
1280 const uint8_t* DATA = data();
1281 to << indent << HexDump(DATA, dataSize()) << dedent;
1282 const size_t* OBJS = objects();
1283 const size_t N = objectsCount();
1284 for (size_t i=0; i<N; i++) {
1285 const flat_binder_object* flat
1286 = reinterpret_cast<const flat_binder_object*>(DATA+OBJS[i]);
1287 to << endl << "Object #" << i << " @ " << (void*)OBJS[i] << ": "
1288 << TypeCode(flat->type & 0x7f7f7f00)
1289 << " = " << flat->binder;
1290 }
1291 } else {
1292 to << "NULL";
1293 }
1294
1295 to << ")";
1296 }
1297
1298 void Parcel::releaseObjects()
1299 {
1300 const sp<ProcessState> proc(ProcessState::self());
1301 size_t i = mObjectsSize;
1302 uint8_t* const data = mData;
1303 size_t* const objects = mObjects;
1304 while (i > 0) {
1305 i--;
1306 const flat_binder_object* flat
1307 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
1308 release_object(proc, *flat, this);
1309 }
1310 }
1311
1312 void Parcel::acquireObjects()
1313 {
1314 const sp<ProcessState> proc(ProcessState::self());
1315 size_t i = mObjectsSize;
1316 uint8_t* const data = mData;
1317 size_t* const objects = mObjects;
1318 while (i > 0) {
1319 i--;
1320 const flat_binder_object* flat
1321 = reinterpret_cast<flat_binder_object*>(data+objects[i]);
1322 acquire_object(proc, *flat, this);
1323 }
1324 }
1325
1326 void Parcel::freeData()
1327 {
1328 freeDataNoInit();
1329 initState();
1330 }
1331
1332 void Parcel::freeDataNoInit()
1333 {
1334 if (mOwner) {
1335 //LOGI("Freeing data ref of %p (pid=%d)\n", this, getpid());
1336 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
1337 } else {
1338 releaseObjects();
1339 if (mData) free(mData);
1340 if (mObjects) free(mObjects);
1341 }
1342 }
1343
1344 status_t Parcel::growData(size_t len)
1345 {
1346 size_t newSize = ((mDataSize+len)*3)/2;
1347 return (newSize <= mDataSize)
1348 ? (status_t) NO_MEMORY
1349 : continueWrite(newSize);
1350 }
1351
1352 status_t Parcel::restartWrite(size_t desired)
1353 {
1354 if (mOwner) {
1355 freeData();
1356 return continueWrite(desired);
1357 }
1358
1359 uint8_t* data = (uint8_t*)realloc(mData, desired);
1360 if (!data && desired > mDataCapacity) {
1361 mError = NO_MEMORY;
1362 return NO_MEMORY;
1363 }
1364
1365 releaseObjects();
1366
1367 if (data) {
1368 mData = data;
1369 mDataCapacity = desired;
1370 }
1371
1372 mDataSize = mDataPos = 0;
1373 LOGV("restartWrite Setting data size of %p to %d\n", this, mDataSize);
1374 LOGV("restartWrite Setting data pos of %p to %d\n", this, mDataPos);
1375
1376 free(mObjects);
1377 mObjects = NULL;
1378 mObjectsSize = mObjectsCapacity = 0;
1379 mNextObjectHint = 0;
1380 mHasFds = false;
1381 mFdsKnown = true;
1382 mAllowFds = true;
1383
1384 return NO_ERROR;
1385 }
1386
1387 status_t Parcel::continueWrite(size_t desired)
1388 {
1389 // If shrinking, first adjust for any objects that appear
1390 // after the new data size.
1391 size_t objectsSize = mObjectsSize;
1392 if (desired < mDataSize) {
1393 if (desired == 0) {
1394 objectsSize = 0;
1395 } else {
1396 while (objectsSize > 0) {
1397 if (mObjects[objectsSize-1] < desired)
1398 break;
1399 objectsSize--;
1400 }
1401 }
1402 }
1403
1404 if (mOwner) {
1405 // If the size is going to zero, just release the owner's data.
1406 if (desired == 0) {
1407 freeData();
1408 return NO_ERROR;
1409 }
1410
1411 // If there is a different owner, we need to take
1412 // possession.
1413 uint8_t* data = (uint8_t*)malloc(desired);
1414 if (!data) {
1415 mError = NO_MEMORY;
1416 return NO_MEMORY;
1417 }
1418 size_t* objects = NULL;
1419
1420 if (objectsSize) {
1421 objects = (size_t*)malloc(objectsSize*sizeof(size_t));
1422 if (!objects) {
1423 mError = NO_MEMORY;
1424 return NO_MEMORY;
1425 }
1426
1427 // Little hack to only acquire references on objects
1428 // we will be keeping.
1429 size_t oldObjectsSize = mObjectsSize;
1430 mObjectsSize = objectsSize;
1431 acquireObjects();
1432 mObjectsSize = oldObjectsSize;
1433 }
1434
1435 if (mData) {
1436 memcpy(data, mData, mDataSize < desired ? mDataSize : desired);
1437 }
1438 if (objects && mObjects) {
1439 memcpy(objects, mObjects, objectsSize*sizeof(size_t));
1440 }
1441 //LOGI("Freeing data ref of %p (pid=%d)\n", this, getpid());
1442 mOwner(this, mData, mDataSize, mObjects, mObjectsSize, mOwnerCookie);
1443 mOwner = NULL;
1444
1445 mData = data;
1446 mObjects = objects;
1447 mDataSize = (mDataSize < desired) ? mDataSize : desired;
1448 LOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize);
1449 mDataCapacity = desired;
1450 mObjectsSize = mObjectsCapacity = objectsSize;
1451 mNextObjectHint = 0;
1452
1453 } else if (mData) {
1454 if (objectsSize < mObjectsSize) {
1455 // Need to release refs on any objects we are dropping.
1456 const sp<ProcessState> proc(ProcessState::self());
1457 for (size_t i=objectsSize; i<mObjectsSize; i++) {
1458 const flat_binder_object* flat
1459 = reinterpret_cast<flat_binder_object*>(mData+mObjects[i]);
1460 if (flat->type == BINDER_TYPE_FD) {
1461 // will need to rescan because we may have lopped off the only FDs
1462 mFdsKnown = false;
1463 }
1464 release_object(proc, *flat, this);
1465 }
1466 size_t* objects =
1467 (size_t*)realloc(mObjects, objectsSize*sizeof(size_t));
1468 if (objects) {
1469 mObjects = objects;
1470 }
1471 mObjectsSize = objectsSize;
1472 mNextObjectHint = 0;
1473 }
1474
1475 // We own the data, so we can just do a realloc().
1476 if (desired > mDataCapacity) {
1477 uint8_t* data = (uint8_t*)realloc(mData, desired);
1478 if (data) {
1479 mData = data;
1480 mDataCapacity = desired;
1481 } else if (desired > mDataCapacity) {
1482 mError = NO_MEMORY;
1483 return NO_MEMORY;
1484 }
1485 } else {
1486 if (mDataSize > desired) {
1487 mDataSize = desired;
1488 LOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize);
1489 }
1490 if (mDataPos > desired) {
1491 mDataPos = desired;
1492 LOGV("continueWrite Setting data pos of %p to %d\n", this, mDataPos);
1493 }
1494 }
1495
1496 } else {
1497 // This is the first data. Easy!
1498 uint8_t* data = (uint8_t*)malloc(desired);
1499 if (!data) {
1500 mError = NO_MEMORY;
1501 return NO_MEMORY;
1502 }
1503
1504 if(!(mDataCapacity == 0 && mObjects == NULL
1505 && mObjectsCapacity == 0)) {
1506 LOGE("continueWrite: %d/%p/%d/%d", mDataCapacity, mObjects, mObjectsCapacity, desired);
1507 }
1508
1509 mData = data;
1510 mDataSize = mDataPos = 0;
1511 LOGV("continueWrite Setting data size of %p to %d\n", this, mDataSize);
1512 LOGV("continueWrite Setting data pos of %p to %d\n", this, mDataPos);
1513 mDataCapacity = desired;
1514 }
1515
1516 return NO_ERROR;
1517 }
1518
1519 void Parcel::initState()
1520 {
1521 mError = NO_ERROR;
1522 mData = 0;
1523 mDataSize = 0;
1524 mDataCapacity = 0;
1525 mDataPos = 0;
1526 LOGV("initState Setting data size of %p to %d\n", this, mDataSize);
1527 LOGV("initState Setting data pos of %p to %d\n", this, mDataPos);
1528 mObjects = NULL;
1529 mObjectsSize = 0;
1530 mObjectsCapacity = 0;
1531 mNextObjectHint = 0;
1532 mHasFds = false;
1533 mFdsKnown = true;
1534 mAllowFds = true;
1535 mOwner = NULL;
1536 }
1537
1538 void Parcel::scanForFds() const
1539 {
1540 bool hasFds = false;
1541 for (size_t i=0; i<mObjectsSize; i++) {
1542 const flat_binder_object* flat
1543 = reinterpret_cast<const flat_binder_object*>(mData + mObjects[i]);
1544 if (flat->type == BINDER_TYPE_FD) {
1545 hasFds = true;
1546 break;
1547 }
1548 }
1549 mHasFds = hasFds;
1550 mFdsKnown = true;
1551 }
1552
1553 // --- Parcel::Blob ---
1554
1555 Parcel::Blob::Blob() :
1556 mMapped(false), mData(NULL), mSize(0) {
1557 }
1558
1559 Parcel::Blob::~Blob() {
1560 release();
1561 }
1562
1563 void Parcel::Blob::release() {
1564 if (mMapped && mData) {
1565 ::munmap(mData, mSize);
1566 }
1567 clear();
1568 }
1569
1570 void Parcel::Blob::init(bool mapped, void* data, size_t size) {
1571 mMapped = mapped;
1572 mData = data;
1573 mSize = size;
1574 }
1575
1576 void Parcel::Blob::clear() {
1577 mMapped = false;
1578 mData = NULL;
1579 mSize = 0;
1580 }
1581
1582 }; // namespace android
1583