/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#pragma once

#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <binder/RpcSession.h>
#include <binder/RpcThreads.h>
#include <binder/unique_fd.h>

#include <functional>
#include <map>
#include <memory>
#include <optional>
#include <queue>
#include <string>
#include <variant>
#include <vector>

#include <sys/uio.h>

namespace android {

struct RpcWireHeader;

/**
 * Log a lot more information about RPC calls, when debugging issues. Usually,
 * you would want to enable this in only one process. If repeated issues
 * require a specific subset of logs to debug, this could be broken up like
 * IPCThreadState's.
 */
#define SHOULD_LOG_RPC_DETAIL false

#if SHOULD_LOG_RPC_DETAIL
#define LOG_RPC_DETAIL(...) ALOGI(__VA_ARGS__)
#else
#define LOG_RPC_DETAIL(...) ALOGV(__VA_ARGS__) // for type checking
#endif

#define RPC_FLAKE_PRONE false

#if RPC_FLAKE_PRONE
void rpcMaybeWaitToFlake();
#define MAYBE_WAIT_IN_FLAKE_MODE rpcMaybeWaitToFlake()
#else
#define MAYBE_WAIT_IN_FLAKE_MODE do {} while (false)
#endif

/**
 * Abstracts away management of ref counts and the wire format from
 * RpcSession.
 */
class RpcState {
public:
    RpcState();
    ~RpcState();

    [[nodiscard]] static bool validateProtocolVersion(uint32_t version);

    [[nodiscard]] status_t readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
                                                  const sp<RpcSession>& session, uint32_t* version);
    [[nodiscard]] status_t sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);
    [[nodiscard]] status_t readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);

    // TODO(b/182940634): combine some special transactions into one "getServerInfo" call?
    sp<IBinder> getRootObject(const sp<RpcSession::RpcConnection>& connection,
                              const sp<RpcSession>& session);
    [[nodiscard]] status_t getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, size_t* maxThreadsOut);
    [[nodiscard]] status_t getSessionId(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session,
                                        std::vector<uint8_t>* sessionIdOut);

    [[nodiscard]] status_t transact(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<IBinder>& address, uint32_t code, const Parcel& data,
                                    const sp<RpcSession>& session, Parcel* reply, uint32_t flags);
    [[nodiscard]] status_t transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                           uint64_t address, uint32_t code, const Parcel& data,
                                           const sp<RpcSession>& session, Parcel* reply,
                                           uint32_t flags);
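
    // Illustrative sketch (placeholder names; assumes an RpcSession has already established a
    // connection `conn` and a `session`, and that `state`, `binder`, and `kCode` exist at the
    // call site): roughly how a proxy-side caller drives a remote call through this class.
    //
    //     Parcel data, reply;
    //     // ... write arguments into `data`, associated with `session` ...
    //     status_t err = state.transact(conn, binder, kCode, data, session, &reply, 0);
    //     if (err != OK) {
    //         // On failure, the session may no longer be usable; see clear() below.
    //     }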

    /**
     * The ownership model here carries an implicit strong refcount whenever a
     * binder is sent across processes. Since we have a local strong count in
     * sp<> over these objects, we only ever need to keep one of these. So,
     * typically we tell the remote process that we drop all the implicit dec
     * strongs, and we hold onto the last one. 'target' here is the target
     * timesRecd (the number of remaining reference counts) we wish to keep.
     * Typically this should be '0' or '1'. The target is used instead of an
     * explicit decrement count in order to allow multiple threads to lower the
     * number of counts simultaneously. Since we only lower the count to 0 when
     * a binder is deleted, targets of '1' should only be sent when the caller
     * owns a local strong reference to the binder. Larger targets may be used
     * for testing, and to make the function generic, but generally this should
     * be avoided because it would be hard to guarantee another thread doesn't
     * lower the number of held refcounts to '1'. Note also, these refcounts
     * must be sent actively. If they are only sent when binders are deleted,
     * this can cause leaks, since even remote binders carry an implicit strong
     * ref when they are sent to another process.
     */
    [[nodiscard]] status_t sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                                 const sp<RpcSession>& session, uint64_t address,
                                                 size_t target);
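
    // Worked example of the 'target' semantics above (placeholder names): suppose this process
    // has received a remote binder three times, so its node's timesRecd is 3, and a local sp<>
    // to it is still held. A target of 1 tells the remote process to drop the implicit strong
    // refs we hold until exactly one remains; a target of 0 is only sent once the binder is
    // being deleted locally.
    //
    //     (void)state.sendDecStrongToTarget(conn, session, addr, /*target=*/1); // keep one ref
    //     (void)state.sendDecStrongToTarget(conn, session, addr, /*target=*/0); // binder deleted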

    enum class CommandType {
        ANY,
        CONTROL_ONLY,
    };
    [[nodiscard]] status_t getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
                                                const sp<RpcSession>& session, CommandType type);
    [[nodiscard]] status_t drainCommands(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, CommandType type);

    /**
     * Called by Parcel for outgoing binders. This implies one refcount of
     * ownership to the outgoing binder.
     */
    [[nodiscard]] status_t onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                           uint64_t* outAddress);

    /**
     * Called by Parcel for incoming binders. This either returns the refcount
     * to the process, if this process already has one, or it takes ownership
     * of that refcount.
     */
    [[nodiscard]] status_t onBinderEntering(const sp<RpcSession>& session, uint64_t address,
                                            sp<IBinder>* out);

    /**
     * Called on incoming binders to update refcounting information. This should
     * only be called when it is done as part of making progress on a
     * transaction.
     */
    [[nodiscard]] status_t flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                                 const sp<IBinder>& binder);

    /**
     * Called when the RpcSession is shutdown.
     * Send obituaries for each known remote binder with this session.
     */
    [[nodiscard]] status_t sendObituaries(const sp<RpcSession>& session);

    LIBBINDER_INTERNAL_EXPORTED size_t countBinders();
    LIBBINDER_INTERNAL_EXPORTED void dump();

    /**
     * Called when reading or writing data to a session fails, in order to
     * clean up data associated with the session and allow its binders to be
     * cleaned up. Specifically, we have a strong dependency cycle, since
     * BpBinder is OBJECT_LIFETIME_WEAK (so that onAttemptIncStrong may return
     * true).
     *
     *     BpBinder -> RpcSession -> RpcState
     *      ^-----------------------------/
     *
     * In the success case, eventually all refcounts should be propagated over
     * the session, though this could also be called to eagerly clean up
     * the session.
     *
     * WARNING: RpcState is responsible for calling this when the session is
     * no longer recoverable.
     */
    void clear();

private:
    void clear(RpcMutexUniqueLock nodeLock);
    void dumpLocked();

    // Alternative to std::vector<uint8_t> that doesn't abort on allocation failure and caps
    // large allocations so that a single request cannot force this process to allocate too
    // much data.
    struct CommandData {
        explicit CommandData(size_t size);
        bool valid() { return mSize == 0 || mData != nullptr; }
        size_t size() { return mSize; }
        uint8_t* data() { return mData.get(); }
        uint8_t* release() { return mData.release(); }

    private:
        std::unique_ptr<uint8_t[]> mData;
        size_t mSize;
    };

    [[nodiscard]] status_t rpcSend(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const char* what, iovec* iovs, int niovs,
            const std::optional<std::function<status_t()>>& altPoll,
            const std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>* ancillaryFds =
                    nullptr);
    [[nodiscard]] status_t rpcRec(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const char* what, iovec* iovs, int niovs,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>* ancillaryFds =
                    nullptr);

    [[nodiscard]] status_t waitForReply(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session, Parcel* reply);
    [[nodiscard]] status_t processCommand(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const RpcWireHeader& command, CommandType type,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processTransact(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const RpcWireHeader& command,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processTransactInternal(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            CommandData transactionData,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processDecStrong(const sp<RpcSession::RpcConnection>& connection,
                                            const sp<RpcSession>& session,
                                            const RpcWireHeader& command);

    // Whether `parcel` is compatible with `session`.
    [[nodiscard]] static status_t validateParcel(const sp<RpcSession>& session,
                                                 const Parcel& parcel, std::string* errorMsg);

    struct BinderNode {
        // Two cases:
        // A - local binder we are serving
        // B - remote binder, we are sending transactions to
        wp<IBinder> binder;

        // if timesSent > 0, this will be equal to binder.promote()
        sp<IBinder> sentRef;

        // Number of times we've sent this binder out of process, which
        // translates to an implicit strong count. A client must send RPC binder
        // socket's dec ref for each time it is sent out of process in order to
        // deallocate it. Note, a proxy binder we are holding onto might be
        // sent (this is important when the only remaining refcount of this
        // binder is the one associated with a transaction sending it back to
        // its server)
        size_t timesSent = 0;

        // Number of times we've received this binder, each time corresponds to
        // a reference we hold over the wire (not a local incStrong/decStrong)
        size_t timesRecd = 0;

        // transaction ID, for async transactions
        uint64_t asyncNumber = 0;

        //
        // CASE A - local binder we are serving
        //

        // async transaction queue, _only_ for local binder
        struct AsyncTodo {
            sp<IBinder> ref;
            CommandData data;
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>> ancillaryFds;
            uint64_t asyncNumber = 0;

            bool operator<(const AsyncTodo& o) const {
                return asyncNumber > /* !!! */ o.asyncNumber;
            }
        };
        std::priority_queue<AsyncTodo> asyncTodo;

        //
        // CASE B - remote binder, we are sending transactions to
        //

        // (no additional data specific to remote binders)

        std::string toString() const;
    };
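
    // Note on AsyncTodo ordering: std::priority_queue pops the largest element according to
    // operator<, so the intentionally inverted comparison above (asyncNumber > o.asyncNumber)
    // makes asyncTodo behave as a min-heap on asyncNumber. For example, entries queued with
    // asyncNumbers {2, 0, 1} are popped as 0, 1, 2, so async transactions on a binder are
    // processed in the order their numbers were assigned.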

    // Checks if there is any reference left to a node and erases it. If this
    // is the last node, shuts down the session.
    //
    // The node lock is passed here for convenience, so that we can release it
    // and terminate the session, but we could leave it up to the caller
    // by returning a continuation if we needed to erase multiple specific
    // nodes. It may be tempting to allow the client to keep on holding the
    // lock and instead just return whether or not we should shut down, but
    // this introduces the possibility that another thread calls
    // getRootBinder and thinks it is valid, rather than immediately getting
    // an error.
    sp<IBinder> tryEraseNode(const sp<RpcSession>& session, RpcMutexUniqueLock nodeLock,
                             std::map<uint64_t, BinderNode>::iterator& it);

    // true - success
    // false - session shutdown, halt
    [[nodiscard]] bool nodeProgressAsyncNumber(BinderNode* node);

    RpcMutex mNodeMutex;
    bool mTerminated = false;

    uint32_t mNextId = 0;

    // binders known by both sides of a session
    std::map<uint64_t, BinderNode> mNodeForAddress;
};

} // namespace android