/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <android-base/unique_fd.h>
#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <binder/RpcSession.h>

#include <functional>
#include <map>
#include <memory>
#include <mutex>
#include <optional>
#include <queue>
#include <vector>

#include <sys/uio.h>

namespace android {

struct RpcWireHeader;

/**
 * Log a lot more information about RPC calls, when debugging issues. Usually,
 * you would want to enable this in only one process. If repeated issues require
 * a specific subset of logs to debug, this could be broken up like
 * IPCThreadState's.
 */
#define SHOULD_LOG_RPC_DETAIL false

#if SHOULD_LOG_RPC_DETAIL
#define LOG_RPC_DETAIL(...) ALOGI(__VA_ARGS__)
#else
#define LOG_RPC_DETAIL(...) ALOGV(__VA_ARGS__) // for type checking
#endif
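
// Illustrative usage only (this call site is an assumption, not part of this
// header): RpcState.cpp can trace wire-level events with something like
//
//     LOG_RPC_DETAIL("sending dec strong on RPC binder %" PRIu64, address);
//
// When SHOULD_LOG_RPC_DETAIL is false, the arguments still pass through ALOGV,
// so they stay type checked even though verbose logging is normally disabled.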

#define RPC_FLAKE_PRONE false

#if RPC_FLAKE_PRONE
void rpcMaybeWaitToFlake();
#define MAYBE_WAIT_IN_FLAKE_MODE rpcMaybeWaitToFlake()
#else
#define MAYBE_WAIT_IN_FLAKE_MODE do {} while (false)
#endif
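
// Sketch of the intent (an assumption about behavior defined in RpcState.cpp,
// not something this header guarantees): with RPC_FLAKE_PRONE enabled,
// MAYBE_WAIT_IN_FLAKE_MODE can be dropped in before socket I/O so that a small
// randomized delay shakes out ordering-dependent bugs, e.g.
//
//     MAYBE_WAIT_IN_FLAKE_MODE;
//     // ... then perform the send/recv as usual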

/**
 * Abstracts away management of ref counts and the wire format from
 * RpcSession
 */
class RpcState {
public:
    RpcState();
    ~RpcState();

    [[nodiscard]] status_t readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
                                                  const sp<RpcSession>& session, uint32_t* version);
    [[nodiscard]] status_t sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);
    [[nodiscard]] status_t readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);

    // TODO(b/182940634): combine some special transactions into one "getServerInfo" call?
    sp<IBinder> getRootObject(const sp<RpcSession::RpcConnection>& connection,
                              const sp<RpcSession>& session);
    [[nodiscard]] status_t getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, size_t* maxThreadsOut);
    [[nodiscard]] status_t getSessionId(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session,
                                        std::vector<uint8_t>* sessionIdOut);

    [[nodiscard]] status_t transact(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<IBinder>& address, uint32_t code, const Parcel& data,
                                    const sp<RpcSession>& session, Parcel* reply, uint32_t flags);
    [[nodiscard]] status_t transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                           uint64_t address, uint32_t code, const Parcel& data,
                                           const sp<RpcSession>& session, Parcel* reply,
                                           uint32_t flags);

    /**
     * The ownership model here carries an implicit strong refcount whenever a
     * binder is sent across processes. Since we have a local strong count in
     * sp<> over these objects, we only ever need to keep one of these. So,
     * typically we tell the remote process that we drop all the implicit dec
     * strongs, and we hold onto the last one. 'target' here is the target
     * timesRecd (the number of remaining reference counts) we wish to keep.
     * Typically this should be '0' or '1'. The target is used instead of an
     * explicit decrement count in order to allow multiple threads to lower the
     * number of counts simultaneously. Since we only lower the count to 0 when
     * a binder is deleted, targets of '1' should only be sent when the caller
     * owns a local strong reference to the binder. Larger targets may be used
     * for testing, and to make the function generic, but generally this should
     * be avoided because it would be hard to guarantee another thread doesn't
     * lower the number of held refcounts to '1'. Note also, these refcounts
     * must be sent actively. If they are sent when binders are deleted, this
     * can cause leaks, since even remote binders carry an implicit strong ref
     * when they are sent to another process.
     */
    [[nodiscard]] status_t sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                                 const sp<RpcSession>& session, uint64_t address,
                                                 size_t target);
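
    // Worked example (an illustration of the comment above, not taken from the
    // original header): if this process received the same remote binder three
    // times, its node has timesRecd == 3 while only one local sp<> (the
    // BpBinder) is actually needed. Sending target == 1 lets the remote side
    // drop the two redundant implicit refs while keeping the one that backs our
    // proxy alive; once the last local sp<> goes away, a call with target == 0
    // releases the remaining ref.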

    enum class CommandType {
        ANY,
        CONTROL_ONLY,
    };
    [[nodiscard]] status_t getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
                                                const sp<RpcSession>& session, CommandType type);
    [[nodiscard]] status_t drainCommands(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, CommandType type);

    /**
     * Called by Parcel for outgoing binders. This implies one refcount of
     * ownership to the outgoing binder.
     */
    [[nodiscard]] status_t onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                           uint64_t* outAddress);

    /**
     * Called by Parcel for incoming binders. This either returns the refcount
     * to the process, if this process already has one, or it takes ownership
     * of that refcount.
     */
    [[nodiscard]] status_t onBinderEntering(const sp<RpcSession>& session, uint64_t address,
                                            sp<IBinder>* out);
    /**
     * Called on incoming binders to update refcounting information. This should
     * only be called when it is done as part of making progress on a
     * transaction.
     */
    [[nodiscard]] status_t flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                                 const sp<IBinder>& binder);
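
    // Rough lifecycle sketch (inferred from the comments above; the exact call
    // order lives in Parcel/RpcState.cpp, so treat this as an illustration):
    // when a Parcel carrying a binder is written for this session,
    // onBinderLeaving() maps it to a 64-bit address and records one implicit
    // wire ref (timesSent). On the receiving side, onBinderEntering() resolves
    // the address to a local or proxy binder and accounts for the received ref
    // (timesRecd). While the transaction makes progress, flushExcessBinderRefs()
    // can push redundant wire refs back so only the needed count remains.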

    size_t countBinders();
    void dump();

    /**
     * Called when reading or writing data to a session fails, in order to
     * clean up data associated with the session and to clean up its binders.
     * Specifically, we have a strong dependency cycle, since BpBinder is
     * OBJECT_LIFETIME_WEAK (so that onAttemptIncStrong may return true).
     *
     *     BpBinder -> RpcSession -> RpcState
     *      ^-----------------------------/
     *
     * In the success case, eventually all refcounts should be propagated over
     * the session, though this could also be called to eagerly clean up
     * the session.
     *
     * WARNING: RpcState is responsible for calling this when the session is
     * no longer recoverable.
     */
    void clear();

private:
    void dumpLocked();

    // Alternative to std::vector<uint8_t> that doesn't abort on allocation failure and caps
    // large allocations to avoid a request causing us to allocate too much data.
    struct CommandData {
        explicit CommandData(size_t size);
        bool valid() { return mSize == 0 || mData != nullptr; }
        size_t size() { return mSize; }
        uint8_t* data() { return mData.get(); }
        uint8_t* release() { return mData.release(); }

    private:
        std::unique_ptr<uint8_t[]> mData;
        size_t mSize;
    };
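
    // Minimal usage sketch (illustrative; the receive path in RpcState.cpp is
    // assumed to look roughly like this): because construction does not abort
    // on allocation failure, callers must check valid() before using data().
    //
    //     CommandData transactionData(command.bodySize);
    //     if (!transactionData.valid()) return NO_MEMORY;
    //     iovec iov{transactionData.data(), transactionData.size()};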

    [[nodiscard]] status_t rpcSend(const sp<RpcSession::RpcConnection>& connection,
                                   const sp<RpcSession>& session, const char* what, iovec* iovs,
                                   int niovs, const std::function<status_t()>& altPoll = nullptr);
    [[nodiscard]] status_t rpcRec(const sp<RpcSession::RpcConnection>& connection,
                                  const sp<RpcSession>& session, const char* what, iovec* iovs,
                                  int niovs);

    [[nodiscard]] status_t waitForReply(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session, Parcel* reply);
    [[nodiscard]] status_t processCommand(const sp<RpcSession::RpcConnection>& connection,
                                          const sp<RpcSession>& session,
                                          const RpcWireHeader& command, CommandType type);
    [[nodiscard]] status_t processTransact(const sp<RpcSession::RpcConnection>& connection,
                                           const sp<RpcSession>& session,
                                           const RpcWireHeader& command);
    [[nodiscard]] status_t processTransactInternal(const sp<RpcSession::RpcConnection>& connection,
                                                   const sp<RpcSession>& session,
                                                   CommandData transactionData);
    [[nodiscard]] status_t processDecStrong(const sp<RpcSession::RpcConnection>& connection,
                                            const sp<RpcSession>& session,
                                            const RpcWireHeader& command);

    struct BinderNode {
        // Two cases:
        // A - local binder we are serving
        // B - remote binder, we are sending transactions to
        wp<IBinder> binder;

        // if timesSent > 0, this will be equal to binder.promote()
        sp<IBinder> sentRef;

        // Number of times we've sent this binder out of process, which
        // translates to an implicit strong count. A client must send a dec ref
        // over the RPC socket for each time the binder was sent out of process
        // in order for it to be deallocated. Note, a proxy binder we are
        // holding onto might be sent (this is important when the only remaining
        // refcount of this binder is the one associated with a transaction
        // sending it back to its server).
        size_t timesSent = 0;

        // Number of times we've received this binder, each time corresponds to
        // a reference we hold over the wire (not a local incStrong/decStrong)
        size_t timesRecd = 0;

        // transaction ID, for async transactions
        uint64_t asyncNumber = 0;

        //
        // CASE A - local binder we are serving
        //

        // async transaction queue, _only_ for local binder
        struct AsyncTodo {
            sp<IBinder> ref;
            CommandData data;
            uint64_t asyncNumber = 0;

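            // The comparison below is intentionally inverted: std::priority_queue
            // is a max-heap, so treating a greater asyncNumber as lesser priority
            // makes the queue pop the smallest asyncNumber first, i.e. async
            // transactions are processed in the order they were sent.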
            bool operator<(const AsyncTodo& o) const {
                return asyncNumber > /* !!! */ o.asyncNumber;
            }
        };
        std::priority_queue<AsyncTodo> asyncTodo;

        //
        // CASE B - remote binder, we are sending transactions to
        //

        // (no additional data specific to remote binders)
    };

    // Checks if there is any reference left to a node and erases it. If the
    // erase happens, and the BinderNode kept a strong reference to the binder,
    // this returns that strong reference, so that it can be dropped after any
    // locks are released.
    sp<IBinder> tryEraseNode(std::map<uint64_t, BinderNode>::iterator& it);
    // true - success
    // false - session shutdown, halt
    [[nodiscard]] bool nodeProgressAsyncNumber(BinderNode* node);

    std::mutex mNodeMutex;
    bool mTerminated = false;
    uint32_t mNextId = 0;
    // binders known by both sides of a session
    std::map<uint64_t, BinderNode> mNodeForAddress;
};

} // namespace android