/*
 * Copyright (C) 2020 The Android Open Source Project
 *
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *      http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#pragma once

#include <binder/Functional.h>
#include <binder/IBinder.h>
#include <binder/Parcel.h>
#include <binder/RpcSession.h>
#include <binder/RpcThreads.h>
#include <binder/unique_fd.h>

#include <map>
#include <optional>
#include <queue>

#include <sys/uio.h>

namespace android {

struct RpcWireHeader;

/**
 * Log a lot more information about RPC calls when debugging issues. Usually,
 * you would want to enable this in only one process. If repeated issues require
 * a specific subset of logs to debug, this could be broken up like
 * IPCThreadState's.
 */
#define SHOULD_LOG_RPC_DETAIL false

#if SHOULD_LOG_RPC_DETAIL
#define LOG_RPC_DETAIL(...) ALOGI(__VA_ARGS__)
#else
#define LOG_RPC_DETAIL(...) ALOGV(__VA_ARGS__) // for type checking
#endif

#define RPC_FLAKE_PRONE false

#if RPC_FLAKE_PRONE
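// When flake mode is enabled, this function (defined out-of-line, presumably
// in RpcState.cpp) is expected to inject a short random delay so that
// timing-dependent bugs surface more reliably in tests.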
void rpcMaybeWaitToFlake();
#define MAYBE_WAIT_IN_FLAKE_MODE rpcMaybeWaitToFlake()
#else
#define MAYBE_WAIT_IN_FLAKE_MODE do {} while (false)
#endif

/**
 * Abstracts away management of ref counts and the wire format from
 * RpcSession
 */
class RpcState {
public:
    RpcState();
    ~RpcState();

    [[nodiscard]] static bool validateProtocolVersion(uint32_t version);

    [[nodiscard]] status_t readNewSessionResponse(const sp<RpcSession::RpcConnection>& connection,
                                                  const sp<RpcSession>& session, uint32_t* version);
    [[nodiscard]] status_t sendConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);
    [[nodiscard]] status_t readConnectionInit(const sp<RpcSession::RpcConnection>& connection,
                                              const sp<RpcSession>& session);

    // TODO(b/182940634): combine some special transactions into one "getServerInfo" call?
    sp<IBinder> getRootObject(const sp<RpcSession::RpcConnection>& connection,
                              const sp<RpcSession>& session);
    [[nodiscard]] status_t getMaxThreads(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, size_t* maxThreadsOut);
    [[nodiscard]] status_t getSessionId(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session,
                                        std::vector<uint8_t>* sessionIdOut);
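
    // Illustrative client-side use of the queries above (a sketch; 'state',
    // 'connection', and 'session' are hypothetical values, and error
    // handling is elided):
    //
    //     sp<IBinder> root = state.getRootObject(connection, session);
    //     size_t maxThreads = 0;
    //     if (state.getMaxThreads(connection, session, &maxThreads) != OK) {
    //         // the session is not usable
    //     }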

    [[nodiscard]] status_t transact(const sp<RpcSession::RpcConnection>& connection,
                                    const sp<IBinder>& address, uint32_t code, const Parcel& data,
                                    const sp<RpcSession>& session, Parcel* reply, uint32_t flags);
    [[nodiscard]] status_t transactAddress(const sp<RpcSession::RpcConnection>& connection,
                                           uint64_t address, uint32_t code, const Parcel& data,
                                           const sp<RpcSession>& session, Parcel* reply,
                                           uint32_t flags);

    /**
     * The ownership model here carries an implicit strong refcount whenever a
     * binder is sent across processes. Since we have a local strong count in
     * sp<> over these objects, we only ever need to keep one of these. So,
     * typically we tell the remote process that we drop all the implicit dec
     * strongs, and we hold onto the last one. 'target' here is the target
     * timesRecd (the number of remaining reference counts) we wish to keep.
     * Typically this should be '0' or '1'. The target is used instead of an
     * explicit decrement count in order to allow multiple threads to lower the
     * number of counts simultaneously. Since we only lower the count to 0 when
     * a binder is deleted, targets of '1' should only be sent when the caller
     * owns a local strong reference to the binder. Larger targets may be used
     * for testing, and to make the function generic, but generally this should
     * be avoided because it would be hard to guarantee another thread doesn't
     * lower the number of held refcounts to '1'. Note also, these refcounts
     * must be sent actively. If they are sent when binders are deleted, this
     * can cause leaks, since even remote binders carry an implicit strong ref
     * when they are sent to another process.
     */
    [[nodiscard]] status_t sendDecStrongToTarget(const sp<RpcSession::RpcConnection>& connection,
                                                 const sp<RpcSession>& session, uint64_t address,
                                                 size_t target);
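
    // Worked example of the 'target' semantics above (a sketch; 'address' is
    // a hypothetical remote binder address for which this process holds a
    // local sp<>):
    //
    //     // Drop every implicit remote ref except one. Safe because the
    //     // local sp<> means no other thread can lower the count to 0.
    //     status_t err = sendDecStrongToTarget(connection, session, address,
    //                                          /*target=*/1);
    //
    //     // Drop all remote refs, e.g. when the binder is being deleted:
    //     err = sendDecStrongToTarget(connection, session, address,
    //                                 /*target=*/0);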

    enum class CommandType {
        ANY,
        CONTROL_ONLY,
    };
    [[nodiscard]] status_t getAndExecuteCommand(const sp<RpcSession::RpcConnection>& connection,
                                                const sp<RpcSession>& session, CommandType type);
    [[nodiscard]] status_t drainCommands(const sp<RpcSession::RpcConnection>& connection,
                                         const sp<RpcSession>& session, CommandType type);
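
    // Usage sketch (how a session thread might drive this; not a verbatim
    // copy of RpcSession's loop): pull one command off the wire and execute
    // it, repeating until an error or shutdown.
    //
    //     status_t error = OK;
    //     while (error == OK) {
    //         error = getAndExecuteCommand(connection, session,
    //                                      CommandType::ANY);
    //     }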

    /**
     * Called by Parcel for outgoing binders. This implies one refcount of
     * ownership to the outgoing binder.
     */
    [[nodiscard]] status_t onBinderLeaving(const sp<RpcSession>& session, const sp<IBinder>& binder,
                                           uint64_t* outAddress);

    /**
     * Called by Parcel for incoming binders. This either returns the refcount
     * to the process, if this process already has one, or it takes ownership
     * of that refcount.
     */
    [[nodiscard]] status_t onBinderEntering(const sp<RpcSession>& session, uint64_t address,
                                            sp<IBinder>* out);
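
    // Round-trip sketch: Parcel asks for an address when a binder leaves this
    // process, and resolves an address back into a binder when one enters
    // (illustrative only; the real call sites live in Parcel):
    //
    //     // sending side:
    //     uint64_t address;
    //     if (onBinderLeaving(session, binder, &address) == OK) {
    //         // write 'address' into the wire format
    //     }
    //
    //     // receiving side:
    //     sp<IBinder> binder;
    //     if (onBinderEntering(session, address, &binder) == OK) {
    //         // 'binder' now carries the wire refcount described above
    //     }
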
    /**
     * Called on incoming binders to update refcounting information. This should
     * only be called when it is done as part of making progress on a
     * transaction.
     */
    [[nodiscard]] status_t flushExcessBinderRefs(const sp<RpcSession>& session, uint64_t address,
                                                 const sp<IBinder>& binder);
    /**
     * Called when the RpcSession is shut down.
     * Sends obituaries for each remote binder known on this session.
     */
    [[nodiscard]] status_t sendObituaries(const sp<RpcSession>& session);

    LIBBINDER_INTERNAL_EXPORTED size_t countBinders();
    LIBBINDER_INTERNAL_EXPORTED void dump();

    /**
     * Called when reading or writing data to a session fails. Cleans up data
     * associated with the session in order to clean up binders.
     * Specifically, we have a strong dependency cycle, since BpBinder is
     * OBJECT_LIFETIME_WEAK (so that onAttemptIncStrong may return true).
     *
     *     BpBinder -> RpcSession -> RpcState
     *      ^-----------------------------/
     *
     * In the success case, eventually all refcounts should be propagated over
     * the session, though this could also be called to eagerly clean up
     * the session.
     *
     * WARNING: RpcState is responsible for calling this when the session is
     * no longer recoverable.
     */
    void clear();

private:
    void clear(RpcMutexUniqueLock nodeLock);
    void dumpLocked();

    // Alternative to std::vector<uint8_t> that doesn't abort on allocation failure and caps
    // large allocations so that a request can't force this process to allocate too much data.
    struct CommandData {
        explicit CommandData(size_t size);
        bool valid() { return mSize == 0 || mData != nullptr; }
        size_t size() { return mSize; }
        uint8_t* data() { return mData.get(); }
        uint8_t* release() { return mData.release(); }

    private:
        std::unique_ptr<uint8_t[]> mData;
        size_t mSize;
    };
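
    // Usage sketch: since construction can fail instead of aborting, callers
    // are expected to check valid() before touching the buffer ('bodySize'
    // here is hypothetical):
    //
    //     CommandData data(bodySize);
    //     if (!data.valid()) return NO_MEMORY;
    //     // ... read bodySize bytes into data.data() ...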

    [[nodiscard]] status_t rpcSend(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const char* what, iovec* iovs, int niovs,
            const std::optional<binder::impl::SmallFunction<status_t()>>& altPoll,
            const std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>* ancillaryFds =
                    nullptr);
    [[nodiscard]] status_t rpcRec(const sp<RpcSession::RpcConnection>& connection,
                                  const sp<RpcSession>& session, const char* what, iovec* iovs,
                                  int niovs,
                                  std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>*
                                          ancillaryFds = nullptr);

    [[nodiscard]] status_t waitForReply(const sp<RpcSession::RpcConnection>& connection,
                                        const sp<RpcSession>& session, Parcel* reply);
    [[nodiscard]] status_t processCommand(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const RpcWireHeader& command, CommandType type,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processTransact(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            const RpcWireHeader& command,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processTransactInternal(
            const sp<RpcSession::RpcConnection>& connection, const sp<RpcSession>& session,
            CommandData transactionData,
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>>&& ancillaryFds);
    [[nodiscard]] status_t processDecStrong(const sp<RpcSession::RpcConnection>& connection,
                                            const sp<RpcSession>& session,
                                            const RpcWireHeader& command);

    // Whether `parcel` is compatible with `session`.
    [[nodiscard]] static status_t validateParcel(const sp<RpcSession>& session,
                                                 const Parcel& parcel, std::string* errorMsg);

    struct BinderNode {
        // Two cases:
        // A - local binder we are serving
        // B - remote binder, we are sending transactions to
        wp<IBinder> binder;

        // if timesSent > 0, this will be equal to binder.promote()
        sp<IBinder> sentRef;

        // Number of times we've sent this binder out of process, which
        // translates to an implicit strong count. A client must send a dec
        // ref over the RPC binder socket for each time it is sent out of
        // process in order to deallocate it. Note, a proxy binder we are
        // holding onto might be sent (this is important when the only
        // remaining refcount of this binder is the one associated with a
        // transaction sending it back to its server)
        size_t timesSent = 0;

        // Number of times we've received this binder; each time corresponds
        // to a reference we hold over the wire (not a local
        // incStrong/decStrong)
        size_t timesRecd = 0;

        // transaction ID, for async transactions
        uint64_t asyncNumber = 0;

        //
        // CASE A - local binder we are serving
        //

        // async transaction queue, _only_ for local binder
        struct AsyncTodo {
            sp<IBinder> ref;
            CommandData data;
            std::vector<std::variant<binder::unique_fd, binder::borrowed_fd>> ancillaryFds;
            uint64_t asyncNumber = 0;

            bool operator<(const AsyncTodo& o) const {
                return asyncNumber > /* !!! */ o.asyncNumber;
            }
        };
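        // Note: std::priority_queue pops the largest element first, so the
        // reversed comparison above (marked '!!!') makes this a min-heap on
        // asyncNumber: queued async transactions execute in submission order.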
        std::priority_queue<AsyncTodo> asyncTodo;

        //
        // CASE B - remote binder, we are sending transactions to
        //

        // (no additional data specific to remote binders)

        std::string toString() const;
    };

    // Checks if there is any reference left to a node and erases it. If this
    // is the last node, shuts down the session.
    //
    // Node lock is passed here for convenience, so that we can release it
    // and terminate the session, but we could leave it up to the caller
    // by returning a continuation if we needed to erase multiple specific
    // nodes. It may be tempting to allow the client to keep on holding the
    // lock and instead just return whether or not we should shut down, but
    // this introduces the possibility that another thread calls
    // getRootObject and thinks it is valid, rather than immediately getting
    // an error.
    sp<IBinder> tryEraseNode(const sp<RpcSession>& session, RpcMutexUniqueLock nodeLock,
                             std::map<uint64_t, BinderNode>::iterator& it);

    // true - success
    // false - session shutdown, halt
    [[nodiscard]] bool nodeProgressAsyncNumber(BinderNode* node);

    RpcMutex mNodeMutex;
    bool mTerminated = false;
    uint32_t mNextId = 0;
    // binders known by both sides of a session
    std::map<uint64_t, BinderNode> mNodeForAddress;
};

} // namespace android