• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2020 The Android Open Source Project
3  *
4  * Licensed under the Apache License, Version 2.0 (the "License");
5  * you may not use this file except in compliance with the License.
6  * You may obtain a copy of the License at
7  *
8  *      http://www.apache.org/licenses/LICENSE-2.0
9  *
10  * Unless required by applicable law or agreed to in writing, software
11  * distributed under the License is distributed on an "AS IS" BASIS,
12  * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13  * See the License for the specific language governing permissions and
14  * limitations under the License.
15  */
16 
17 #define LOG_TAG "RpcSession"
18 
19 #include <binder/RpcSession.h>
20 
21 #include <inttypes.h>
22 #include <unistd.h>
23 
24 #include <string_view>
25 
26 #include <binder/Parcel.h>
27 #include <binder/RpcServer.h>
28 #include <binder/Stability.h>
29 #include <utils/String8.h>
30 
31 #include "RpcSocketAddress.h"
32 #include "RpcState.h"
33 #include "RpcWireFormat.h"
34 
35 #ifdef __GLIBC__
36 extern "C" pid_t gettid();
37 #endif
38 
39 namespace android {
40 
41 using base::unique_fd;
42 
RpcSession()43 RpcSession::RpcSession() {
44     LOG_RPC_DETAIL("RpcSession created %p", this);
45 
46     mState = std::make_unique<RpcState>();
47 }
~RpcSession()48 RpcSession::~RpcSession() {
49     LOG_RPC_DETAIL("RpcSession destroyed %p", this);
50 
51     std::lock_guard<std::mutex> _l(mMutex);
52     LOG_ALWAYS_FATAL_IF(mServerConnections.size() != 0,
53                         "Should not be able to destroy a session with servers in use.");
54 }
55 
make()56 sp<RpcSession> RpcSession::make() {
57     return sp<RpcSession>::make();
58 }
59 
setupUnixDomainClient(const char * path)60 bool RpcSession::setupUnixDomainClient(const char* path) {
61     return setupSocketClient(UnixSocketAddress(path));
62 }
63 
setupVsockClient(unsigned int cid,unsigned int port)64 bool RpcSession::setupVsockClient(unsigned int cid, unsigned int port) {
65     return setupSocketClient(VsockSocketAddress(cid, port));
66 }
67 
setupInetClient(const char * addr,unsigned int port)68 bool RpcSession::setupInetClient(const char* addr, unsigned int port) {
69     auto aiStart = InetSocketAddress::getAddrInfo(addr, port);
70     if (aiStart == nullptr) return false;
71     for (auto ai = aiStart.get(); ai != nullptr; ai = ai->ai_next) {
72         InetSocketAddress socketAddress(ai->ai_addr, ai->ai_addrlen, addr, port);
73         if (setupSocketClient(socketAddress)) return true;
74     }
75     ALOGE("None of the socket address resolved for %s:%u can be added as inet client.", addr, port);
76     return false;
77 }
78 
addNullDebuggingClient()79 bool RpcSession::addNullDebuggingClient() {
80     unique_fd serverFd(TEMP_FAILURE_RETRY(open("/dev/null", O_WRONLY | O_CLOEXEC)));
81 
82     if (serverFd == -1) {
83         ALOGE("Could not connect to /dev/null: %s", strerror(errno));
84         return false;
85     }
86 
87     addClientConnection(std::move(serverFd));
88     return true;
89 }
90 
getRootObject()91 sp<IBinder> RpcSession::getRootObject() {
92     ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
93     return state()->getRootObject(connection.fd(), sp<RpcSession>::fromExisting(this));
94 }
95 
getRemoteMaxThreads(size_t * maxThreads)96 status_t RpcSession::getRemoteMaxThreads(size_t* maxThreads) {
97     ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
98     return state()->getMaxThreads(connection.fd(), sp<RpcSession>::fromExisting(this), maxThreads);
99 }
100 
transact(const RpcAddress & address,uint32_t code,const Parcel & data,Parcel * reply,uint32_t flags)101 status_t RpcSession::transact(const RpcAddress& address, uint32_t code, const Parcel& data,
102                               Parcel* reply, uint32_t flags) {
103     ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
104                                    (flags & IBinder::FLAG_ONEWAY) ? ConnectionUse::CLIENT_ASYNC
105                                                                   : ConnectionUse::CLIENT);
106     return state()->transact(connection.fd(), address, code, data,
107                              sp<RpcSession>::fromExisting(this), reply, flags);
108 }
109 
sendDecStrong(const RpcAddress & address)110 status_t RpcSession::sendDecStrong(const RpcAddress& address) {
111     ExclusiveConnection connection(sp<RpcSession>::fromExisting(this),
112                                    ConnectionUse::CLIENT_REFCOUNT);
113     return state()->sendDecStrong(connection.fd(), address);
114 }
115 
readId()116 status_t RpcSession::readId() {
117     {
118         std::lock_guard<std::mutex> _l(mMutex);
119         LOG_ALWAYS_FATAL_IF(mForServer != nullptr, "Can only update ID for client.");
120     }
121 
122     int32_t id;
123 
124     ExclusiveConnection connection(sp<RpcSession>::fromExisting(this), ConnectionUse::CLIENT);
125     status_t status =
126             state()->getSessionId(connection.fd(), sp<RpcSession>::fromExisting(this), &id);
127     if (status != OK) return status;
128 
129     LOG_RPC_DETAIL("RpcSession %p has id %d", this, id);
130     mId = id;
131     return OK;
132 }
133 
preJoin(std::thread thread)134 void RpcSession::preJoin(std::thread thread) {
135     LOG_ALWAYS_FATAL_IF(thread.get_id() != std::this_thread::get_id(), "Must own this thread");
136 
137     {
138         std::lock_guard<std::mutex> _l(mMutex);
139         mThreads[thread.get_id()] = std::move(thread);
140     }
141 }
142 
// Services one server connection on the calling thread: registers the fd,
// loops executing incoming commands until an error (including disconnect)
// occurs, then unregisters the connection and removes this thread's
// std::thread entry from mThreads.
void RpcSession::join(unique_fd client) {
    // must be registered to allow arbitrary client code executing commands to
    // be able to do nested calls (we can't only read from it)
    sp<RpcConnection> connection = assignServerToThisThread(std::move(client));

    while (true) {
        status_t error =
                state()->getAndExecuteCommand(connection->fd, sp<RpcSession>::fromExisting(this));

        if (error != OK) {
            ALOGI("Binder connection thread closing w/ status %s", statusToString(error).c_str());
            break;
        }
    }

    LOG_ALWAYS_FATAL_IF(!removeServerConnection(connection),
                        "bad state: connection object guaranteed to be in list");

    {
        std::lock_guard<std::mutex> _l(mMutex);
        auto it = mThreads.find(std::this_thread::get_id());
        LOG_ALWAYS_FATAL_IF(it == mThreads.end());
        // This thread removes its own std::thread object from the map. It must
        // detach before erasing: a thread cannot join itself, and destroying a
        // joinable std::thread calls std::terminate.
        it->second.detach();
        mThreads.erase(it);
    }
}
169 
terminateLocked()170 void RpcSession::terminateLocked() {
171     // TODO(b/185167543):
172     // - kindly notify other side of the connection of termination (can't be
173     // locked)
174     // - prevent new client/servers from being added
175     // - stop all threads which are currently reading/writing
176     // - terminate RpcState?
177 
178     if (mTerminated) return;
179 
180     sp<RpcServer> server = mForServer.promote();
181     if (server) {
182         server->onSessionTerminating(sp<RpcSession>::fromExisting(this));
183     }
184 }
185 
server()186 wp<RpcServer> RpcSession::server() {
187     return mForServer;
188 }
189 
setupSocketClient(const RpcSocketAddress & addr)190 bool RpcSession::setupSocketClient(const RpcSocketAddress& addr) {
191     {
192         std::lock_guard<std::mutex> _l(mMutex);
193         LOG_ALWAYS_FATAL_IF(mClientConnections.size() != 0,
194                             "Must only setup session once, but already has %zu clients",
195                             mClientConnections.size());
196     }
197 
198     if (!setupOneSocketClient(addr, RPC_SESSION_ID_NEW)) return false;
199 
200     // TODO(b/185167543): we should add additional sessions dynamically
201     // instead of all at once.
202     // TODO(b/186470974): first risk of blocking
203     size_t numThreadsAvailable;
204     if (status_t status = getRemoteMaxThreads(&numThreadsAvailable); status != OK) {
205         ALOGE("Could not get max threads after initial session to %s: %s", addr.toString().c_str(),
206               statusToString(status).c_str());
207         return false;
208     }
209 
210     if (status_t status = readId(); status != OK) {
211         ALOGE("Could not get session id after initial session to %s; %s", addr.toString().c_str(),
212               statusToString(status).c_str());
213         return false;
214     }
215 
216     // we've already setup one client
217     for (size_t i = 0; i + 1 < numThreadsAvailable; i++) {
218         // TODO(b/185167543): shutdown existing connections?
219         if (!setupOneSocketClient(addr, mId.value())) return false;
220     }
221 
222     return true;
223 }
224 
setupOneSocketClient(const RpcSocketAddress & addr,int32_t id)225 bool RpcSession::setupOneSocketClient(const RpcSocketAddress& addr, int32_t id) {
226     for (size_t tries = 0; tries < 5; tries++) {
227         if (tries > 0) usleep(10000);
228 
229         unique_fd serverFd(
230                 TEMP_FAILURE_RETRY(socket(addr.addr()->sa_family, SOCK_STREAM | SOCK_CLOEXEC, 0)));
231         if (serverFd == -1) {
232             int savedErrno = errno;
233             ALOGE("Could not create socket at %s: %s", addr.toString().c_str(),
234                   strerror(savedErrno));
235             return false;
236         }
237 
238         if (0 != TEMP_FAILURE_RETRY(connect(serverFd.get(), addr.addr(), addr.addrSize()))) {
239             if (errno == ECONNRESET) {
240                 ALOGW("Connection reset on %s", addr.toString().c_str());
241                 continue;
242             }
243             int savedErrno = errno;
244             ALOGE("Could not connect socket at %s: %s", addr.toString().c_str(),
245                   strerror(savedErrno));
246             return false;
247         }
248 
249         if (sizeof(id) != TEMP_FAILURE_RETRY(write(serverFd.get(), &id, sizeof(id)))) {
250             int savedErrno = errno;
251             ALOGE("Could not write id to socket at %s: %s", addr.toString().c_str(),
252                   strerror(savedErrno));
253             return false;
254         }
255 
256         LOG_RPC_DETAIL("Socket at %s client with fd %d", addr.toString().c_str(), serverFd.get());
257 
258         addClientConnection(std::move(serverFd));
259         return true;
260     }
261 
262     ALOGE("Ran out of retries to connect to %s", addr.toString().c_str());
263     return false;
264 }
265 
addClientConnection(unique_fd fd)266 void RpcSession::addClientConnection(unique_fd fd) {
267     std::lock_guard<std::mutex> _l(mMutex);
268     sp<RpcConnection> session = sp<RpcConnection>::make();
269     session->fd = std::move(fd);
270     mClientConnections.push_back(session);
271 }
272 
setForServer(const wp<RpcServer> & server,int32_t sessionId)273 void RpcSession::setForServer(const wp<RpcServer>& server, int32_t sessionId) {
274     mId = sessionId;
275     mForServer = server;
276 }
277 
assignServerToThisThread(unique_fd fd)278 sp<RpcSession::RpcConnection> RpcSession::assignServerToThisThread(unique_fd fd) {
279     std::lock_guard<std::mutex> _l(mMutex);
280     sp<RpcConnection> session = sp<RpcConnection>::make();
281     session->fd = std::move(fd);
282     session->exclusiveTid = gettid();
283     mServerConnections.push_back(session);
284 
285     return session;
286 }
287 
removeServerConnection(const sp<RpcConnection> & connection)288 bool RpcSession::removeServerConnection(const sp<RpcConnection>& connection) {
289     std::lock_guard<std::mutex> _l(mMutex);
290     if (auto it = std::find(mServerConnections.begin(), mServerConnections.end(), connection);
291         it != mServerConnections.end()) {
292         mServerConnections.erase(it);
293         if (mServerConnections.size() == 0) {
294             terminateLocked();
295         }
296         return true;
297     }
298     return false;
299 }
300 
// Claims a connection for the calling thread for the lifetime of this object.
// Blocks on mAvailableConnectionCv until either a client connection with no
// exclusive owner is free, or this thread already owns a connection (nested
// call, in which case ownership is not taken again — see mReentrant).
RpcSession::ExclusiveConnection::ExclusiveConnection(const sp<RpcSession>& session,
                                                     ConnectionUse use)
      : mSession(session) {
    pid_t tid = gettid();
    std::unique_lock<std::mutex> _l(mSession->mMutex);

    mSession->mWaitingThreads++;
    while (true) {
        sp<RpcConnection> exclusive;
        sp<RpcConnection> available;

        // CHECK FOR DEDICATED CLIENT SOCKET
        //
        // A server/looper should always use a dedicated session if available
        findConnection(tid, &exclusive, &available, mSession->mClientConnections,
                       mSession->mClientConnectionsOffset);

        // WARNING: this assumes a server cannot request its client to send
        // a transaction, as mServerConnections is excluded below.
        //
        // Imagine we have more than one thread in play, and a single thread
        // sends a synchronous, then an asynchronous command. Imagine the
        // asynchronous command is sent on the first client connection. Then, if
        // we naively send a synchronous command to that same connection, the
        // thread on the far side might be busy processing the asynchronous
        // command. So, we move to considering the second available thread
        // for subsequent calls.
        if (use == ConnectionUse::CLIENT_ASYNC && (exclusive != nullptr || available != nullptr)) {
            mSession->mClientConnectionsOffset =
                    (mSession->mClientConnectionsOffset + 1) % mSession->mClientConnections.size();
        }

        // USE SERVING SOCKET (for nested transaction)
        //
        // asynchronous calls cannot be nested
        if (use != ConnectionUse::CLIENT_ASYNC) {
            // server connections are always assigned to a thread
            findConnection(tid, &exclusive, nullptr /*available*/, mSession->mServerConnections,
                           0 /* index hint */);
        }

        // if our thread is already using a session, prioritize using that
        if (exclusive != nullptr) {
            mConnection = exclusive;
            // Reentrant: ownership stays with the outer ExclusiveConnection.
            mReentrant = true;
            break;
        } else if (available != nullptr) {
            mConnection = available;
            // Take exclusive ownership until ~ExclusiveConnection releases it.
            mConnection->exclusiveTid = tid;
            break;
        }

        // in regular binder, this would usually be a deadlock :)
        LOG_ALWAYS_FATAL_IF(mSession->mClientConnections.size() == 0,
                            "Not a client of any session. You must create a session to an "
                            "RPC server to make any non-nested (e.g. oneway or on another thread) "
                            "calls.");

        LOG_RPC_DETAIL("No available session (have %zu clients and %zu servers). Waiting...",
                       mSession->mClientConnections.size(), mSession->mServerConnections.size());
        // Woken by ~ExclusiveConnection when a connection is released.
        mSession->mAvailableConnectionCv.wait(_l);
    }
    mSession->mWaitingThreads--;
}
365 
findConnection(pid_t tid,sp<RpcConnection> * exclusive,sp<RpcConnection> * available,std::vector<sp<RpcConnection>> & sockets,size_t socketsIndexHint)366 void RpcSession::ExclusiveConnection::findConnection(pid_t tid, sp<RpcConnection>* exclusive,
367                                                      sp<RpcConnection>* available,
368                                                      std::vector<sp<RpcConnection>>& sockets,
369                                                      size_t socketsIndexHint) {
370     LOG_ALWAYS_FATAL_IF(sockets.size() > 0 && socketsIndexHint >= sockets.size(),
371                         "Bad index %zu >= %zu", socketsIndexHint, sockets.size());
372 
373     if (*exclusive != nullptr) return; // consistent with break below
374 
375     for (size_t i = 0; i < sockets.size(); i++) {
376         sp<RpcConnection>& socket = sockets[(i + socketsIndexHint) % sockets.size()];
377 
378         // take first available session (intuition = caching)
379         if (available && *available == nullptr && socket->exclusiveTid == std::nullopt) {
380             *available = socket;
381             continue;
382         }
383 
384         // though, prefer to take session which is already inuse by this thread
385         // (nested transactions)
386         if (exclusive && socket->exclusiveTid == tid) {
387             *exclusive = socket;
388             break; // consistent with return above
389         }
390     }
391 }
392 
~ExclusiveConnection()393 RpcSession::ExclusiveConnection::~ExclusiveConnection() {
394     // reentrant use of a session means something less deep in the call stack
395     // is using this fd, and it retains the right to it. So, we don't give up
396     // exclusive ownership, and no thread is freed.
397     if (!mReentrant) {
398         std::unique_lock<std::mutex> _l(mSession->mMutex);
399         mConnection->exclusiveTid = std::nullopt;
400         if (mSession->mWaitingThreads > 0) {
401             _l.unlock();
402             mSession->mAvailableConnectionCv.notify_one();
403         }
404     }
405 }
406 
407 } // namespace android
408