/*
 * Copyright (C) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "jdwp.h"
#include <sys/eventfd.h>
#include <thread>

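// HdcJdwp keeps the daemon-side registry of debuggable processes: debuggees
// register their pid (and, for JS apps, their package name) over a Unix-domain
// control socket, "track-jpid" trackers are notified whenever that list
// changes, and debugger connections are forwarded to a target process by
// passing the socket fd over its registration pipe.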
namespace Hdc {
HdcJdwp::HdcJdwp(uv_loop_t *loopIn)
{
    listenPipe.data = this;
    loop = loopIn;
    refCount = 0;
    uv_rwlock_init(&lockMapContext);
    uv_rwlock_init(&lockJdwpTrack);
    awakenPollFd = -1;
    stop = false;
}

HdcJdwp::~HdcJdwp()
{
    Base::CloseFd(awakenPollFd);
    uv_rwlock_destroy(&lockMapContext);
    uv_rwlock_destroy(&lockJdwpTrack);
}

bool HdcJdwp::ReadyForRelease()
{
    return refCount == 0;
}

void HdcJdwp::Stop()
{
    stop = true;
    WakePollThread();
    auto funcListenPipeClose = [](uv_handle_t *handle) -> void {
        HdcJdwp *thisClass = (HdcJdwp *)handle->data;
        --thisClass->refCount;
    };
    Base::TryCloseHandle((const uv_handle_t *)&listenPipe, funcListenPipeClose);
    freeContextMutex.lock();
    for (auto &&obj : mapCtxJdwp) {
        HCtxJdwp v = obj.second;
        FreeContext(v);
    }
    AdminContext(OP_CLEAR, 0, nullptr);
    freeContextMutex.unlock();
}

void *HdcJdwp::MallocContext()
{
    HCtxJdwp ctx = nullptr;
    if ((ctx = new ContextJdwp()) == nullptr) {
        return nullptr;
    }
    ctx->thisClass = this;
    ctx->pipe.data = ctx;
    ++refCount;
    return ctx;
}

// Single thread, two parameters can be used
void HdcJdwp::FreeContext(HCtxJdwp ctx)
{
    if (ctx->finish) {
        return;
    }
    ctx->finish = true;
    WRITE_LOG(LOG_INFO, "FreeContext for targetPID :%d", ctx->pid);
    Base::TryCloseHandle((const uv_handle_t *)&ctx->pipe);
    if (!stop) {
        AdminContext(OP_REMOVE, ctx->pid, nullptr);
    }
    auto funcReqClose = [](uv_idle_t *handle) -> void {
        HCtxJdwp ctx = (HCtxJdwp)handle->data;
        --ctx->thisClass->refCount;
        Base::TryCloseHandle((uv_handle_t *)handle, Base::CloseIdleCallback);
        delete ctx;
    };
    Base::IdleUvTask(loop, ctx, funcReqClose);
}

void HdcJdwp::RemoveFdFromPollList(uint32_t pid)
{
    for (auto &&pair : pollNodeMap) {
        if (pair.second.ppid == pid) {
            WRITE_LOG(LOG_INFO, "RemoveFdFromPollList for pid:%d.", pid);
            pollNodeMap.erase(pair.second.pollfd.fd);
            break;
        }
    }
}

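// Registration packet read from a newly accepted client:
//  - A Java debuggee sends its pid as 4 ASCII characters, e.g. the bytes "1234".
//  - With JS_JDWP_CONNECT, a JS debuggee sends a JsMsgHeader (msgLen and pid)
//    immediately followed by its package name; msgLen covers the whole packet.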
void HdcJdwp::ReadStream(uv_stream_t *pipe, ssize_t nread, const uv_buf_t *buf)
{
    bool ret = true;
    if (nread == UV_ENOBUFS) { // It is definite enough, usually only 4 bytes
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream IOBuf max");
    } else if (nread == 0) {
        return;
#ifdef JS_JDWP_CONNECT
    } else if (nread < JS_PKG_MIN_SIZE || nread > JS_PKG_MX_SIZE) { // valid Js package size
#else
    } else if (nread < 0 || nread != 4) { // 4 : 4 bytes
#endif // JS_JDWP_CONNECT
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid package nread:%d.", nread);
    }

    HCtxJdwp ctxJdwp = static_cast<HCtxJdwp>(pipe->data);
    HdcJdwp *thisClass = static_cast<HdcJdwp *>(ctxJdwp->thisClass);
    if (ret) {
        uint32_t pid = 0;
        char *p = ctxJdwp->buf;
        if (nread == sizeof(uint32_t)) { // Java: pid
            pid = atoi(p);
        } else { // JS:pid PkgName
#ifdef JS_JDWP_CONNECT
            struct JsMsgHeader *jsMsg = reinterpret_cast<struct JsMsgHeader *>(p);
            if (jsMsg->msgLen == nread) {
                pid = jsMsg->pid;
                string pkgName = string((char *)p + sizeof(JsMsgHeader), jsMsg->msgLen - sizeof(JsMsgHeader));
                ctxJdwp->pkgName = pkgName;
            } else {
                ret = false;
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid js package size %d:%d.", jsMsg->msgLen, nread);
            }
#endif // JS_JDWP_CONNECT
        }
        if (pid > 0) {
            ctxJdwp->pid = pid;
#ifdef JS_JDWP_CONNECT
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d-pkg:%s", pid, ctxJdwp->pkgName.c_str());
#else
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d", pid);
#endif // JS_JDWP_CONNECT
            thisClass->AdminContext(OP_ADD, pid, ctxJdwp);
            ret = true;
            int fd = -1;
            if (uv_fileno(reinterpret_cast<uv_handle_t *>(&(ctxJdwp->pipe)), &fd) < 0) {
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream uv_fileno fail.");
            } else {
                thisClass->freeContextMutex.lock();
                thisClass->pollNodeMap.emplace(fd, PollNode(fd, pid));
                thisClass->freeContextMutex.unlock();
                thisClass->WakePollThread();
            }
        }
    }
    Base::ZeroArray(ctxJdwp->buf);
    if (!ret) {
        WRITE_LOG(LOG_INFO, "ReadStream proc:%d err, free it.", ctxJdwp->pid);
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
    }
}

#ifdef JS_JDWP_CONNECT
string HdcJdwp::GetProcessListExtendPkgName()
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        HCtxJdwp hj = v.second;
        ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
#endif // JS_JDWP_CONNECT

void HdcJdwp::AcceptClient(uv_stream_t *server, int status)
{
    uv_pipe_t *listenPipe = (uv_pipe_t *)server;
    HdcJdwp *thisClass = (HdcJdwp *)listenPipe->data;
    HCtxJdwp ctxJdwp = (HCtxJdwp)thisClass->MallocContext();
    if (!ctxJdwp) {
        return;
    }
    uv_pipe_init(thisClass->loop, &ctxJdwp->pipe, 1);
    if (uv_accept(server, (uv_stream_t *)&ctxJdwp->pipe) < 0) {
        WRITE_LOG(LOG_DEBUG, "uv_accept failed");
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
        return;
    }
    auto funAlloc = [](uv_handle_t *handle, size_t sizeSuggested, uv_buf_t *buf) -> void {
        HCtxJdwp ctxJdwp = (HCtxJdwp)handle->data;
        buf->base = (char *)ctxJdwp->buf;
        buf->len = sizeof(ctxJdwp->buf);
    };
    uv_read_start((uv_stream_t *)&ctxJdwp->pipe, funAlloc, ReadStream);
}

// Test bash connect (UNIX-domain sockets): nc -U path/ohjpid-control < hexpid.file
// Test uv connect (pipe): 'uv_pipe_connect'
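// Note: on the device build the control-socket name below begins with '\0', so it
// is bound in the Linux abstract socket namespace: no filesystem entry is created
// and the name vanishes automatically once the listener is closed.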
bool HdcJdwp::JdwpListen()
{
#ifdef HDC_PCDEBUG
    // if test, can be enabled
    return true;
    const char jdwpCtrlName[] = { 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
    unlink(jdwpCtrlName);
#else
    const char jdwpCtrlName[] = { '\0', 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
#endif
    const int DEFAULT_BACKLOG = 4;
    bool ret = false;
    while (true) {
        uv_pipe_init(loop, &listenPipe, 0);
        listenPipe.data = this;
        if (UvPipeBind(&listenPipe, jdwpCtrlName, sizeof(jdwpCtrlName))) {
            WRITE_LOG(LOG_FATAL, "UvPipeBind failed");
            return ret;
        }
        if (uv_listen((uv_stream_t *)&listenPipe, DEFAULT_BACKLOG, AcceptClient)) {
            break;
        }
        ++refCount;
        ret = true;
        break;
    }
    // listenPipe is closed by Stop()
    return ret;
}

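// UvPipeBind() creates and binds the AF_UNIX socket itself and then hands the fd
// to the uv_pipe_t by writing libuv's internal fields (io_watcher.fd and the
// handle flags). The uvHandleBound constant below is assumed to mirror libuv's
// internal UV_HANDLE_BOUND flag (0x00002000 in current libuv sources); this
// depends on libuv internals and may break if the library layout changes.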
int HdcJdwp::UvPipeBind(uv_pipe_t *handle, const char *name, size_t size)
{
    char buffer[BUF_SIZE_DEFAULT] = { 0 };

    if (handle->io_watcher.fd >= 0) {
        WRITE_LOG(LOG_FATAL, "socket already bound %d", handle->io_watcher.fd);
        return -1;
    }

    int type = SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC;
    int sockfd = socket(AF_UNIX, type, 0);
    if (sockfd < 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "socket failed errno:%d %s", errno, buffer);
        return -1;
    }

#if defined(SO_NOSIGPIPE)
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
#endif

    struct sockaddr_un saddr;
    Base::ZeroStruct(saddr);
    size_t capacity = sizeof(saddr.sun_path);
    size_t min = size < capacity ? size : capacity;
    for (size_t i = 0; i < min; i++) {
        saddr.sun_path[i] = name[i];
    }
    saddr.sun_path[capacity - 1] = '\0';
    saddr.sun_family = AF_UNIX;
    size_t saddrLen = sizeof(saddr.sun_family) + size - 1;
    int err = bind(sockfd, reinterpret_cast<struct sockaddr *>(&saddr), saddrLen);
    if (err != 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "bind failed errno:%d %s", errno, buffer);
        close(sockfd);
        return -1;
    }
    constexpr uint32_t uvHandleBound = 0x00002000;
    handle->flags |= uvHandleBound;
    handle->io_watcher.fd = sockfd;
    return 0;
}

// Runs on the main thread, but is also accessed from each session thread, so thread locking is required
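// op semantics: OP_ADD registers ctxJdwp under pid, OP_REMOVE drops a pid and its
// poll-list fd, OP_QUERY returns the context for a pid (or nullptr), and OP_CLEAR
// wipes both maps; every mutating op also refreshes the trackers via
// ProcessListUpdated().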
void *HdcJdwp::AdminContext(const uint8_t op, const uint32_t pid, HCtxJdwp ctxJdwp)
{
    HCtxJdwp hRet = nullptr;
    switch (op) {
        case OP_ADD: {
            uv_rwlock_wrlock(&lockMapContext);
            const int maxMapSize = 1024;
            if (mapCtxJdwp.size() < maxMapSize) {
                mapCtxJdwp[pid] = ctxJdwp;
            }
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        case OP_REMOVE:
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.erase(pid);
            RemoveFdFromPollList(pid);
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        case OP_QUERY: {
            uv_rwlock_rdlock(&lockMapContext);
            if (mapCtxJdwp.count(pid)) {
                hRet = mapCtxJdwp[pid];
            }
            uv_rwlock_rdunlock(&lockMapContext);
            break;
        }
        case OP_CLEAR: {
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.clear();
            pollNodeMap.clear();
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        default:
            break;
    }
    if (op == OP_ADD || op == OP_REMOVE || op == OP_CLEAR) {
        uv_rwlock_wrlock(&lockJdwpTrack);
        ProcessListUpdated();
        uv_rwlock_wrunlock(&lockJdwpTrack);
    }
    return hRet;
}

// work on main thread
void HdcJdwp::SendCallbackJdwpNewFD(uv_write_t *req, int status)
{
    // It usually succeeds; no need to notify the session worker
    HCtxJdwp ctx = (HCtxJdwp)req->data;
    if (status >= 0) {
        WRITE_LOG(LOG_DEBUG, "SendCallbackJdwpNewFD successful %d, active jdwp forward", ctx->pid);
    } else {
        WRITE_LOG(LOG_WARN, "SendCallbackJdwpNewFD failed %d", ctx->pid);
    }
    // close my process's fd
    Base::TryCloseHandle((const uv_handle_t *)&ctx->jvmTCP);
    delete req;
    --ctx->thisClass->refCount;
}

// Each session calls this interface through the main thread's message queue; it must not be called directly across
// threads
// work on main thread
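// The single dummy byte ('!') is written on the registration pipe with jvmTCP
// attached as the send handle (via Base::SendToStreamEx), which lets libuv pass
// the debugger socket fd to the target process over the Unix-domain pipe
// (SCM_RIGHTS under the hood); the byte itself only exists to carry that
// ancillary data.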
bool HdcJdwp::SendJdwpNewFD(uint32_t targetPID, int fd)
{
    bool ret = false;
    while (true) {
        HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
        if (!ctx) {
            break;
        }
        ctx->dummy = static_cast<uint8_t>('!');
        if (uv_tcp_init(loop, &ctx->jvmTCP)) {
            break;
        }
        if (uv_tcp_open(&ctx->jvmTCP, fd)) {
            break;
        }
        // transfer fd to jvm
        // clang-format off
        if (Base::SendToStreamEx((uv_stream_t *)&ctx->pipe, (uint8_t *)&ctx->dummy, 1, (uv_stream_t *)&ctx->jvmTCP,
            (void *)SendCallbackJdwpNewFD, (const void *)ctx) < 0) {
            break;
        }
        // clang-format on
        ++refCount;
        ret = true;
        WRITE_LOG(LOG_DEBUG, "SendJdwpNewFD successful targetPID:%d fd%d", targetPID, fd);
        break;
    }
    return ret;
}

// cross thread call begin
bool HdcJdwp::CheckPIDExist(uint32_t targetPID)
{
    HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
    return ctx != nullptr;
}

string HdcJdwp::GetProcessList()
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        ret += std::to_string(v.first) + "\n";
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
// cross thread call finish

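// Reply framing: four lowercase hex digits giving the body length, a '\n', then
// the body built by GetProcessList()/GetProcessListExtendPkgName(). Illustrative
// example (hypothetical pid and package): a single JS process 1234 named
// "com.example.app" gives the 21-byte body "1234 com.example.app\n", so the full
// message is "0015\n1234 com.example.app\n".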
size_t HdcJdwp::JdwpProcessListMsg(char *buffer, size_t bufferlen)
{
    // Message is length-prefixed with 4 hex digits in ASCII plus a trailing '\n'.
    static constexpr size_t headerLen = 5;
    char head[headerLen + 2];
#ifdef JS_JDWP_CONNECT
    string result = GetProcessListExtendPkgName();
#else
    string result = GetProcessList();
#endif // JS_JDWP_CONNECT

    size_t len = result.length();
    if (bufferlen < (len + headerLen)) {
        WRITE_LOG(LOG_WARN, "truncating JDWP process list (max len = %zu) ", bufferlen);
        len = bufferlen - headerLen; // leave room for the header, otherwise memcpy_s below rejects the copy
    }
    if (snprintf_s(head, sizeof head, sizeof head - 1, "%04zx\n", len) < 0) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg head fail.");
        return 0;
    }
    if (memcpy_s(buffer, bufferlen, head, headerLen) != EOK) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get head fail.");
        return 0;
    }
    if (memcpy_s(buffer + headerLen, (bufferlen - headerLen), result.c_str(), len) != EOK) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get data fail.");
        return 0;
    }
    return len + headerLen;
}

void HdcJdwp::SendProcessList(HTaskInfo t, string data)
{
    if (t == nullptr || data.size() == 0) {
        WRITE_LOG(LOG_WARN, " SendProcessList, Nothing needs to be sent.");
        return;
    }
    void *clsSession = t->ownerSessionClass;
    HdcSessionBase *sessionBase = static_cast<HdcSessionBase *>(clsSession);
    sessionBase->LogMsg(t->sessionId, t->channelId, MSG_OK, data.c_str());
    WRITE_LOG(LOG_INFO, "SendProcessList channelId:%u data:%s", t->channelId, data.c_str());
}

void HdcJdwp::ProcessListUpdated(HTaskInfo task)
{
    if (jdwpTrackers.empty()) {
        WRITE_LOG(LOG_DEBUG, "No jdwpTrackers.");
        return;
    }
#ifdef JS_JDWP_CONNECT
    static constexpr uint32_t jpidTrackListSize = 1024 * 4;
#else
    static constexpr uint32_t jpidTrackListSize = 1024;
#endif // JS_JDWP_CONNECT
    std::string data;
    data.resize(jpidTrackListSize);
    size_t len = JdwpProcessListMsg(&data[0], data.size());
    if (len == 0) {
        return;
    }
    data.resize(len);
    if (task != nullptr) {
        SendProcessList(task, data);
        return;
    }
    for (auto iter = jdwpTrackers.begin(); iter != jdwpTrackers.end();) {
        if (*iter == nullptr) {
            iter = jdwpTrackers.erase(iter);
            continue;
        }
        // The channel for the track-jpid has been stopped.
        if ((*iter)->taskStop || (*iter)->taskFree || !(*iter)->taskClass) {
            iter = jdwpTrackers.erase(iter);
        } else {
            SendProcessList(*iter, data);
            ++iter;
        }
    }
}

bool HdcJdwp::CreateJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return false;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it == jdwpTrackers.end()) {
        jdwpTrackers.push_back(taskInfo);
    }
    ProcessListUpdated(taskInfo);
    uv_rwlock_wrunlock(&lockJdwpTrack);
    return true;
}

void HdcJdwp::RemoveJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it != jdwpTrackers.end()) {
        WRITE_LOG(LOG_DEBUG, "RemoveJdwpTracker channelId:%d, taskType:%d.", taskInfo->channelId, taskInfo->taskType);
        jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *it), jdwpTrackers.end());
    }
    uv_rwlock_wrunlock(&lockJdwpTrack);
}

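// awakenPollFd is an eventfd: WakePollThread() writes an increment to make it
// readable (POLLIN) and so interrupt the blocking poll(), and
// DrainAwakenPollThread() reads the counter back to reset it.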
void HdcJdwp::DrainAwakenPollThread() const
{
    uint64_t value = 0;
    ssize_t retVal = read(awakenPollFd, &value, sizeof(value));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "DrainAwakenPollThread: Failed to read data from awaken pipe %d", retVal);
    }
}

void HdcJdwp::WakePollThread()
{
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_DEBUG, "awakenPollFd: MUST be initialized before notifying");
        return;
    }
    static const uint64_t increment = 1;
    ssize_t retVal = write(awakenPollFd, &increment, sizeof(increment));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "WakePollThread: Failed to write data into awaken pipe %d", retVal);
    }
}

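// Poll thread: watches every registered debuggee fd for hang-up/error so dead
// processes can be dropped from the map, plus the eventfd used to wake the thread
// whenever the fd set changes or Stop() is called.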
void *HdcJdwp::FdEventPollThread(void *args)
{
    auto thisClass = static_cast<HdcJdwp *>(args);
    std::vector<struct pollfd> pollfds;
    size_t size = 0;
    while (!thisClass->stop) {
        thisClass->freeContextMutex.lock();
        if (size != thisClass->pollNodeMap.size() || thisClass->pollNodeMap.size() == 0) {
            pollfds.clear();
            struct pollfd pollFd;
            for (const auto &pair : thisClass->pollNodeMap) {
                pollFd.fd = pair.second.pollfd.fd;
                pollFd.events = pair.second.pollfd.events;
                pollFd.revents = pair.second.pollfd.revents;
                pollfds.push_back(pollFd);
            }
            pollFd.fd = thisClass->awakenPollFd;
            pollFd.events = POLLIN;
            pollFd.revents = 0;
            pollfds.push_back(pollFd);
            size = pollfds.size();
        }
        thisClass->freeContextMutex.unlock();
        poll(&pollfds[0], size, -1);
        for (const auto &pollfdsing : pollfds) {
            if (pollfdsing.revents & (POLLNVAL | POLLRDHUP | POLLHUP | POLLERR)) { // POLLNVAL:fd not open
                thisClass->freeContextMutex.lock();
                auto it = thisClass->pollNodeMap.find(pollfdsing.fd);
                if (it != thisClass->pollNodeMap.end()) {
                    uint32_t targetPID = it->second.ppid;
                    HCtxJdwp ctx = static_cast<HCtxJdwp>(thisClass->AdminContext(OP_QUERY, targetPID, nullptr));
                    if (ctx != nullptr) {
                        thisClass->AdminContext(OP_REMOVE, targetPID, nullptr);
                    }
                }
                thisClass->freeContextMutex.unlock();
            } else if (pollfdsing.revents & POLLIN) {
                if (pollfdsing.fd == thisClass->awakenPollFd) {
                    thisClass->DrainAwakenPollThread();
                }
            }
        }
    }
    return nullptr;
}

int HdcJdwp::CreateFdEventPoll()
{
    pthread_t tid;
    Base::CloseFd(awakenPollFd);
    awakenPollFd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_FATAL, "CreateFdEventPoll : Failed to create awakenPollFd");
        return ERR_GENERIC;
    }
    int tret = pthread_create(&tid, nullptr, FdEventPollThread, this);
    if (tret != 0) {
        WRITE_LOG(LOG_INFO, "FdEventPollThread create fail.");
        return tret;
    }
    return RET_SUCCESS;
}

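// Initial() wires everything together: reset the poll map, start listening on the
// control socket (JdwpListen), then start the fd poll thread (CreateFdEventPoll).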
// jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=8000
int HdcJdwp::Initial()
{
    freeContextMutex.lock();
    pollNodeMap.clear();
    freeContextMutex.unlock();
    if (!JdwpListen()) {
        WRITE_LOG(LOG_FATAL, "JdwpListen failed");
        return ERR_MODULE_JDWP_FAILED;
    }
    if (CreateFdEventPoll() < 0) {
        return ERR_MODULE_JDWP_FAILED;
    }
    return RET_SUCCESS;
}
}