/*
 * Copyright (C) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "jdwp.h"
#include <sys/eventfd.h>
#include <thread>

namespace Hdc {
HdcJdwp::HdcJdwp(uv_loop_t *loopIn)
{
    listenPipe.data = this;
    loop = loopIn;
    refCount = 0;
    stop = false;
    awakenPollFd = -1;
    uv_rwlock_init(&lockMapContext);
    uv_rwlock_init(&lockJdwpTrack);
}

HdcJdwp::~HdcJdwp()
{
    Base::CloseFd(awakenPollFd);
    uv_rwlock_destroy(&lockMapContext);
    uv_rwlock_destroy(&lockJdwpTrack);
}

bool HdcJdwp::ReadyForRelease()
{
    return refCount == 0;
}

void HdcJdwp::Stop()
{
    stop = true;
    WakePollThread();
    auto funcListenPipeClose = [](uv_handle_t *handle) -> void {
        HdcJdwp *thisClass = (HdcJdwp *)handle->data;
        --thisClass->refCount;
    };
    Base::TryCloseHandle((const uv_handle_t *)&listenPipe, funcListenPipeClose);
    freeContextMutex.lock();
    for (auto &&obj : mapCtxJdwp) {
        HCtxJdwp v = obj.second;
        FreeContext(v);
    }
    AdminContext(OP_CLEAR, 0, nullptr);
    freeContextMutex.unlock();
}

void *HdcJdwp::MallocContext()
{
    HCtxJdwp ctx = nullptr;
    if ((ctx = new (std::nothrow) ContextJdwp()) == nullptr) {
        return nullptr;
    }
    ctx->thisClass = this;
    ctx->pipe.data = ctx;
    ++refCount;
    return ctx;
}

// Single thread, two parameters can be used
void HdcJdwp::FreeContext(HCtxJdwp ctx)
{
    if (ctx->finish) {
        return;
    }
    ctx->finish = true;
    WRITE_LOG(LOG_INFO, "FreeContext for targetPID :%d", ctx->pid);
    Base::TryCloseHandle((const uv_handle_t *)&ctx->pipe);
    AdminContext(OP_REMOVE, ctx->pid, nullptr);
    auto funcReqClose = [](uv_idle_t *handle) -> void {
        HCtxJdwp ctx = (HCtxJdwp)handle->data;
        --ctx->thisClass->refCount;
        Base::TryCloseHandle((uv_handle_t *)handle, Base::CloseIdleCallback);
        delete ctx;
    };
    Base::IdleUvTask(loop, ctx, funcReqClose);
}

void HdcJdwp::RemoveFdFromPollList(uint32_t pid)
{
    for (auto &&pair : pollNodeMap) {
        if (pair.second.ppid == pid) {
            WRITE_LOG(LOG_INFO, "RemoveFdFromPollList for pid:%d.", pid);
            pollNodeMap.erase(pair.second.pollfd.fd);
            break;
        }
    }
}

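// Handshake packet parsed below: a plain Java client writes its pid as a few bytes of ASCII text
// (exactly 4 bytes expected), while a JS client (JS_JDWP_CONNECT) sends a JsMsgHeader {msgLen, pid}
// followed by its package name. Any other size is treated as an invalid package and the context is freed.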
void HdcJdwp::ReadStream(uv_stream_t *pipe, ssize_t nread, const uv_buf_t *buf)
{
    bool ret = true;
    if (nread == UV_ENOBUFS) {  // It is definite enough, usually only 4 bytes
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream IOBuf max");
    } else if (nread == 0) {
        return;
#ifdef JS_JDWP_CONNECT
    } else if (nread < JS_PKG_MIN_SIZE || nread > JS_PKG_MX_SIZE) {  // valid Js package size
#else
    } else if (nread < 0 || nread != 4) {  // 4 : 4 bytes
#endif  // JS_JDWP_CONNECT
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid package nread:%d.", nread);
    }

    HCtxJdwp ctxJdwp = static_cast<HCtxJdwp>(pipe->data);
    HdcJdwp *thisClass = static_cast<HdcJdwp *>(ctxJdwp->thisClass);
    if (ret) {
        uint32_t pid = 0;
        char *p = ctxJdwp->buf;
        if (nread == sizeof(uint32_t)) {  // Java: pid
            pid = atoi(p);
        } else {  // JS: pid PkgName
#ifdef JS_JDWP_CONNECT
            struct JsMsgHeader *jsMsg = reinterpret_cast<struct JsMsgHeader *>(p);
            if (jsMsg->msgLen == nread) {
                pid = jsMsg->pid;
                string pkgName = string((char *)p + sizeof(JsMsgHeader), jsMsg->msgLen - sizeof(JsMsgHeader));
                ctxJdwp->pkgName = pkgName;
            } else {
                ret = false;
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid js package size %d:%d.", jsMsg->msgLen, nread);
            }
#endif  // JS_JDWP_CONNECT
        }
        if (pid > 0) {
            ctxJdwp->pid = pid;
#ifdef JS_JDWP_CONNECT
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d-pkg:%s", pid, ctxJdwp->pkgName.c_str());
#else
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d", pid);
#endif  // JS_JDWP_CONNECT
            thisClass->AdminContext(OP_ADD, pid, ctxJdwp);
            ret = true;
            int fd = -1;
            if (uv_fileno(reinterpret_cast<uv_handle_t *>(&(ctxJdwp->pipe)), &fd) < 0) {
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream uv_fileno fail.");
            } else {
                thisClass->freeContextMutex.lock();
                thisClass->pollNodeMap.emplace(fd, PollNode(fd, pid));
                thisClass->freeContextMutex.unlock();
                thisClass->WakePollThread();
            }
        }
    }
    Base::ZeroArray(ctxJdwp->buf);
    if (!ret) {
        WRITE_LOG(LOG_INFO, "ReadStream proc:%d err, free it.", ctxJdwp->pid);
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
    }
}

#ifdef JS_JDWP_CONNECT
string HdcJdwp::GetProcessListExtendPkgName()
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        HCtxJdwp hj = v.second;
        ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
#endif  // JS_JDWP_CONNECT

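// New connection on the control pipe: the per-client pipe is created with ipc enabled (third argument
// of uv_pipe_init set to 1) so that a descriptor can later be passed back to the process in
// SendJdwpNewFD; reading then waits for the handshake handled by ReadStream above.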
void HdcJdwp::AcceptClient(uv_stream_t *server, int status)
{
    uv_pipe_t *listenPipe = (uv_pipe_t *)server;
    HdcJdwp *thisClass = (HdcJdwp *)listenPipe->data;
    HCtxJdwp ctxJdwp = (HCtxJdwp)thisClass->MallocContext();
    if (!ctxJdwp) {
        return;
    }
    uv_pipe_init(thisClass->loop, &ctxJdwp->pipe, 1);
    if (uv_accept(server, (uv_stream_t *)&ctxJdwp->pipe) < 0) {
        WRITE_LOG(LOG_DEBUG, "uv_accept failed");
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
        return;
    }
    auto funAlloc = [](uv_handle_t *handle, size_t sizeSuggested, uv_buf_t *buf) -> void {
        HCtxJdwp ctxJdwp = (HCtxJdwp)handle->data;
        buf->base = (char *)ctxJdwp->buf;
        buf->len = sizeof(ctxJdwp->buf);
    };
    uv_read_start((uv_stream_t *)&ctxJdwp->pipe, funAlloc, ReadStream);
}

// Test bash connect(UNIX-domain sockets): nc -U path/ohjpid-control < hexpid.file
// Test uv connect(pipe): 'uv_pipe_connect'
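// The control socket name below starts with '\0' in the non-PC build, i.e. it lives in the Linux
// abstract socket namespace (no filesystem entry, nothing to unlink). Because uv_pipe_bind() treats
// the name as a NUL-terminated string, the bind is done manually in UvPipeBind() below.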
bool HdcJdwp::JdwpListen()
{
#ifdef HDC_PCDEBUG
    // if test, can be enabled
    return true;
    const char jdwpCtrlName[] = { 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
    unlink(jdwpCtrlName);
#else
    const char jdwpCtrlName[] = { '\0', 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
#endif
    const int DEFAULT_BACKLOG = 4;
    bool ret = false;
    while (true) {
        uv_pipe_init(loop, &listenPipe, 0);
        listenPipe.data = this;
        if (UvPipeBind(&listenPipe, jdwpCtrlName, sizeof(jdwpCtrlName))) {
            WRITE_LOG(LOG_FATAL, "UvPipeBind failed");
            return ret;
        }
        if (uv_listen((uv_stream_t *)&listenPipe, DEFAULT_BACKLOG, AcceptClient)) {
            break;
        }
        ++refCount;
        ret = true;
        break;
    }
    // listenPipe close by stop
    return ret;
}

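// Manual replacement for uv_pipe_bind(): creates a non-blocking AF_UNIX socket, binds it to the
// (possibly abstract) name of the given size, then hands the fd to libuv by setting io_watcher.fd and
// the "bound" flag directly. Poking handle->flags / io_watcher.fd relies on libuv internals, so the
// uvHandleBound constant must match UV_HANDLE_BOUND of the libuv version in use.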
int HdcJdwp::UvPipeBind(uv_pipe_t* handle, const char* name, size_t size)
{
    char buffer[BUF_SIZE_DEFAULT] = { 0 };

    if (handle->io_watcher.fd >= 0) {
        WRITE_LOG(LOG_FATAL, "socket already bound %d", handle->io_watcher.fd);
        return -1;
    }

    int type = SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC;
    int sockfd = socket(AF_UNIX, type, 0);
    if (sockfd < 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "socket failed errno:%d %s", errno, buffer);
        return -1;
    }

#if defined(SO_NOSIGPIPE)
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
#endif

    struct sockaddr_un saddr;
    Base::ZeroStruct(saddr);
    size_t capacity = sizeof(saddr.sun_path);
    size_t min = size < capacity ? size : capacity;
    for (size_t i = 0; i < min; i++) {
        saddr.sun_path[i] = name[i];
    }
    saddr.sun_path[capacity - 1] = '\0';
    saddr.sun_family = AF_UNIX;
    size_t saddrLen = sizeof(saddr.sun_family) + size - 1;  // exclude the trailing '\0' of the name
    int err = bind(sockfd, reinterpret_cast<struct sockaddr*>(&saddr), saddrLen);
    if (err != 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "bind failed errno:%d %s", errno, buffer);
        close(sockfd);
        return -1;
    }
    constexpr uint32_t uvHandleBound = 0x00002000;
    handle->flags |= uvHandleBound;
    handle->io_watcher.fd = sockfd;
    return 0;
}

// Working in the main thread, but will be accessed by each session thread, so we need to set thread lock
void *HdcJdwp::AdminContext(const uint8_t op, const uint32_t pid, HCtxJdwp ctxJdwp)
{
    HCtxJdwp hRet = nullptr;
    switch (op) {
        case OP_ADD: {
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp[pid] = ctxJdwp;
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        case OP_REMOVE:
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.erase(pid);
            RemoveFdFromPollList(pid);
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        case OP_QUERY: {
            uv_rwlock_rdlock(&lockMapContext);
            if (mapCtxJdwp.count(pid)) {
                hRet = mapCtxJdwp[pid];
            }
            uv_rwlock_rdunlock(&lockMapContext);
            break;
        }
        case OP_CLEAR: {
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.clear();
            pollNodeMap.clear();
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        default:
            break;
    }
    if (op == OP_ADD || op == OP_REMOVE || op == OP_CLEAR) {
        uv_rwlock_wrlock(&lockJdwpTrack);
        ProcessListUpdated();
        uv_rwlock_wrunlock(&lockJdwpTrack);
    }
    return hRet;
}

// work on main thread
void HdcJdwp::SendCallbackJdwpNewFD(uv_write_t *req, int status)
{
    // It usually succeeds, so the session is not notified of the result
    HCtxJdwp ctx = (HCtxJdwp)req->data;
    if (status >= 0) {
        WRITE_LOG(LOG_DEBUG, "SendCallbackJdwpNewFD successful %d, active jdwp forward", ctx->pid);
    } else {
        WRITE_LOG(LOG_WARN, "SendCallbackJdwpNewFD failed %d", ctx->pid);
    }
    // close my process's fd
    Base::TryCloseHandle((const uv_handle_t *)&ctx->jvmTCP);
    delete req;
    --ctx->thisClass->refCount;
}

// Each session calls this interface through the main-thread message queue; it must not be called
// directly across threads.
// work on main thread
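// The one-byte dummy payload carries ctx->jvmTCP along with it; since the client pipe was created with
// ipc enabled, Base::SendToStreamEx is expected to hand the socket over to the target process
// (uv_write2-style handle passing), which then uses it as its JDWP connection.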
bool HdcJdwp::SendJdwpNewFD(uint32_t targetPID, int fd)
{
    bool ret = false;
    while (true) {
        HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
        if (!ctx) {
            break;
        }
        ctx->dummy = static_cast<uint8_t>('!');
        if (uv_tcp_init(loop, &ctx->jvmTCP)) {
            break;
        }
        if (uv_tcp_open(&ctx->jvmTCP, fd)) {
            break;
        }
        // transfer fd to jvm
        // clang-format off
        if (Base::SendToStreamEx((uv_stream_t *)&ctx->pipe, (uint8_t *)&ctx->dummy, 1, (uv_stream_t *)&ctx->jvmTCP,
            (void *)SendCallbackJdwpNewFD, (const void *)ctx) < 0) {
            break;
        }
        // clang-format on
        ++refCount;
        ret = true;
        WRITE_LOG(LOG_DEBUG, "SendJdwpNewFD successful targetPID:%d fd%d", targetPID, fd);
        break;
    }
    return ret;
}

// cross thread call begin
bool HdcJdwp::CheckPIDExist(uint32_t targetPID)
{
    HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
    return ctx != nullptr;
}

string HdcJdwp::GetProcessList()
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        ret += std::to_string(v.first) + "\n";
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
// cross thread call finish

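// Reply layout written into 'buffer': a 5-byte header of 4 hex digits plus '\n' giving the payload
// length, followed by the process list itself, e.g. "001c\n" then "1234 com.example.app\n..."
// (the example pid and package name above are illustrative only).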
size_t HdcJdwp::JdwpProcessListMsg(char *buffer, size_t bufferlen)
{
    // Message is length-prefixed with 4 hex digits in ASCII.
    static constexpr size_t headerLen = 5;
    char head[headerLen + 2];
#ifdef JS_JDWP_CONNECT
    string result = GetProcessListExtendPkgName();
#else
    string result = GetProcessList();
#endif  // JS_JDWP_CONNECT

    size_t len = result.length();
    if (bufferlen < (len + headerLen)) {
        WRITE_LOG(LOG_WARN, "truncating JDWP process list (max len = %zu) ", bufferlen);
        len = bufferlen - headerLen;  // leave room for the length header
    }
    if (snprintf_s(head, sizeof head, sizeof head - 1, "%04zx\n", len) < 0) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg head fail.");
        return 0;
    }
    if (memcpy_s(buffer, bufferlen, head, headerLen) != EOK) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get head fail.");
        return 0;
    }
    if (memcpy_s(buffer + headerLen, (bufferlen - headerLen), result.c_str(), len) != EOK) {
        WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get data fail.");
        return 0;
    }
    return len + headerLen;
}

void HdcJdwp::SendProcessList(HTaskInfo t, string data)
{
    if (t == nullptr || data.size() == 0) {
        WRITE_LOG(LOG_WARN, " SendProcessList, Nothing needs to be sent.");
        return;
    }
    void *clsSession = t->ownerSessionClass;
    HdcSessionBase *sessionBase = static_cast<HdcSessionBase *>(clsSession);
    sessionBase->LogMsg(t->sessionId, t->channelId, MSG_OK, data.c_str());
}

void HdcJdwp::ProcessListUpdated(HTaskInfo task)
{
    if (jdwpTrackers.size() <= 0) {
        WRITE_LOG(LOG_DEBUG, "None jdwpTrackers.");
        return;
    }
#ifdef JS_JDWP_CONNECT
    static constexpr uint32_t jpidTrackListSize = 1024 * 4;
#else
    static constexpr uint32_t jpidTrackListSize = 1024;
#endif  // JS_JDWP_CONNECT
    std::string data;
    data.resize(jpidTrackListSize);
    size_t len = JdwpProcessListMsg(&data[0], data.size());
    if (len <= 0) {
        return;
    }
    data.resize(len);
    if (task != nullptr) {
        SendProcessList(task, data);
    } else {
        // Erase while iterating with an explicit iterator; erasing inside a range-for would invalidate it.
        for (auto t = jdwpTrackers.begin(); t != jdwpTrackers.end();) {
            if (*t == nullptr) {
                ++t;
                continue;
            }
            if ((*t)->taskStop || (*t)->taskFree || !(*t)->taskClass) { // The channel for the track-jpid has been stopped.
                t = jdwpTrackers.erase(t);
                if (jdwpTrackers.empty()) {
                    return;
                }
            } else {
                SendProcessList(*t, data);
                ++t;
            }
        }
    }
}

bool HdcJdwp::CreateJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return false;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it == jdwpTrackers.end()) {
        jdwpTrackers.push_back(taskInfo);
    }
    ProcessListUpdated(taskInfo);
    uv_rwlock_wrunlock(&lockJdwpTrack);
    return true;
}

void HdcJdwp::RemoveJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it != jdwpTrackers.end()) {
        WRITE_LOG(LOG_DEBUG, "RemoveJdwpTracker channelId:%d, taskType:%d.", taskInfo->channelId, taskInfo->taskType);
        jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *it), jdwpTrackers.end());
    }
    uv_rwlock_wrunlock(&lockJdwpTrack);
}

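// awakenPollFd is an eventfd shared by the main thread and FdEventPollThread: WakePollThread() writes a
// counter increment to interrupt the blocking poll(), and DrainAwakenPollThread() reads it back so the
// fd stops reporting POLLIN until the next wake-up.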
void HdcJdwp::DrainAwakenPollThread() const
{
    uint64_t value = 0;
    ssize_t retVal = read(awakenPollFd, &value, sizeof(value));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "DrainAwakenPollThread: Failed to read data from awaken pipe %d", retVal);
    }
}

void HdcJdwp::WakePollThread()
{
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_DEBUG, "awakenPollFd: MUST initialized before notifying");
        return;
    }
    static const uint64_t increment = 1;
    ssize_t retVal = write(awakenPollFd, &increment, sizeof(increment));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "WakePollThread: Failed to write data into awaken pipe %d", retVal);
    }
}

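// Poll thread body: rebuilds the pollfd set whenever pollNodeMap changes size (plus one slot for
// awakenPollFd), then blocks in poll(). A hang-up or error on a client fd means the debugged process
// went away, so its context is removed from the map; POLLIN on awakenPollFd just drains the eventfd
// and loops again.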
void *HdcJdwp::FdEventPollThread(void *args)
{
    auto thisClass = static_cast<HdcJdwp *>(args);
    std::vector<struct pollfd> pollfds;
    size_t size = 0;
    while (!thisClass->stop) {
        thisClass->freeContextMutex.lock();
        if (size != thisClass->pollNodeMap.size() || thisClass->pollNodeMap.size() == 0) {
            pollfds.clear();
            struct pollfd pollFd;
            for (const auto &pair : thisClass->pollNodeMap) {
                pollFd.fd = pair.second.pollfd.fd;
                pollFd.events = pair.second.pollfd.events;
                pollFd.revents = pair.second.pollfd.revents;
                pollfds.push_back(pollFd);
            }
            pollFd.fd = thisClass->awakenPollFd;
            pollFd.events = POLLIN;
            pollFd.revents = 0;
            pollfds.push_back(pollFd);
            size = pollfds.size();
        }
        thisClass->freeContextMutex.unlock();
        poll(&pollfds[0], size, -1);
        for (const auto &pollfdsing : pollfds) {
            if (pollfdsing.revents & (POLLNVAL | POLLRDHUP | POLLHUP | POLLERR)) {  // POLLNVAL:fd not open
                thisClass->freeContextMutex.lock();
                auto it = thisClass->pollNodeMap.find(pollfdsing.fd);
                if (it != thisClass->pollNodeMap.end()) {
                    uint32_t targetPID = it->second.ppid;
                    HCtxJdwp ctx = static_cast<HCtxJdwp>(thisClass->AdminContext(OP_QUERY, targetPID, nullptr));
                    if (ctx != nullptr) {
                        thisClass->AdminContext(OP_REMOVE, targetPID, nullptr);
                    }
                }
                thisClass->freeContextMutex.unlock();
            } else if (pollfdsing.revents & POLLIN) {
                if (pollfdsing.fd == thisClass->awakenPollFd) {
                    thisClass->DrainAwakenPollThread();
                }
            }
        }
    }
    return nullptr;
}

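// (Re)creates the non-blocking eventfd used for wake-ups and starts FdEventPollThread as a raw pthread.
// The thread id is not kept, so the thread is never joined; it exits on its own once 'stop' is set and
// the poll is woken.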
int HdcJdwp::CreateFdEventPoll()
{
    pthread_t tid;
    Base::CloseFd(awakenPollFd);
    awakenPollFd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_FATAL, "CreateFdEventPoll : Failed to create awakenPollFd");
        return ERR_GENERIC;
    }
    int tret = pthread_create(&tid, nullptr, FdEventPollThread, this);
    if (tret != 0) {
        WRITE_LOG(LOG_INFO, "FdEventPollThread create fail.");
        return tret;
    }
    return RET_SUCCESS;
}

// jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=8000
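// Initialization order matters: clear any stale poll entries, bind and listen on the control socket,
// and only then start the poll thread that watches the client fds.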
int HdcJdwp::Initial()
{
    freeContextMutex.lock();
    pollNodeMap.clear();
    freeContextMutex.unlock();
    if (!JdwpListen()) {
        WRITE_LOG(LOG_FATAL, "JdwpListen failed");
        return ERR_MODULE_JDWP_FAILED;
    }
    if (CreateFdEventPoll() < 0) {
        return ERR_MODULE_JDWP_FAILED;
    }
    return RET_SUCCESS;
}
}