/*
 * Copyright (C) 2021 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */
#include "jdwp.h"
#include <sys/eventfd.h>
#include <thread>

namespace Hdc {
HdcJdwp::HdcJdwp(uv_loop_t *loopIn)
{
    listenPipe.data = this;
    loop = loopIn;
    refCount = 0;
    uv_rwlock_init(&lockMapContext);
    uv_rwlock_init(&lockJdwpTrack);
    awakenPollFd = -1;
    stop = false;
}

HdcJdwp::~HdcJdwp()
{
    Base::CloseFd(awakenPollFd);
    uv_rwlock_destroy(&lockMapContext);
    uv_rwlock_destroy(&lockJdwpTrack);
}

bool HdcJdwp::ReadyForRelease()
{
    return refCount == 0;
}

void HdcJdwp::Stop()
{
    stop = true;
    WakePollThread();
    auto funcListenPipeClose = [](uv_handle_t *handle) -> void {
        HdcJdwp *thisClass = (HdcJdwp *)handle->data;
        --thisClass->refCount;
    };
    Base::TryCloseHandle((const uv_handle_t *)&listenPipe, funcListenPipeClose);
    freeContextMutex.lock();
    for (auto &&obj : mapCtxJdwp) {
        HCtxJdwp v = obj.second;
        FreeContext(v);
    }
    AdminContext(OP_CLEAR, 0, nullptr);
    freeContextMutex.unlock();
}

void *HdcJdwp::MallocContext()
{
    HCtxJdwp ctx = nullptr;
    if ((ctx = new (std::nothrow) ContextJdwp()) == nullptr) {
        return nullptr;
    }
    ctx->isDebug = 0;
    ctx->thisClass = this;
    ctx->pipe.data = ctx;
    ++refCount;
    return ctx;
}

// Single thread, two parameters can be used
void HdcJdwp::FreeContext(HCtxJdwp ctx)
{
    if (ctx->finish) {
        return;
    }
    ctx->finish = true;
    WRITE_LOG(LOG_INFO, "FreeContext for targetPID:%d", ctx->pid);
    Base::TryCloseHandle((const uv_handle_t *)&ctx->pipe);
    if (!stop) {
        AdminContext(OP_REMOVE, ctx->pid, nullptr);
    }
    auto funcReqClose = [](uv_idle_t *handle) -> void {
        HCtxJdwp ctxIn = (HCtxJdwp)handle->data;
        --ctxIn->thisClass->refCount;
        Base::TryCloseHandle((uv_handle_t *)handle, Base::CloseIdleCallback);
#ifndef HDC_EMULATOR
        if (ctxIn != nullptr) {
            delete ctxIn;
            ctxIn = nullptr;
        }
#endif
    };
    Base::IdleUvTask(loop, ctx, funcReqClose);
}

void HdcJdwp::RemoveFdFromPollList(uint32_t pid)
{
    for (auto &&pair : pollNodeMap) {
        if (pair.second.ppid == pid) {
            WRITE_LOG(LOG_INFO, "RemoveFdFromPollList for pid:%d.", pid);
            pollNodeMap.erase(pair.second.pollfd.fd);
            break;
        }
    }
}

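// Registration packet parser. A debuggable process connects to the control socket and writes one
// registration packet: the Java path sends its pid as 4 ASCII bytes, while the JS path
// (JS_JDWP_CONNECT) sends a JsMsgHeader (msgLen/pid/isDebug, as used below) followed by the
// package name. On success the context is added to mapCtxJdwp and its fd is handed to the poll
// thread so disconnects can be detected.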
void HdcJdwp::ReadStream(uv_stream_t *pipe, ssize_t nread, const uv_buf_t *buf)
{
    bool ret = true;
    if (nread == UV_ENOBUFS) { // Read buffer exhausted; a valid registration is usually only 4 bytes
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream IOBuf max");
    } else if (nread == 0) {
        return;
#ifdef JS_JDWP_CONNECT
    } else if (nread < signed(JS_PKG_MIN_SIZE + sizeof(JsMsgHeader)) ||
               nread > signed(JS_PKG_MAX_SIZE + sizeof(JsMsgHeader))) {
#else
    } else if (nread < 0 || nread != 4) { // 4 : 4 bytes
#endif // JS_JDWP_CONNECT
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid package nread:%d.", nread);
    }

    HCtxJdwp ctxJdwp = static_cast<HCtxJdwp>(pipe->data);
    HdcJdwp *thisClass = static_cast<HdcJdwp *>(ctxJdwp->thisClass);
    if (ret) {
        uint32_t pid = 0;
        char *p = ctxJdwp->buf;
        if (nread == sizeof(uint32_t)) { // Java: pid only
            pid = atoi(p);
        } else { // JS: pid and package name
#ifdef JS_JDWP_CONNECT
            // pid isDebug pkgName/processName
            struct JsMsgHeader *jsMsg = reinterpret_cast<struct JsMsgHeader *>(p);
            if (jsMsg->msgLen == nread) {
                pid = jsMsg->pid;
                string pkgName = string((char *)p + sizeof(JsMsgHeader), jsMsg->msgLen - sizeof(JsMsgHeader));
                ctxJdwp->pkgName = pkgName;
                ctxJdwp->isDebug = jsMsg->isDebug;
            } else {
                ret = false;
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid js package size %d:%d.", jsMsg->msgLen, nread);
            }
#endif // JS_JDWP_CONNECT
        }
        if (pid > 0) {
            ctxJdwp->pid = pid;
#ifdef JS_JDWP_CONNECT
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d-pkg:%s isDebug:%d",
                      pid, ctxJdwp->pkgName.c_str(), ctxJdwp->isDebug);
#else
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d", pid);
#endif // JS_JDWP_CONNECT
            thisClass->AdminContext(OP_ADD, pid, ctxJdwp);
            ret = true;
            int fd = -1;
            if (uv_fileno(reinterpret_cast<uv_handle_t *>(&(ctxJdwp->pipe)), &fd) < 0) {
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream uv_fileno fail.");
            } else {
                thisClass->freeContextMutex.lock();
                thisClass->pollNodeMap.emplace(fd, PollNode(fd, pid));
                thisClass->freeContextMutex.unlock();
                thisClass->WakePollThread();
            }
        }
    }
    Base::ZeroArray(ctxJdwp->buf);
    if (!ret) {
        WRITE_LOG(LOG_INFO, "ReadStream proc:%d err, free it.", ctxJdwp->pid);
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
    }
}

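// Builds the "pid pkgName" listing for track-jpid in the JS build. The dr argument selects the
// filter: 0 lists every registered app, 1 only debuggable apps (isDebug set), and any other value
// only release apps.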
#ifdef JS_JDWP_CONNECT
string HdcJdwp::GetProcessListExtendPkgName(uint8_t dr)
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        HCtxJdwp hj = v.second;
        if (dr == 0) {
            // allApp
            ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
        } else if (dr == 1) {
            // debugApp
            if (hj->isDebug) {
                ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
            }
        } else {
            // releaseApp
            if (!hj->isDebug) {
                ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
            }
        }
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
#endif // JS_JDWP_CONNECT

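// Accept callback for the control socket: allocate a per-connection context, accept the pipe,
// and start reading the registration packet straight into ctx->buf (see ReadStream above for
// the packet layout).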
void HdcJdwp::AcceptClient(uv_stream_t *server, int status)
{
    uv_pipe_t *listenPipe = (uv_pipe_t *)server;
    HdcJdwp *thisClass = (HdcJdwp *)listenPipe->data;
    HCtxJdwp ctxJdwp = (HCtxJdwp)thisClass->MallocContext();
    if (!ctxJdwp) {
        return;
    }
    uv_pipe_init(thisClass->loop, &ctxJdwp->pipe, 1);
    if (uv_accept(server, (uv_stream_t *)&ctxJdwp->pipe) < 0) {
        WRITE_LOG(LOG_DEBUG, "uv_accept failed");
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
        return;
    }
    auto funAlloc = [](uv_handle_t *handle, size_t sizeSuggested, uv_buf_t *buf) -> void {
        HCtxJdwp ctxJdwp = (HCtxJdwp)handle->data;
        buf->base = (char *)ctxJdwp->buf;
        buf->len = sizeof(ctxJdwp->buf);
    };
    uv_read_start((uv_stream_t *)&ctxJdwp->pipe, funAlloc, ReadStream);
}

// Test bash connect (UNIX-domain sockets): nc -U path/ohjpid-control < hexpid.file
// Test uv connect (pipe): 'uv_pipe_connect'
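// On the device build the control name below starts with '\0', which places "ohjpid-control" in
// the Linux abstract socket namespace: it never appears on the filesystem and needs no unlink.
// The HDC_PCDEBUG variant (disabled by the early return below) would use a plain filesystem path
// instead and unlink any stale socket first.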
bool HdcJdwp::JdwpListen()
{
#ifdef HDC_PCDEBUG
    // if test, can be enabled
    return true;
    const char jdwpCtrlName[] = { 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
    unlink(jdwpCtrlName);
#else
    const char jdwpCtrlName[] = { '\0', 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
#endif
    const int DEFAULT_BACKLOG = 4;
    bool ret = false;
    while (true) {
        uv_pipe_init(loop, &listenPipe, 0);
        listenPipe.data = this;
        if (UvPipeBind(&listenPipe, jdwpCtrlName, sizeof(jdwpCtrlName))) {
            WRITE_LOG(LOG_FATAL, "UvPipeBind failed");
            return ret;
        }
        if (uv_listen((uv_stream_t *)&listenPipe, DEFAULT_BACKLOG, AcceptClient)) {
            WRITE_LOG(LOG_FATAL, "uv_listen failed");
            break;
        }
        ++refCount;
        ret = true;
        break;
    }
    // listenPipe is closed by Stop()
    return ret;
}

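// uv_pipe_bind() treats its name argument as a NUL-terminated string, so it cannot bind a name
// that begins with '\0'. To support the abstract-namespace control socket, the AF_UNIX socket is
// created and bound manually here and the resulting fd is injected into the uv_pipe_t by setting
// io_watcher.fd and the internal "bound" flag (0x00002000, presumably matching libuv's
// UV_HANDLE_BOUND bit). This relies on libuv internals and must stay in step with the libuv copy
// being linked.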
int HdcJdwp::UvPipeBind(uv_pipe_t *handle, const char *name, size_t size)
{
    char buffer[BUF_SIZE_DEFAULT] = { 0 };

    if (handle->io_watcher.fd >= 0) {
        WRITE_LOG(LOG_FATAL, "socket already bound %d", handle->io_watcher.fd);
        return -1;
    }

    int type = SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC;
    int sockfd = socket(AF_UNIX, type, 0);
    if (sockfd < 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "socket failed errno:%d %s", errno, buffer);
        return -1;
    }

#if defined(SO_NOSIGPIPE)
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
#endif

    struct sockaddr_un saddr;
    Base::ZeroStruct(saddr);
    size_t capacity = sizeof(saddr.sun_path);
    size_t min = size < capacity ? size : capacity;
    for (size_t i = 0; i < min; i++) {
        saddr.sun_path[i] = name[i];
    }
    saddr.sun_path[capacity - 1] = '\0';
    saddr.sun_family = AF_UNIX;
    size_t saddrLen = sizeof(saddr.sun_family) + size - 1;
    int err = bind(sockfd, reinterpret_cast<struct sockaddr *>(&saddr), saddrLen);
    if (err != 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "bind failed errno:%d %s", errno, buffer);
        close(sockfd);
        return -1;
    }
    constexpr uint32_t uvHandleBound = 0x00002000;
    handle->flags |= uvHandleBound;
    handle->io_watcher.fd = sockfd;
    return 0;
}

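// Central bookkeeping for the pid -> context map, guarded by lockMapContext:
//   OP_ADD    - register a context (map capped at 1024 entries)
//   OP_REMOVE - drop a pid and its poll-list entry
//   OP_QUERY  - look up a context by pid
//   OP_CLEAR  - wipe both maps (used from Stop())
// Every mutating op also re-broadcasts the process list to active track-jpid channels.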
// Runs on the main thread, but is also reached from each session thread, so access is guarded by locks
void *HdcJdwp::AdminContext(const uint8_t op, const uint32_t pid, HCtxJdwp ctxJdwp)
{
    HCtxJdwp hRet = nullptr;
    switch (op) {
        case OP_ADD: {
            uv_rwlock_wrlock(&lockMapContext);
            const size_t maxMapSize = 1024;
            if (mapCtxJdwp.size() < maxMapSize) {
                mapCtxJdwp[pid] = ctxJdwp;
            }
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        case OP_REMOVE:
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.erase(pid);
            RemoveFdFromPollList(pid);
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        case OP_QUERY: {
            uv_rwlock_rdlock(&lockMapContext);
            if (mapCtxJdwp.count(pid)) {
                hRet = mapCtxJdwp[pid];
            }
            uv_rwlock_rdunlock(&lockMapContext);
            break;
        }
        case OP_CLEAR: {
            uv_rwlock_wrlock(&lockMapContext);
            mapCtxJdwp.clear();
            pollNodeMap.clear();
            uv_rwlock_wrunlock(&lockMapContext);
            break;
        }
        default:
            break;
    }
    if (op == OP_ADD || op == OP_REMOVE || op == OP_CLEAR) {
        uv_rwlock_wrlock(&lockJdwpTrack);
        ProcessListUpdated();
        uv_rwlock_wrunlock(&lockJdwpTrack);
    }
    return hRet;
}

// work on main thread
void HdcJdwp::SendCallbackJdwpNewFD(uv_write_t *req, int status)
{
    // This usually succeeds, so the session worker is not notified of the result
    HCtxJdwp ctx = (HCtxJdwp)req->data;
    if (status >= 0) {
        WRITE_LOG(LOG_DEBUG, "SendCallbackJdwpNewFD successful %d, active jdwp forward", ctx->pid);
    } else {
        WRITE_LOG(LOG_WARN, "SendCallbackJdwpNewFD failed %d", ctx->pid);
    }
    delete req;
}

// Each session calls this interface through the main-thread message queue; it cannot be called
// directly across threads
// work on main thread
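// Hands the session-side socket over to the debugged JVM: the fd is wrapped in ctx->jvmTCP and
// passed to Base::SendToStreamEx together with the registration pipe (opened with ipc enabled in
// AcceptClient) and a one-byte '!' marker, so the descriptor can travel over the pipe with the
// message.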
bool HdcJdwp::SendJdwpNewFD(uint32_t targetPID, int fd)
{
    bool ret = false;
    while (true) {
        HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
        if (!ctx) {
            break;
        }
        ctx->dummy = static_cast<uint8_t>('!');
        if (uv_tcp_init(loop, &ctx->jvmTCP)) {
            break;
        }
        if (uv_tcp_open(&ctx->jvmTCP, fd)) {
            break;
        }
        // transfer fd to the jvm
        // clang-format off
        if (Base::SendToStreamEx((uv_stream_t *)&ctx->pipe, (uint8_t *)&ctx->dummy, 1, (uv_stream_t *)&ctx->jvmTCP,
                                 (void *)SendCallbackJdwpNewFD, (const void *)ctx) < 0) {
            break;
        }
        // clang-format on
        ++refCount;
        ret = true;
        WRITE_LOG(LOG_DEBUG, "SendJdwpNewFD successful targetPID:%d fd:%d", targetPID, fd);
        break;
    }
    return ret;
}

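// Ark (JS) variant of the fd hand-over. The control string has the form "ark:pid@tid@Debugger";
// the pid is parsed out to find the registered context, then a single message laid out as
// [int32 fd | original string] is pushed to the app through SendFdToApp, with the real
// descriptor attached as ancillary data.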
bool HdcJdwp::SendArkNewFD(const std::string str, int fd)
{
    bool ret = false;
    while (true) {
        // str format: ark:pid@tid@Debugger
        size_t pos = str.find_first_of(':');
        std::string right = str.substr(pos + 1);
        pos = right.find_first_of("@");
        std::string pidstr = right.substr(0, pos);
        uint32_t pid = static_cast<uint32_t>(std::atoi(pidstr.c_str()));
        HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, pid, nullptr);
        if (!ctx) {
            WRITE_LOG(LOG_FATAL, "SendArkNewFD query pid:%u failed", pid);
            break;
        }
        uint32_t size = sizeof(int32_t) + str.size();
        // layout: fd | str(ark:pid@tid@Debugger)
        uint8_t buf[size];
        if (memcpy_s(buf, sizeof(int32_t), &fd, sizeof(int32_t)) != EOK) {
            WRITE_LOG(LOG_WARN, "Create buf from fd failed, fd:%d", fd);
            return false;
        }
        if (memcpy_s(buf + sizeof(int32_t), str.size(), str.c_str(), str.size()) != EOK) {
            WRITE_LOG(LOG_WARN, "SendArkNewFD failed fd:%d str:%s", fd, str.c_str());
            return false;
        }
        uv_stream_t *stream = (uv_stream_t *)&ctx->pipe;
        SendFdToApp(stream->io_watcher.fd, buf, size, fd);
        ret = true;
        WRITE_LOG(LOG_DEBUG, "SendArkNewFD successful str:%s fd:%d", str.c_str(), fd);
        Base::CloseFd(fd);
        break;
    }
    return ret;
}

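// Passes a real file descriptor to the application over the UNIX-domain socket using an
// SCM_RIGHTS control message: buf travels as ordinary payload while the kernel duplicates fd
// into the receiving process.
//
// For illustration only (not part of hdc): a peer reading from the same socket could recover
// the descriptor roughly like this.
//
//   char payload[256] = { 0 };
//   struct iovec iov = { payload, sizeof(payload) };
//   char ctl[CMSG_SPACE(sizeof(int))];
//   struct msghdr msg = {};
//   msg.msg_iov = &iov;
//   msg.msg_iovlen = 1;
//   msg.msg_control = ctl;
//   msg.msg_controllen = sizeof(ctl);
//   if (recvmsg(sockfd, &msg, 0) >= 0) {
//       struct cmsghdr *c = CMSG_FIRSTHDR(&msg);
//       if (c != nullptr && c->cmsg_level == SOL_SOCKET && c->cmsg_type == SCM_RIGHTS) {
//           int receivedFd = -1;
//           memcpy(&receivedFd, CMSG_DATA(c), sizeof(receivedFd)); // fd duplicated by the kernel
//       }
//   }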
bool HdcJdwp::SendFdToApp(int sockfd, uint8_t *buf, int size, int fd)
{
    struct iovec iov;
    iov.iov_base = buf;
    iov.iov_len = static_cast<unsigned int>(size);
    struct msghdr msg;
    msg.msg_name = nullptr;
    msg.msg_namelen = 0;
    msg.msg_iov = &iov;
    msg.msg_iovlen = 1;

    int len = CMSG_SPACE(static_cast<unsigned int>(sizeof(fd)));
    char ctlBuf[len];
    msg.msg_control = ctlBuf;
    msg.msg_controllen = sizeof(ctlBuf);

    struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
    if (cmsg == nullptr) {
        WRITE_LOG(LOG_FATAL, "SendFdToApp cmsg is nullptr");
        return false;
    }
    cmsg->cmsg_level = SOL_SOCKET;
    cmsg->cmsg_type = SCM_RIGHTS;
    cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
    if (memcpy_s(CMSG_DATA(cmsg), sizeof(fd), &fd, sizeof(fd)) != 0) {
        WRITE_LOG(LOG_FATAL, "SendFdToApp memcpy error:%d", errno);
        return false;
    }
    if (sendmsg(sockfd, &msg, 0) < 0) {
        WRITE_LOG(LOG_FATAL, "SendFdToApp sendmsg errno:%d", errno);
        return false;
    }
    return true;
}

// cross thread call begin
bool HdcJdwp::CheckPIDExist(uint32_t targetPID)
{
    HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
    return ctx != nullptr;
}

string HdcJdwp::GetProcessList()
{
    string ret;
    uv_rwlock_rdlock(&lockMapContext);
    for (auto &&v : mapCtxJdwp) {
        ret += std::to_string(v.first) + "\n";
    }
    uv_rwlock_rdunlock(&lockMapContext);
    return ret;
}
// cross thread call finish

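// Serializes the process list for a track-jpid reply. The payload is prefixed with a 5-byte
// header: four lowercase hex digits giving the payload length, then '\n'. For example, a
// 26-byte list is sent as "001a\n" followed by the 26 bytes.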
size_t HdcJdwp::JdwpProcessListMsg(char *buffer, size_t bufferlen, uint8_t dr)
{
    // The message is length-prefixed with 4 hex ASCII digits plus '\n' (5-byte header).
    static constexpr size_t headerLen = 5;
    char head[headerLen + 2];
#ifdef JS_JDWP_CONNECT
    string result = GetProcessListExtendPkgName(dr);
#else
    string result = GetProcessList();
#endif // JS_JDWP_CONNECT

    size_t len = result.length();
    if (bufferlen < (len + headerLen)) {
        WRITE_LOG(LOG_WARN, "truncating JDWP process list (max len = %zu) ", bufferlen);
        len = bufferlen - headerLen;
    }
    if (snprintf_s(head, sizeof head, sizeof head - 1, "%04zx\n", len) < 0) {
        WRITE_LOG(LOG_WARN, "JdwpProcessListMsg head fail.");
        return 0;
    }
    if (memcpy_s(buffer, bufferlen, head, headerLen) != EOK) {
        WRITE_LOG(LOG_WARN, "JdwpProcessListMsg get head fail.");
        return 0;
    }
    if (memcpy_s(buffer + headerLen, (bufferlen - headerLen), result.c_str(), len) != EOK) {
        WRITE_LOG(LOG_WARN, "JdwpProcessListMsg get data fail.");
        return 0;
    }
    return len + headerLen;
}

void HdcJdwp::SendProcessList(HTaskInfo t, string data)
{
    if (t == nullptr || data.size() == 0) {
        WRITE_LOG(LOG_WARN, "SendProcessList: nothing needs to be sent.");
        return;
    }
    void *clsSession = t->ownerSessionClass;
    HdcSessionBase *sessionBase = static_cast<HdcSessionBase *>(clsSession);
    sessionBase->LogMsg(t->sessionId, t->channelId, MSG_OK, data.c_str());
}

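// Pushes the current process list to track-jpid clients. With a specific task the list is sent
// only to that channel (used right after CreateJdwpTracker); with task == nullptr every live
// tracker is refreshed and trackers whose channel has stopped are dropped from jdwpTrackers.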
void HdcJdwp::ProcessListUpdated(HTaskInfo task)
{
    if (jdwpTrackers.size() <= 0) {
        return;
    }
#ifdef JS_JDWP_CONNECT
    static constexpr uint32_t jpidTrackListSize = 1024 * 4;
#else
    static constexpr uint32_t jpidTrackListSize = 1024;
#endif // JS_JDWP_CONNECT
    std::string data;
    if (task != nullptr) {
        data.resize(jpidTrackListSize);
        size_t len = JdwpProcessListMsg(&data[0], data.size(), task->debugRelease);
        if (len > 0) {
            data.resize(len);
            SendProcessList(task, data);
        }
        return;
    }
    for (auto iter = jdwpTrackers.begin(); iter != jdwpTrackers.end();) {
        if (*iter == nullptr) {
            iter = jdwpTrackers.erase(iter);
            continue;
        }
        // The channel for the track-jpid has been stopped.
        if ((*iter)->taskStop || (*iter)->taskFree || !(*iter)->taskClass) {
            iter = jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *iter), jdwpTrackers.end());
            if (jdwpTrackers.size() == 0) {
                return;
            }
        } else {
            data.resize(jpidTrackListSize);
            size_t len = JdwpProcessListMsg(&data[0], data.size(), (*iter)->debugRelease);
            if (len > 0) {
                data.resize(len);
                SendProcessList(*iter, data);
            }
            iter++;
        }
    }
}

bool HdcJdwp::CreateJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return false;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it == jdwpTrackers.end()) {
        jdwpTrackers.push_back(taskInfo);
    }
    ProcessListUpdated(taskInfo);
    uv_rwlock_wrunlock(&lockJdwpTrack);
    return true;
}

void HdcJdwp::RemoveJdwpTracker(HTaskInfo taskInfo)
{
    if (taskInfo == nullptr) {
        return;
    }
    uv_rwlock_wrlock(&lockJdwpTrack);
    auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
    if (it != jdwpTrackers.end()) {
        WRITE_LOG(LOG_DEBUG, "RemoveJdwpTracker channelId:%d, taskType:%d.", taskInfo->channelId, taskInfo->taskType);
        jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *it), jdwpTrackers.end());
    }
    uv_rwlock_wrunlock(&lockJdwpTrack);
}

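// awakenPollFd is an eventfd shared with FdEventPollThread: WakePollThread() writes a counter
// increment to interrupt the blocking poll() (e.g. after a new fd is registered or on Stop()),
// and DrainAwakenPollThread() reads the counter back so the fd stops reporting POLLIN.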
void HdcJdwp::DrainAwakenPollThread() const
{
    uint64_t value = 0;
    ssize_t retVal = read(awakenPollFd, &value, sizeof(value));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "DrainAwakenPollThread: Failed to read data from awaken pipe %zd", retVal);
    }
}

void HdcJdwp::WakePollThread()
{
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_DEBUG, "awakenPollFd: MUST be initialized before notifying");
        return;
    }
    static const uint64_t increment = 1;
    ssize_t retVal = write(awakenPollFd, &increment, sizeof(increment));
    if (retVal < 0) {
        WRITE_LOG(LOG_FATAL, "WakePollThread: Failed to write data into awaken pipe %zd", retVal);
    }
}

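// Poll-thread body: rebuilds the pollfd set from pollNodeMap whenever its size changes (plus the
// eventfd as the last entry), then blocks in poll(). A hang-up or error on a client fd removes
// the corresponding pid via AdminContext(OP_REMOVE, ...); POLLIN on the eventfd just drains the
// wakeup counter.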
void *HdcJdwp::FdEventPollThread(void *args)
{
    auto thisClass = static_cast<HdcJdwp *>(args);
    std::vector<struct pollfd> pollfds;
    size_t size = 0;
    while (!thisClass->stop) {
        thisClass->freeContextMutex.lock();
        if (size != thisClass->pollNodeMap.size() || thisClass->pollNodeMap.size() == 0) {
            pollfds.clear();
            struct pollfd pollFd;
            for (const auto &pair : thisClass->pollNodeMap) {
                pollFd.fd = pair.second.pollfd.fd;
                pollFd.events = pair.second.pollfd.events;
                pollFd.revents = pair.second.pollfd.revents;
                pollfds.push_back(pollFd);
            }
            pollFd.fd = thisClass->awakenPollFd;
            pollFd.events = POLLIN;
            pollFd.revents = 0;
            pollfds.push_back(pollFd);
            size = pollfds.size();
        }
        thisClass->freeContextMutex.unlock();
        poll(&pollfds[0], size, -1);
        for (const auto &pollfdsing : pollfds) {
            if (pollfdsing.revents & (POLLNVAL | POLLRDHUP | POLLHUP | POLLERR)) { // POLLNVAL: fd not open
                thisClass->freeContextMutex.lock();
                auto it = thisClass->pollNodeMap.find(pollfdsing.fd);
                if (it != thisClass->pollNodeMap.end()) {
                    uint32_t targetPID = it->second.ppid;
                    HCtxJdwp ctx = static_cast<HCtxJdwp>(thisClass->AdminContext(OP_QUERY, targetPID, nullptr));
                    if (ctx != nullptr) {
                        thisClass->AdminContext(OP_REMOVE, targetPID, nullptr);
                    }
                }
                thisClass->freeContextMutex.unlock();
            } else if (pollfdsing.revents & POLLIN) {
                if (pollfdsing.fd == thisClass->awakenPollFd) {
                    thisClass->DrainAwakenPollThread();
                }
            }
        }
    }
    return nullptr;
}

int HdcJdwp::CreateFdEventPoll()
{
    pthread_t tid;
    Base::CloseFd(awakenPollFd);
    awakenPollFd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
    if (awakenPollFd < 0) {
        WRITE_LOG(LOG_FATAL, "CreateFdEventPoll : Failed to create awakenPollFd");
        return ERR_GENERIC;
    }
    int tret = pthread_create(&tid, nullptr, FdEventPollThread, this);
    if (tret != 0) {
        WRITE_LOG(LOG_INFO, "FdEventPollThread create fail.");
        return tret;
    }
    return RET_SUCCESS;
}

// jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=8000
int HdcJdwp::Initial()
{
    freeContextMutex.lock();
    pollNodeMap.clear();
    freeContextMutex.unlock();
    if (!JdwpListen()) {
        WRITE_LOG(LOG_FATAL, "JdwpListen failed");
        return ERR_MODULE_JDWP_FAILED;
    }
    if (CreateFdEventPoll() < 0) {
        return ERR_MODULE_JDWP_FAILED;
    }
    return RET_SUCCESS;
}
}  // namespace Hdc