1 /*
2 * Copyright (C) 2021 Huawei Device Co., Ltd.
3 * Licensed under the Apache License, Version 2.0 (the "License");
4 * you may not use this file except in compliance with the License.
5 * You may obtain a copy of the License at
6 *
7 * http://www.apache.org/licenses/LICENSE-2.0
8 *
9 * Unless required by applicable law or agreed to in writing, software
10 * distributed under the License is distributed on an "AS IS" BASIS,
11 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12 * See the License for the specific language governing permissions and
13 * limitations under the License.
14 */
#include "jdwp.h"
#include <new>
#include <sys/eventfd.h>
#include <thread>
#include "system_depend.h"
19
20 namespace Hdc {
// Binds this jdwp bridge to the given libuv loop.  Handles are opened
// later in JdwpListen()/CreateFdEventPoll(); refCount tracks open uv
// handles so ReadyForRelease() knows when teardown is complete.
HdcJdwp::HdcJdwp(uv_loop_t *loopIn, LoopStatus *ls) : loopStatus(ls)
{
    // Fix: the old body re-assigned loopStatus even though the member
    // initializer list already set it; the duplicate store is removed.
    listenPipe.data = this;  // let libuv callbacks recover 'this'
    loop = loopIn;
    refCount = 0;
    uv_rwlock_init(&lockMapContext);
    uv_rwlock_init(&lockJdwpTrack);
    awakenPollFd = -1;  // created lazily in CreateFdEventPoll()
    stop = false;
}
32
// Releases the poll wake-up eventfd and both rwlocks.  Per-process
// contexts are expected to have been torn down earlier via Stop().
HdcJdwp::~HdcJdwp()
{
    Base::CloseFd(awakenPollFd);
    uv_rwlock_destroy(&lockMapContext);
    uv_rwlock_destroy(&lockJdwpTrack);
}
39
// True once every uv handle opened by this object has finished its close
// callback (refCount back to zero), i.e. the instance may be deleted.
bool HdcJdwp::ReadyForRelease()
{
    return refCount == 0;
}
44
// Shutdown: raise 'stop' and wake the poll thread so it can exit, close
// the listen pipe, then free every live per-process context.
void HdcJdwp::Stop()
{
    stop = true;
    WakePollThread();
    auto funcListenPipeClose = [](uv_handle_t *handle) -> void {
        HdcJdwp *thisClass = (HdcJdwp *)handle->data;
        --thisClass->refCount;  // balances ++refCount in JdwpListen()
    };
    Base::TryCloseHandle((const uv_handle_t *)&listenPipe, funcListenPipeClose);
    freeContextMutex.lock();
    for (auto &&obj : mapCtxJdwp) {
        HCtxJdwp v = obj.second;
        FreeContext(v);  // skips OP_REMOVE because 'stop' is already set
    }
    AdminContext(OP_CLEAR, 0, nullptr);  // wipe both maps in one pass
    freeContextMutex.unlock();
}
62
MallocContext()63 void *HdcJdwp::MallocContext()
64 {
65 HCtxJdwp ctx = nullptr;
66 if ((ctx = new ContextJdwp()) == nullptr) {
67 return nullptr;
68 }
69 ctx->isDebug = 0;
70 ctx->thisClass = this;
71 ctx->pipe.data = ctx;
72 ++refCount;
73 return ctx;
74 }
75
76 // Single thread, two parameters can be used
// Single thread, two parameters can be used
// Tears down one per-process context: marks it finished (idempotent),
// removes it from the registry unless we are stopping (Stop() clears the
// whole map), then closes its handles asynchronously on the loop thread.
void HdcJdwp::FreeContext(HCtxJdwp ctx)
{
    if (ctx->finish) {  // may be reached from several error paths
        return;
    }
    ctx->finish = true;
    WRITE_LOG(LOG_INFO, "FreeContext for targetPID :%d", ctx->pid);
    if (!stop) {
        AdminContext(OP_REMOVE, ctx->pid, nullptr);
    }
    // Deferred via an idle task so handle closing runs on the loop thread.
    auto funcReqClose = [](uv_idle_t *handle) -> void {
        HCtxJdwp ctxIn = (HCtxJdwp)handle->data;
        --ctxIn->thisClass->refCount;  // balances ++refCount in MallocContext
        Base::TryCloseHandle((uv_handle_t *)handle, Base::CloseIdleCallback);

        // The context object itself is deleted only after the pipe's close
        // callback fires.
        Base::TryCloseHandle((const uv_handle_t *)&ctxIn->pipe, [](uv_handle_t *handle) {
#ifndef HDC_EMULATOR
            HCtxJdwp ctxIn = (HCtxJdwp)handle->data;
            if (ctxIn != nullptr) {
                delete ctxIn;
                ctxIn = nullptr;
            }
#endif
        });
    };
    Base::IdleUvTask(loop, ctx, funcReqClose);
}
104
RemoveFdFromPollList(uint32_t pid)105 void HdcJdwp::RemoveFdFromPollList(uint32_t pid)
106 {
107 for (auto &&pair : pollNodeMap) {
108 if (pair.second.ppid == pid) {
109 WRITE_LOG(LOG_INFO, "RemoveFdFromPollList for pid:%d.", pid);
110 pollNodeMap.erase(pair.second.pollfd.fd);
111 break;
112 }
113 }
114 }
115
// libuv read callback for an accepted app connection.  The first packet
// registers the app: Java sends its pid as 4 ASCII digits; JS sends a
// JsMsgHeader (pid/isDebug/msgLen) followed by the package name.  Any
// malformed packet frees the context and drops the connection.
void HdcJdwp::ReadStream(uv_stream_t *pipe, ssize_t nread, const uv_buf_t *buf)
{
    HCtxJdwp ctxJdwp = static_cast<HCtxJdwp>(pipe->data);
    HdcJdwp *thisClass = static_cast<HdcJdwp *>(ctxJdwp->thisClass);
    CALLSTAT_GUARD(*(thisClass->loopStatus), pipe->loop, "HdcJdwp::ReadStream");
    // NOTE(review): one-shot property reset on the very first read —
    // presumably re-arms app-side jdwp registration; confirm intent.
    static std::once_flag firstLog;
    std::call_once(firstLog, [&]() { SystemDepend::SetDevItem("persist.hdc.jdwp", "0"); });

    bool ret = true;
    if (nread == UV_ENOBUFS) { // It is definite enough, usually only 4 bytes
        // The alloc callback hands out the fixed ctx buffer, so ENOBUFS
        // means the packet was larger than that buffer.
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream IOBuf max");
    } else if (nread == 0) {
        // Empty read (libuv EAGAIN equivalent): keep the connection open.
        return;
#ifdef JS_JDWP_CONNECT
    } else if (nread < signed(JS_PKG_MIN_SIZE + sizeof(JsMsgHeader)) ||
               nread > signed(JS_PKG_MAX_SIZE + sizeof(JsMsgHeader))) {
#else
    } else if (nread < 0 || nread != 4) { // 4 : 4 bytes
#endif // JS_JDWP_CONNECT
        ret = false;
        WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid package nread:%d.", nread);
    }

    if (ret) {
        uint32_t pid = 0;
        char *p = ctxJdwp->buf;
        if (nread == sizeof(uint32_t)) { // Java: pid
            pid = atoi(p);
        } else { // JS:pid PkgName
#ifdef JS_JDWP_CONNECT
            // pid isDebug pkgName/processName
            struct JsMsgHeader *jsMsg = reinterpret_cast<struct JsMsgHeader *>(p);
            if (jsMsg->msgLen == static_cast<uint32_t>(nread)) {
                pid = jsMsg->pid;
                string pkgName = string((char *)p + sizeof(JsMsgHeader), jsMsg->msgLen - sizeof(JsMsgHeader));
                ctxJdwp->pkgName = pkgName;
                ctxJdwp->isDebug = jsMsg->isDebug;
            } else {
                ret = false;
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream invalid js package size %d:%d.", jsMsg->msgLen, nread);
            }
#endif // JS_JDWP_CONNECT
        }
        if (pid > 0) {
            ctxJdwp->pid = pid;
#ifdef JS_JDWP_CONNECT
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d-pkg:%s isDebug:%d",
                      pid, ctxJdwp->pkgName.c_str(), ctxJdwp->isDebug);
#else
            WRITE_LOG(LOG_DEBUG, "JDWP accept pid:%d", pid);
#endif // JS_JDWP_CONNECT
            // Register the context, then add its fd to the hang-up poll list
            // so the poll thread notices if the app dies.
            thisClass->AdminContext(OP_ADD, pid, ctxJdwp);
            ret = true;
            int fd = -1;
            if (uv_fileno(reinterpret_cast<uv_handle_t *>(&(ctxJdwp->pipe)), &fd) < 0) {
                WRITE_LOG(LOG_DEBUG, "HdcJdwp::ReadStream uv_fileno fail.");
            } else {
                thisClass->freeContextMutex.lock();
                thisClass->pollNodeMap.emplace(fd, PollNode(fd, pid));
                thisClass->freeContextMutex.unlock();
                thisClass->WakePollThread();  // re-enter poll() with the new fd
            }
        }
    }
    Base::ZeroArray(ctxJdwp->buf);  // buffer is reused for the next read
    if (!ret) {
        WRITE_LOG(LOG_INFO, "ReadStream proc:%d err, free it.", ctxJdwp->pid);
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
    }
}
189
190 #ifdef JS_JDWP_CONNECT
GetProcessListExtendPkgName(uint8_t dr)191 string HdcJdwp::GetProcessListExtendPkgName(uint8_t dr)
192 {
193 constexpr uint8_t releaseApp = 2;
194 constexpr uint8_t allAppWithDr = 3;
195 string ret;
196 uv_rwlock_rdlock(&lockMapContext);
197 for (auto &&v : mapCtxJdwp) {
198 HCtxJdwp hj = v.second;
199 if (dr == 0) {
200 // allApp
201 ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
202 } else if (dr == 1) {
203 // debugApp
204 if (hj->isDebug) {
205 ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
206 }
207 } else if (dr == releaseApp) {
208 // releaseApp
209 if (!hj->isDebug) {
210 ret += std::to_string(v.first) + " " + hj->pkgName + "\n";
211 }
212 } else if (dr == allAppWithDr) {
213 // allApp with display debug or release
214 string apptype = "release";
215 if (hj->isDebug) {
216 apptype = "debug";
217 }
218 ret += std::to_string(v.first) + " " + hj->pkgName + " " + apptype + "\n";
219 }
220 }
221 uv_rwlock_rdunlock(&lockMapContext);
222 return ret;
223 }
224 #endif // JS_JDWP_CONNECT
225
// Connection callback for the control socket: accept the app's connection
// and start reading its registration packet into the context's fixed buf.
void HdcJdwp::AcceptClient(uv_stream_t *server, int status)
{
    uv_pipe_t *listenPipe = (uv_pipe_t *)server;
    HdcJdwp *thisClass = (HdcJdwp *)listenPipe->data;
    CALLSTAT_GUARD(*(thisClass->loopStatus), server->loop, "HdcJdwp::AcceptClient");
    HCtxJdwp ctxJdwp = (HCtxJdwp)thisClass->MallocContext();
    if (!ctxJdwp) {
        return;  // allocation failed; nothing to clean up
    }
    uv_pipe_init(thisClass->loop, &ctxJdwp->pipe, 1);
    if (uv_accept(server, (uv_stream_t *)&ctxJdwp->pipe) < 0) {
        WRITE_LOG(LOG_DEBUG, "uv_accept failed");
        thisClass->freeContextMutex.lock();
        thisClass->FreeContext(ctxJdwp);
        thisClass->freeContextMutex.unlock();
        return;
    }
    // Always hand libuv the context's fixed buffer; an oversized packet
    // then surfaces as UV_ENOBUFS in ReadStream.
    auto funAlloc = [](uv_handle_t *handle, size_t sizeSuggested, uv_buf_t *buf) -> void {
        HCtxJdwp ctxJdwp = (HCtxJdwp)handle->data;
        buf->base = (char *)ctxJdwp->buf;
        buf->len = sizeof(ctxJdwp->buf);
    };
    uv_read_start((uv_stream_t *)&ctxJdwp->pipe, funAlloc, ReadStream);
}
250
251 // Test bash connnet(UNIX-domain sockets):nc -U path/ohjpid-control < hexpid.file
252 // Test uv connect(pipe): 'uv_pipe_connect'
// Creates the "ohjpid-control" UNIX-domain control socket (abstract
// namespace on device: the name begins with '\0') and starts listening
// for app registrations.  Returns false if bind/listen fails.
bool HdcJdwp::JdwpListen()
{
#ifdef HDC_PCDEBUG
    // if test, can be enabled
    return true;
    // Unreachable while the early return above stays in place.
    const char jdwpCtrlName[] = { 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
    unlink(jdwpCtrlName);
#else
    // Leading '\0' selects the Linux abstract socket namespace: no
    // filesystem entry is created, so no unlink is needed.
    const char jdwpCtrlName[] = { '\0', 'o', 'h', 'j', 'p', 'i', 'd', '-', 'c', 'o', 'n', 't', 'r', 'o', 'l', 0 };
#endif
    const int DEFAULT_BACKLOG = 4;
    bool ret = false;
    // Single-pass while(true): 'break' doubles as the error-exit label.
    while (true) {
        uv_pipe_init(loop, &listenPipe, 0);
        listenPipe.data = this;
        if (UvPipeBind(&listenPipe, jdwpCtrlName, sizeof(jdwpCtrlName))) {
            WRITE_LOG(LOG_FATAL, "UvPipeBind failed");
            return ret;
        }
        if (uv_listen((uv_stream_t *)&listenPipe, DEFAULT_BACKLOG, AcceptClient)) {
            WRITE_LOG(LOG_FATAL, "uv_listen failed");
            break;
        }
        ++refCount;  // released by the listen-pipe close callback in Stop()
        ret = true;
        break;
    }
    // listenPipe close by stop
    return ret;
}
283
// Binds 'handle' to a UNIX-domain socket created by hand: uv_pipe_bind()
// cannot express abstract-namespace names (embedded leading '\0'), so the
// socket is created and bound here, then injected into the (initialized,
// still unbound) libuv pipe handle.  Returns 0 on success, -1 on error.
int HdcJdwp::UvPipeBind(uv_pipe_t* handle, const char* name, size_t size)
{
    char buffer[BUF_SIZE_DEFAULT] = { 0 };

    if (handle->io_watcher.fd >= 0) {
        WRITE_LOG(LOG_FATAL, "socket already bound %d", handle->io_watcher.fd);
        return -1;
    }

    int type = SOCK_STREAM | SOCK_NONBLOCK | SOCK_CLOEXEC;
    int sockfd = socket(AF_UNIX, type, 0);
    if (sockfd < 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "socket failed errno:%d %s", errno, buffer);
        return -1;
    }

#if defined(SO_NOSIGPIPE)
    int on = 1;
    setsockopt(sockfd, SOL_SOCKET, SO_NOSIGPIPE, &on, sizeof(on));
#endif

    struct sockaddr_un saddr;
    Base::ZeroStruct(saddr);
    size_t capacity = sizeof(saddr.sun_path);
    size_t min = size < capacity ? size : capacity;
    // Byte-wise copy preserves an embedded leading '\0' (abstract name),
    // which strcpy-style helpers would truncate.
    for (size_t i = 0; i < min; i++) {
        saddr.sun_path[i] = name[i];
    }
    saddr.sun_path[capacity - 1] = '\0';
    saddr.sun_family = AF_UNIX;
    // Address length excludes name's trailing NUL so the abstract-namespace
    // name ends at its last significant byte.
    size_t saddrLen = sizeof(saddr.sun_family) + size - 1;
    int err = bind(sockfd, reinterpret_cast<struct sockaddr*>(&saddr), saddrLen);
    if (err != 0) {
        strerror_r(errno, buffer, BUF_SIZE_DEFAULT);
        WRITE_LOG(LOG_FATAL, "bind failed errno:%d %s", errno, buffer);
        close(sockfd);
        return -1;
    }
    // NOTE(review): 0x00002000 mirrors libuv's internal UV_HANDLE_BOUND
    // flag — verify it still matches the linked libuv version.
    constexpr uint32_t uvHandleBound = 0x00002000;
    handle->flags |= uvHandleBound;
    handle->io_watcher.fd = sockfd;
    return 0;
}
328
329 // Working in the main thread, but will be accessed by each session thread, so we need to set thread lock
AdminContext(const uint8_t op,const uint32_t pid,HCtxJdwp ctxJdwp)330 void *HdcJdwp::AdminContext(const uint8_t op, const uint32_t pid, HCtxJdwp ctxJdwp)
331 {
332 HCtxJdwp hRet = nullptr;
333 switch (op) {
334 case OP_ADD: {
335 uv_rwlock_wrlock(&lockMapContext);
336 const int maxMapSize = 1024;
337 if (mapCtxJdwp.size() < maxMapSize) {
338 mapCtxJdwp[pid] = ctxJdwp;
339 }
340 uv_rwlock_wrunlock(&lockMapContext);
341 break;
342 }
343 case OP_REMOVE:
344 uv_rwlock_wrlock(&lockMapContext);
345 mapCtxJdwp.erase(pid);
346 RemoveFdFromPollList(pid);
347 uv_rwlock_wrunlock(&lockMapContext);
348 break;
349 case OP_QUERY: {
350 uv_rwlock_rdlock(&lockMapContext);
351 if (mapCtxJdwp.count(pid)) {
352 hRet = mapCtxJdwp[pid];
353 }
354 uv_rwlock_rdunlock(&lockMapContext);
355 break;
356 }
357 case OP_CLEAR: {
358 uv_rwlock_wrlock(&lockMapContext);
359 mapCtxJdwp.clear();
360 pollNodeMap.clear();
361 uv_rwlock_wrunlock(&lockMapContext);
362 break;
363 }
364 default:
365 break;
366 }
367 if (op == OP_ADD || op == OP_REMOVE || op == OP_CLEAR) {
368 uv_rwlock_wrlock(&lockJdwpTrack);
369 ProcessListUpdated();
370 uv_rwlock_wrunlock(&lockJdwpTrack);
371 }
372 return hRet;
373 }
374
375 // work on main thread
SendCallbackJdwpNewFD(uv_write_t * req,int status)376 void HdcJdwp::SendCallbackJdwpNewFD(uv_write_t *req, int status)
377 {
378 // It usually works successful, not notify session work
379 HCtxJdwp ctx = (HCtxJdwp)req->data;
380 if (status >= 0) {
381 WRITE_LOG(LOG_DEBUG, "SendCallbackJdwpNewFD successful %d, active jdwp forward", ctx->pid);
382 } else {
383 WRITE_LOG(LOG_WARN, "SendCallbackJdwpNewFD failed %d", ctx->pid);
384 }
385 delete req;
386 }
387
388 // Each session calls the interface through the main thread message queue, which cannot be called directly across
389 // threads
390 // work on main thread
// Hands the forwarded debugger socket 'fd' to the app registered as
// 'targetPID': the fd is wrapped in a uv_tcp handle and sent over the
// app's control pipe together with a one-byte '!' marker payload.
bool HdcJdwp::SendJdwpNewFD(uint32_t targetPID, int fd)
{
    bool ret = false;
    // Single-pass while(true): 'break' is the shared error exit.
    while (true) {
        HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
        if (!ctx) {
            break;  // target app not registered
        }
        ctx->dummy = static_cast<uint8_t>('!');  // 1-byte payload accompanying the fd
        if (uv_tcp_init(loop, &ctx->jvmTCP)) {
            break;
        }
        if (uv_tcp_open(&ctx->jvmTCP, fd)) {
            break;
        }
        // transfer fd to jvm
        // clang-format off
        if (Base::SendToStreamEx((uv_stream_t *)&ctx->pipe, (uint8_t *)&ctx->dummy, 1, (uv_stream_t *)&ctx->jvmTCP,
            (void *)SendCallbackJdwpNewFD, (const void *)ctx) < 0) {
            break;
        }
        // clang-format on
        ++refCount;  // the jvmTCP handle now holds a reference
        ret = true;
        WRITE_LOG(LOG_DEBUG, "SendJdwpNewFD successful targetPID:%d fd%d", targetPID, fd);
        break;
    }
    return ret;
}
420
SendArkNewFD(const std::string str,int fd)421 bool HdcJdwp::SendArkNewFD(const std::string str, int fd)
422 {
423 bool ret = false;
424 while (true) {
425 // str(ark:pid@tid@Debugger)
426 size_t pos = str.find_first_of(':');
427 std::string right = str.substr(pos + 1);
428 pos = right.find_first_of("@");
429 std::string pidstr = right.substr(0, pos);
430 uint32_t pid = static_cast<uint32_t>(std::atoi(pidstr.c_str()));
431 HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, pid, nullptr);
432 if (!ctx) {
433 WRITE_LOG(LOG_FATAL, "SendArkNewFD query pid:%u failed", pid);
434 break;
435 }
436 uint32_t size = sizeof(int32_t) + str.size();
437 // fd | str(ark:pid@tid@Debugger)
438 uint8_t *buf = new(std::nothrow) uint8_t[size];
439 if (buf == nullptr) {
440 WRITE_LOG(LOG_FATAL, "out of memory size:%u", size);
441 Base::CloseFd(fd);
442 return false;
443 }
444 if (memcpy_s(buf, sizeof(int32_t), &fd, sizeof(int32_t)) != EOK) {
445 WRITE_LOG(LOG_WARN, "From fd Create buf failed, fd:%d", fd);
446 Base::CloseFd(fd);
447 delete[] buf;
448 return false;
449 }
450 if (memcpy_s(buf + sizeof(int32_t), str.size(), str.c_str(), str.size()) != EOK) {
451 WRITE_LOG(LOG_WARN, "SendArkNewFD failed fd:%d str:%s", fd, str.c_str());
452 Base::CloseFd(fd);
453 delete[] buf;
454 return false;
455 }
456 uv_stream_t *stream = (uv_stream_t *)&ctx->pipe;
457 SendFdToApp(stream->io_watcher.fd, buf, size, fd);
458 delete[] buf;
459 ret = true;
460 WRITE_LOG(LOG_DEBUG, "SendArkNewFD successful str:%s fd%d", str.c_str(), fd);
461 Base::CloseFd(fd);
462 break;
463 }
464 return ret;
465 }
466
SendFdToApp(int sockfd,uint8_t * buf,int size,int fd)467 bool HdcJdwp::SendFdToApp(int sockfd, uint8_t *buf, int size, int fd)
468 {
469 struct iovec iov;
470 iov.iov_base = buf;
471 iov.iov_len = static_cast<unsigned int>(size);
472 struct msghdr msg;
473 msg.msg_name = nullptr;
474 msg.msg_namelen = 0;
475 msg.msg_iov = &iov;
476 msg.msg_iovlen = 1;
477
478 int len = CMSG_SPACE(static_cast<unsigned int>(sizeof(fd)));
479 char ctlBuf[len];
480 msg.msg_control = ctlBuf;
481 msg.msg_controllen = sizeof(ctlBuf);
482
483 struct cmsghdr *cmsg = CMSG_FIRSTHDR(&msg);
484 if (cmsg == nullptr) {
485 WRITE_LOG(LOG_FATAL, "SendFdToApp cmsg is nullptr");
486 return false;
487 }
488 cmsg->cmsg_level = SOL_SOCKET;
489 cmsg->cmsg_type = SCM_RIGHTS;
490 cmsg->cmsg_len = CMSG_LEN(sizeof(fd));
491 if (memcpy_s(CMSG_DATA(cmsg), sizeof(fd), &fd, sizeof(fd)) != 0) {
492 WRITE_LOG(LOG_FATAL, "SendFdToApp memcpy error:%d", errno);
493 return false;
494 }
495 if (sendmsg(sockfd, &msg, 0) < 0) {
496 WRITE_LOG(LOG_FATAL, "SendFdToApp sendmsg errno:%d", errno);
497 return false;
498 }
499 return true;
500 }
501
502 // cross thread call begin
CheckPIDExist(uint32_t targetPID)503 bool HdcJdwp::CheckPIDExist(uint32_t targetPID)
504 {
505 HCtxJdwp ctx = (HCtxJdwp)AdminContext(OP_QUERY, targetPID, nullptr);
506 return ctx != nullptr;
507 }
508
GetProcessList()509 string HdcJdwp::GetProcessList()
510 {
511 string ret;
512 uv_rwlock_rdlock(&lockMapContext);
513 for (auto &&v : mapCtxJdwp) {
514 ret += std::to_string(v.first) + "\n";
515 }
516 uv_rwlock_rdunlock(&lockMapContext);
517 return ret;
518 }
519 // cross thread call finish
520
JdwpProcessListMsg(char * buffer,size_t bufferlen,uint8_t dr)521 size_t HdcJdwp::JdwpProcessListMsg(char *buffer, size_t bufferlen, uint8_t dr)
522 {
523 // Message is length-prefixed with 4 hex digits in ASCII.
524 static constexpr size_t headerLen = 5;
525 char head[headerLen + 2];
526 #ifdef JS_JDWP_CONNECT
527 string result = GetProcessListExtendPkgName(dr);
528 #else
529 string result = GetProcessList();
530 #endif // JS_JDWP_CONNECT
531
532 size_t len = result.length();
533 if (bufferlen < (len + headerLen)) {
534 WRITE_LOG(LOG_WARN, "truncating JDWP process list (max len = %zu) ", bufferlen);
535 len = bufferlen;
536 }
537 if (snprintf_s(head, sizeof head, sizeof head - 1, "%04zx\n", len) < 0) {
538 WRITE_LOG(LOG_WARN, " JdwpProcessListMsg head fail.");
539 return 0;
540 }
541 if (memcpy_s(buffer, bufferlen, head, headerLen) != EOK) {
542 WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get head fail.");
543 return 0;
544 }
545 if (memcpy_s(buffer + headerLen, (bufferlen - headerLen), result.c_str(), len) != EOK) {
546 WRITE_LOG(LOG_WARN, " JdwpProcessListMsg get data fail.");
547 return 0;
548 }
549 return len + headerLen;
550 }
551
SendProcessList(HTaskInfo t,string data)552 void HdcJdwp::SendProcessList(HTaskInfo t, string data)
553 {
554 if (t == nullptr || data.size() == 0) {
555 WRITE_LOG(LOG_WARN, " SendProcessList, Nothing needs to be sent.");
556 return;
557 }
558 void *clsSession = t->ownerSessionClass;
559 HdcSessionBase *sessionBase = static_cast<HdcSessionBase *>(clsSession);
560 sessionBase->LogMsg(t->sessionId, t->channelId, MSG_OK, data.c_str());
561 }
562
ProcessListUpdated(HTaskInfo task)563 void HdcJdwp::ProcessListUpdated(HTaskInfo task)
564 {
565 if (jdwpTrackers.size() <= 0) {
566 return;
567 }
568 #ifdef JS_JDWP_CONNECT
569 static constexpr uint32_t jpidTrackListSize = 1024 * 4;
570 #else
571 static constexpr uint32_t jpidTrackListSize = 1024;
572 #endif // JS_JDWP_CONNECT
573 std::string data;
574 if (task != nullptr) {
575 data.resize(jpidTrackListSize);
576 size_t len = JdwpProcessListMsg(&data[0], data.size(), task->debugRelease);
577 if (len > 0) {
578 data.resize(len);
579 SendProcessList(task, data);
580 }
581 return;
582 }
583 for (auto iter = jdwpTrackers.begin(); iter != jdwpTrackers.end();) {
584 if (*iter == nullptr) {
585 continue;
586 }
587 // The channel for the track-jpid has been stopped.
588 if ((*iter)->taskStop || (*iter)->taskFree || !(*iter)->taskClass) {
589 iter = jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *iter), jdwpTrackers.end());
590 if (jdwpTrackers.size() == 0) {
591 return;
592 }
593 } else {
594 data.resize(jpidTrackListSize);
595 size_t len = JdwpProcessListMsg(&data[0], data.size(), (*iter)->debugRelease);
596 if (len > 0) {
597 data.resize(len);
598 SendProcessList(*iter, data);
599 }
600 iter++;
601 }
602 }
603 }
604
CreateJdwpTracker(HTaskInfo taskInfo)605 bool HdcJdwp::CreateJdwpTracker(HTaskInfo taskInfo)
606 {
607 if (taskInfo == nullptr) {
608 return false;
609 }
610 uv_rwlock_wrlock(&lockJdwpTrack);
611 auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
612 if (it == jdwpTrackers.end()) {
613 jdwpTrackers.push_back(taskInfo);
614 }
615 ProcessListUpdated(taskInfo);
616 uv_rwlock_wrunlock(&lockJdwpTrack);
617 return true;
618 }
619
RemoveJdwpTracker(HTaskInfo taskInfo)620 void HdcJdwp::RemoveJdwpTracker(HTaskInfo taskInfo)
621 {
622 if (taskInfo == nullptr) {
623 return;
624 }
625 uv_rwlock_wrlock(&lockJdwpTrack);
626 auto it = std::find(jdwpTrackers.begin(), jdwpTrackers.end(), taskInfo);
627 if (it != jdwpTrackers.end()) {
628 WRITE_LOG(LOG_DEBUG, "RemoveJdwpTracker channelId:%d, taskType:%d.", taskInfo->channelId, taskInfo->taskType);
629 jdwpTrackers.erase(remove(jdwpTrackers.begin(), jdwpTrackers.end(), *it), jdwpTrackers.end());
630 }
631 uv_rwlock_wrunlock(&lockJdwpTrack);
632 }
633
DrainAwakenPollThread() const634 void HdcJdwp::DrainAwakenPollThread() const
635 {
636 uint64_t value = 0;
637 ssize_t retVal = read(awakenPollFd, &value, sizeof(value));
638 if (retVal < 0) {
639 WRITE_LOG(LOG_FATAL, "DrainAwakenPollThread: Failed to read data from awaken pipe %d", retVal);
640 }
641 }
642
WakePollThread()643 void HdcJdwp::WakePollThread()
644 {
645 if (awakenPollFd < 0) {
646 WRITE_LOG(LOG_DEBUG, "awakenPollFd: MUST initialized before notifying");
647 return;
648 }
649 static const uint64_t increment = 1;
650 ssize_t retVal = write(awakenPollFd, &increment, sizeof(increment));
651 if (retVal < 0) {
652 WRITE_LOG(LOG_FATAL, "WakePollThread: Failed to write data into awaken pipe %d", retVal);
653 }
654 }
655
// Background thread: watches every registered app fd for hang-up so dead
// apps are purged from the registry even when the uv loop never reads
// them.  The eventfd (awakenPollFd) is polled too, letting WakePollThread
// interrupt the blocking poll() after the fd set or 'stop' changes.
void *HdcJdwp::FdEventPollThread(void *args)
{
    auto thisClass = static_cast<HdcJdwp *>(args);
    std::vector<struct pollfd> pollfds;
    size_t size = 0;
    while (!thisClass->stop) {
        thisClass->freeContextMutex.lock();
        // Rebuild the pollfd array only when the registered-fd count changed.
        if (size != thisClass->pollNodeMap.size() || thisClass->pollNodeMap.size() == 0) {
            pollfds.clear();
            struct pollfd pollFd;
            for (const auto &pair : thisClass->pollNodeMap) {
                pollFd.fd = pair.second.pollfd.fd;
                pollFd.events = pair.second.pollfd.events;
                pollFd.revents = pair.second.pollfd.revents;
                pollfds.push_back(pollFd);
            }
            // Last slot is always the wake-up eventfd.
            pollFd.fd = thisClass->awakenPollFd;
            pollFd.events = POLLIN;
            pollFd.revents = 0;
            pollfds.push_back(pollFd);
            size = pollfds.size();
        }
        thisClass->freeContextMutex.unlock();
        poll(&pollfds[0], size, -1);  // block until an fd event or a wake-up
        for (const auto &pollfdsing : pollfds) {
            if (pollfdsing.revents & (POLLNVAL | POLLRDHUP | POLLHUP | POLLERR)) { // POLLNVAL:fd not open
                // Peer went away: drop its registry entry and poll node.
                thisClass->freeContextMutex.lock();
                auto it = thisClass->pollNodeMap.find(pollfdsing.fd);
                if (it != thisClass->pollNodeMap.end()) {
                    uint32_t targetPID = it->second.ppid;
                    HCtxJdwp ctx = static_cast<HCtxJdwp>(thisClass->AdminContext(OP_QUERY, targetPID, nullptr));
                    if (ctx != nullptr) {
                        thisClass->AdminContext(OP_REMOVE, targetPID, nullptr);
                    }
                }
                thisClass->freeContextMutex.unlock();
            } else if (pollfdsing.revents & POLLIN) {
                if (pollfdsing.fd == thisClass->awakenPollFd) {
                    thisClass->DrainAwakenPollThread();  // reset eventfd counter
                }
            }
        }
    }
    return nullptr;
}
701
CreateFdEventPoll()702 int HdcJdwp::CreateFdEventPoll()
703 {
704 pthread_t tid;
705 Base::CloseFd(awakenPollFd);
706 awakenPollFd = eventfd(0, EFD_CLOEXEC | EFD_NONBLOCK);
707 if (awakenPollFd < 0) {
708 WRITE_LOG(LOG_FATAL, "CreateFdEventPoll : Failed to create awakenPollFd");
709 return ERR_GENERIC;
710 }
711 int tret = pthread_create(&tid, nullptr, FdEventPollThread, this);
712 if (tret != 0) {
713 WRITE_LOG(LOG_INFO, "FdEventPollThread create fail.");
714 return tret;
715 }
716 return RET_SUCCESS;
717 }
718
719 // jdb -connect com.sun.jdi.SocketAttach:hostname=localhost,port=8000
Initial()720 int HdcJdwp::Initial()
721 {
722 freeContextMutex.lock();
723 pollNodeMap.clear();
724 freeContextMutex.unlock();
725 if (!JdwpListen()) {
726 WRITE_LOG(LOG_FATAL, "JdwpListen failed");
727 return ERR_MODULE_JDWP_FAILED;
728 }
729 SystemDepend::SetDevItem("persist.hdc.jdwp", "0");
730 SystemDepend::SetDevItem("persist.hdc.jdwp", "1");
731 if (CreateFdEventPoll() < 0) {
732 return ERR_MODULE_JDWP_FAILED;
733 }
734 return RET_SUCCESS;
735 }
736 }
737