/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "io_poller.h"
#include "core/task_ctx.h"
#include "sched/execute_ctx.h"
#include "eu/co_routine.h"
#include "dfx/log/ffrt_log_api.h"
#include "dfx/trace/ffrt_trace.h"

#include <cassert>
// System headers used directly in this file (some may already come in
// transitively through io_poller.h).
#include <pthread.h>
#include <sched.h>
#include <unistd.h>
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <sys/syscall.h>

namespace ffrt {
constexpr unsigned int DEFAULT_CPUINDEX_LIMIT = 7;
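// IOPollerInstance wraps the process-wide IOPoller in a dedicated thread that
// keeps polling until destruction; GetIOPoller() below hands out the single
// instance.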
struct IOPollerInstance : public IOPoller {
    IOPollerInstance() noexcept : m_runner([&] { RunForever(); })
    {
        pthread_setname_np(m_runner.native_handle(), "ffrt_io");
    }

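    // Thread body: restrict which CPUs the poller may run on, then keep
    // polling (blocking in epoll_wait) until Stop() raises m_exitFlag and
    // wakes the thread through the eventfd.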
    void RunForever() noexcept
    {
        pid_t pid = syscall(SYS_gettid);
        cpu_set_t mask;
        CPU_ZERO(&mask);
        for (unsigned int i = 0; i < DEFAULT_CPUINDEX_LIMIT; ++i) {
            CPU_SET(i, &mask);
        }
        syscall(__NR_sched_setaffinity, pid, sizeof(mask), &mask);
        while (!m_exitFlag.load(std::memory_order_relaxed)) {
            IOPoller::PollOnce(-1);
        }
    }

    ~IOPollerInstance() noexcept override
    {
        Stop();
        m_runner.join();
    }
private:
    void Stop() noexcept
    {
        m_exitFlag.store(true, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_acq_rel);
        IOPoller::WakeUp();
    }

private:
    std::thread m_runner;
    std::atomic<bool> m_exitFlag { false };
};

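// Lazily constructs the poller (and its thread) on first use; a function-local
// static makes this initialization thread-safe in C++11 and later.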
IOPoller& GetIOPoller() noexcept
{
    static IOPollerInstance inst;
    return inst;
}
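// Usage sketch (illustrative; the real call sites live elsewhere in FFRT):
//     int fd = /* some readable fd, e.g. a socket or timerfd */;
//     ffrt::GetIOPoller().WaitFdEvent(fd); // returns once fd is readable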
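// Creates the epoll instance and registers an eventfd whose only job is to
// let WakeUp() interrupt a blocking epoll_wait (e.g. on shutdown).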
IOPoller::IOPoller() noexcept : m_epFd { ::epoll_create1(EPOLL_CLOEXEC) },
    m_events(32)
{
    assert(m_epFd >= 0);
    {
        m_wakeData.data = nullptr;
        m_wakeData.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        assert(m_wakeData.fd >= 0);
        epoll_event ev{ .events = EPOLLIN, .data = { .ptr = static_cast<void*>(&m_wakeData) } };
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, m_wakeData.fd, &ev) < 0) {
            std::terminate();
        }
    }
}

IOPoller::~IOPoller() noexcept
{
    ::close(m_wakeData.fd);
    ::close(m_epFd);
}

bool IOPoller::CasStrong(std::atomic<int>& a, int cmp, int exc)
{
    return a.compare_exchange_strong(cmp, exc);
}

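// Nudges the poller thread out of epoll_wait by making the wake eventfd
// readable; PollOnce() drains it and the loop re-checks the exit flag.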
void IOPoller::WakeUp() noexcept
{
    uint64_t one = 1;
    ssize_t n = ::write(m_wakeData.fd, &one, sizeof one);
    (void)n; // silence unused-variable warnings when NDEBUG disables assert
    assert(n == sizeof one);
}

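// Blocks the calling task until `fd` becomes readable. The WakeData lives on
// this stack frame; that is safe because the caller stays blocked (condition
// variable in thread mode, suspended coroutine in coroutine mode) until
// PollOnce() removes the fd from the epoll set and wakes it.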
void IOPoller::WaitFdEvent(int fd) noexcept
{
    auto ctx = ExecuteCtx::Cur();
    if (ctx->task == nullptr) {
        FFRT_LOGI("non-worker thread shall not call this function");
        return;
    }

    struct WakeData data = {.fd = fd, .data = static_cast<void *>(ctx->task)};
    epoll_event ev = { .events = EPOLLIN, .data = { .ptr = static_cast<void*>(&data) } };
    FFRT_BLOCK_TRACER(ctx->task->gid, fd);
    if (!USE_COROUTINE) {
        std::unique_lock<std::mutex> lck(ctx->task->lock);
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, fd, &ev) == 0) {
            ctx->task->childWaitCond_.wait(lck);
        }
        return;
    }

    CoWait([&](TaskCtx *task) -> bool {
        (void)task;
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, fd, &ev) == 0) {
            return true; // stay suspended until the fd fires
        }
        FFRT_LOGI("epoll_ctl add failed: efd = %d, fd = %d, errno = %d", m_epFd, fd, errno);
        return false; // registration failed; do not suspend
    });
}
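
// Dispatch loop: waits up to `timeout` ms (-1 blocks indefinitely), drains the
// internal wake eventfd, and for every other ready fd deregisters it and
// resumes the waiting task.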
void IOPoller::PollOnce(int timeout) noexcept
{
    int nfds = epoll_wait(m_epFd, m_events.data(), static_cast<int>(m_events.size()), timeout);
    if (nfds <= 0) {
        // nfds == 0 is a timeout, not an error; only report real failures.
        if (nfds < 0 && errno != EINTR) {
            FFRT_LOGE("epoll_wait error: efd = %d, errno = %d", m_epFd, errno);
        }
        return;
    }

    for (unsigned int i = 0; i < static_cast<unsigned int>(nfds); ++i) {
        struct WakeData *data = reinterpret_cast<struct WakeData *>(m_events[i].data.ptr);

        if (data->fd == m_wakeData.fd) {
            // Drain the wake-up eventfd written by WakeUp().
            uint64_t one = 1;
            ssize_t n = ::read(m_wakeData.fd, &one, sizeof one);
            (void)n;
            assert(n == sizeof one);
            continue;
        }

        if (epoll_ctl(m_epFd, EPOLL_CTL_DEL, data->fd, nullptr) == 0) {
            auto task = reinterpret_cast<TaskCtx *>(data->data);
            if (!USE_COROUTINE) {
                std::unique_lock<std::mutex> lck(task->lock);
                task->childWaitCond_.notify_one();
            } else {
                CoWake(task, false);
            }
            continue;
        }

        FFRT_LOGI("epoll_ctl del failed: fd = %d, errno = %d", data->fd, errno);
    }
}
} // namespace ffrt