/*
 * Copyright (c) 2023 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 * http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "io_poller.h"

// System headers used directly in this file. Some are likely pulled in
// transitively through io_poller.h, but listing them keeps this translation
// unit self-contained.
#include <sys/epoll.h>
#include <sys/eventfd.h>
#include <pthread.h>
#include <sched.h>
#include <unistd.h>
#include <atomic>
#include <cerrno>
#include <mutex>
#include <thread>
#include "sched/execute_ctx.h"
#include "eu/co_routine.h"
#include "dfx/log/ffrt_log_api.h"
#include "ffrt_trace.h"
#include "internal_inc/assert.h"
#include "internal_inc/types.h"
#include "tm/scpu_task.h"
#include "util/ffrt_facade.h"
#include "util/name_manager.h"

namespace ffrt {
constexpr unsigned int DEFAULT_CPUINDEX_LIMIT = 7;
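// IOPollerInstance owns the dedicated poller thread: it spins in RunForever()
// until Stop() raises the exit flag and wakes the thread out of epoll_wait.
// A single process-wide instance is created lazily by GetIOPoller() below.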
struct IOPollerInstance: public IOPoller {
    IOPollerInstance() noexcept: m_runner([&] { RunForever(); })
    {
        DependenceManager::Instance();
        pthread_setname_np(m_runner.native_handle(), IO_POLLER_NAME);
    }

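    // Switch the poller thread to real-time round-robin scheduling at the
    // lowest RT priority, then poll indefinitely; PollOnce(-1) blocks in
    // epoll_wait until an fd becomes ready or WakeUp() is called.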
    void RunForever() noexcept
    {
        struct sched_param param;
        param.sched_priority = 1;
        int ret = pthread_setschedparam(pthread_self(), SCHED_RR, &param);
        if (ret != 0) {
            FFRT_LOGW("[%d] set priority warn ret[%d] eno[%d]\n", pthread_self(), ret, errno);
        }
        while (!m_exitFlag.load(std::memory_order_relaxed)) {
            IOPoller::PollOnce(-1);
        }
    }

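    // Stop() must precede join(): it publishes the exit flag and wakes the
    // thread blocked in epoll_wait so that RunForever() can observe the flag
    // and return.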
    ~IOPollerInstance() noexcept override
    {
        Stop();
        m_runner.join();
    }
private:
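    // Raise the exit flag, fence so the store is ordered before the wakeup,
    // then nudge the poller out of epoll_wait via the eventfd.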
    void Stop() noexcept
    {
        m_exitFlag.store(true, std::memory_order_relaxed);
        std::atomic_thread_fence(std::memory_order_acq_rel);
        IOPoller::WakeUp();
    }

private:
    std::thread m_runner;
    std::atomic<bool> m_exitFlag { false };
};

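// Function-local static: initialization is thread-safe since C++11, so the
// first caller constructs the singleton and starts the poller thread.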
IOPoller& GetIOPoller() noexcept
{
    static IOPollerInstance inst;
    return inst;
}

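// Create the epoll instance plus a non-blocking eventfd that serves as the
// wake-up channel; the eventfd is registered up front so WakeUp() can
// interrupt an epoll_wait that would otherwise block indefinitely.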
IOPoller::IOPoller() noexcept: m_epFd { ::epoll_create1(EPOLL_CLOEXEC) },
    m_events(32)
{
    FFRT_ASSERT(m_epFd >= 0);
    {
        m_wakeData.data = nullptr;
        m_wakeData.fd = eventfd(0, EFD_NONBLOCK | EFD_CLOEXEC);
        FFRT_ASSERT(m_wakeData.fd >= 0);
        epoll_event ev{ .events = EPOLLIN, .data = { .ptr = static_cast<void*>(&m_wakeData) } };
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, m_wakeData.fd, &ev) < 0) {
            std::terminate();
        }
    }
}

IOPoller::~IOPoller() noexcept
{
    ::close(m_wakeData.fd);
    ::close(m_epFd);
}

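// Thin wrapper over compare_exchange_strong so callers can pass the expected
// value as a plain int; the standard interface takes `expected` by reference
// and overwrites it on failure.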
bool IOPoller::CasStrong(std::atomic<int>& a, int cmp, int exc)
{
    return a.compare_exchange_strong(cmp, exc);
}

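// Writing any value to the eventfd makes it readable, which unblocks the
// poller thread sitting in epoll_wait; PollOnce() drains the counter again.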
void IOPoller::WakeUp() noexcept
{
    uint64_t one = 1;
    ssize_t n = ::write(m_wakeData.fd, &one, sizeof one);
    FFRT_ASSERT(n == sizeof one);
}

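// Block the calling task until `fd` becomes readable. There are two paths:
// a task in thread-wait mode parks on its condition variable, while a
// coroutine task suspends via CoWait and yields its worker back to the pool.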
void IOPoller::WaitFdEvent(int fd) noexcept
{
    auto ctx = ExecuteCtx::Cur();
    if (!ctx->task) {
        FFRT_LOGI("non-worker threads shall not call this function.");
        return;
    }
    struct WakeData data = {.fd = fd, .data = static_cast<void *>(ctx->task)};

    epoll_event ev = { .events = EPOLLIN, .data = {.ptr = static_cast<void*>(&data)} };
    FFRT_BLOCK_TRACER(ctx->task->gid, fd);
    if (ThreadWaitMode(ctx->task)) {
        std::unique_lock<std::mutex> lck(ctx->task->mutex_);
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, fd, &ev) == 0) {
            if (FFRT_UNLIKELY(LegacyMode(ctx->task))) {
                ctx->task->blockType = BlockType::BLOCK_THREAD;
            }
            reinterpret_cast<SCPUEUTask*>(ctx->task)->waitCond_.wait(lck);
        }
        return;
    }

    CoWait([&](CPUEUTask *task) -> bool {
        (void)task;
        if (epoll_ctl(m_epFd, EPOLL_CTL_ADD, fd, &ev) == 0) {
            // Ownership of the task now belongs to epoll; it must not be
            // accessed past this point.
            return true;
        }
        FFRT_LOGI("epoll_ctl add failed: epFd=%d, fd=%d, errno=%d", m_epFd, fd, errno);
        return false;
    });
}

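// Run one epoll_wait round and dispatch the results: wake-channel events are
// drained, every ready fd is deregistered (each registration is one-shot),
// and the owning task is woken either through its condition variable or by
// resuming its coroutine.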
void IOPoller::PollOnce(int timeout) noexcept
{
    int nfds = epoll_wait(m_epFd, m_events.data(), static_cast<int>(m_events.size()), timeout);
    if (nfds <= 0) {
        // nfds == 0 is a timeout; only a real failure (other than EINTR) is logged.
        if (nfds < 0 && errno != EINTR) {
            FFRT_LOGE("epoll_wait error: epFd=%d, errno=%d", m_epFd, errno);
        }
        return;
    }

    for (int i = 0; i < nfds; ++i) {
        struct WakeData *data = reinterpret_cast<struct WakeData *>(m_events[i].data.ptr);

        if (data->fd == m_wakeData.fd) {
            // Drain the eventfd counter written by WakeUp().
            uint64_t one = 1;
            ssize_t n = ::read(m_wakeData.fd, &one, sizeof one);
            FFRT_ASSERT(n == sizeof one);
            continue;
        }

        if (epoll_ctl(m_epFd, EPOLL_CTL_DEL, data->fd, nullptr) != 0) {
            FFRT_LOGI("epoll_ctl del failed: fd=%d, errno=%d", data->fd, errno);
            continue;
        }

        auto task = reinterpret_cast<CPUEUTask *>(data->data);
        if (ThreadNotifyMode(task)) {
            std::unique_lock<std::mutex> lck(task->mutex_);
            if (BlockThread(task)) {
                task->blockType = BlockType::BLOCK_COROUTINE;
            }
            reinterpret_cast<SCPUEUTask*>(task)->waitCond_.notify_one();
        } else {
            CoRoutineFactory::CoWakeFunc(task, CoWakeType::NO_TIMEOUT_WAKE);
        }
    }
}
} // namespace ffrt

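// Public C entry point: suspends the current ffrt task until `fd` is readable.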
void ffrt_wait_fd(int fd)
{
    ffrt::FFRTFacade::GetIoPPInstance().WaitFdEvent(fd);
}
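
// A minimal usage sketch (illustrative only, not part of this file): inside a
// task submitted through the public ffrt API, ffrt_wait_fd suspends the
// coroutine rather than the worker thread, so the read below never stalls a
// worker waiting for data. Assumes the public ffrt::submit/ffrt::wait API.
//
//   #include <fcntl.h>
//   #include <unistd.h>
//   #include "ffrt.h"  // assumed public FFRT header
//
//   int fds[2];
//   pipe2(fds, O_NONBLOCK | O_CLOEXEC);
//   ffrt::submit([&] {
//       ffrt_wait_fd(fds[0]);            // suspend until the read end is readable
//       char buf[8];
//       (void)read(fds[0], buf, sizeof buf);
//   });
//   ffrt::wait();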