/*
 * Copyright (c) 2021-2022 Huawei Device Co., Ltd. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without modification,
 * are permitted provided that the following conditions are met:
 *
 * 1. Redistributions of source code must retain the above copyright notice, this list of
 *    conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright notice, this list
 *    of conditions and the following disclaimer in the documentation and/or other materials
 *    provided with the distribution.
 *
 * 3. Neither the name of the copyright holder nor the names of its contributors may be used
 *    to endorse or promote products derived from this software without specific prior written
 *    permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
 * ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#include "epoll.h"
#include <stdint.h>
#include <poll.h>
#include <errno.h>
#include <string.h>
#include "pthread.h"

/* The number of fds one epoll fd can control */
#define EPOLL_DEFAULT_SIZE 100

/* Internal data, used to manage each epoll fd */
struct epoll_head {
    int size;                /* capacity of the evs array */
    int nodeCount;           /* number of fds currently registered */
    struct epoll_event *evs; /* events registered via epoll_ctl() */
};

STATIC pthread_mutex_t g_epollMutex = PTHREAD_RECURSIVE_MUTEX_INITIALIZER_NP;

#ifndef MAX_EPOLL_FD
#define MAX_EPOLL_FD CONFIG_EPOLL_DESCRIPTORS
#endif

/* Bitmap of epoll sysFds that are currently in use */
STATIC fd_set g_epollFdSet;

/* Private data of each epoll fd, indexed by sysFd */
STATIC struct epoll_head *g_epPrivBuf[MAX_EPOLL_FD];
/**
 * Allocate a sysFd, store the epoll private data, and mark the slot as in use.
 *
 * @param maxfdp: Maximum number of sysFds that can be allocated.
 * @param head: Private data.
 * @return the index of the new fd; -1 on error
 */
static int EpollAllocSysFd(int maxfdp, struct epoll_head *head)
{
    int i;

    fd_set *fdset = &g_epollFdSet;

    for (i = 0; i < maxfdp; i++) {
        if (fdset && !(FD_ISSET(i, fdset))) {
            FD_SET(i, fdset);
            if (!g_epPrivBuf[i]) {
                g_epPrivBuf[i] = head;
                return i + EPOLL_FD_OFFSET;
            }
        }
    }

    set_errno(EMFILE);
    return -1;
}

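/*
 * Illustrative note: the user-visible epoll fd is simply the allocated slot index plus
 * EPOLL_FD_OFFSET, so with MAX_EPOLL_FD slots the valid epoll fds fall in the range
 * [EPOLL_FD_OFFSET, EPOLL_FD_OFFSET + MAX_EPOLL_FD). EpollGetDataBuff() and
 * EpollFreeSysFd() reverse the mapping by subtracting the offset, e.g.:
 *
 *   int fd = EpollAllocSysFd(MAX_EPOLL_FD, head);   // e.g. returns EPOLL_FD_OFFSET + 0
 *   struct epoll_head *h = EpollGetDataBuff(fd);    // h == head
 */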
/**
 * Free the sysFd, delete the epoll private data, and clear the in-use bit.
 *
 * @param fd: epoll fd.
 * @return 0 or -1
 */
static int EpollFreeSysFd(int fd)
{
    int efd = fd - EPOLL_FD_OFFSET;

    if ((efd < 0) || (efd >= MAX_EPOLL_FD)) {
        set_errno(EMFILE);
        return -1;
    }

    fd_set *fdset = &g_epollFdSet;
    if (fdset && FD_ISSET(efd, fdset)) {
        FD_CLR(efd, fdset);
        g_epPrivBuf[efd] = NULL;
    }

    return 0;
}

/**
 * Get the private data by epoll fd.
 *
 * @param fd: epoll fd.
 * @return pointer to the epoll_head, or NULL if the fd is invalid
 */
static struct epoll_head *EpollGetDataBuff(int fd)
{
    int id = fd - EPOLL_FD_OFFSET;

    if ((id < 0) || (id >= MAX_EPOLL_FD)) {
        return NULL;
    }

    return g_epPrivBuf[id];
}

/**
 * For EPOLL_CTL_ADD, check whether the fd is already registered.
 *
 * @param epHead: epoll control head, found by epoll fd.
 * @param fd: the fd to be added.
 * @return 0 if the fd is not present; -1 if it already exists
 */
static int CheckFdExist(struct epoll_head *epHead, int fd)
{
    int i;
    for (i = 0; i < epHead->nodeCount; i++) {
        if (epHead->evs[i].data.fd == fd) {
            return -1;
        }
    }

    return 0;
}

/**
 * Close the epoll instance and free its private data.
 *
 * @param epHead: epoll control head.
 * @return void
 */
static VOID DoEpollClose(struct epoll_head *epHead)
{
    if (epHead != NULL) {
        if (epHead->evs != NULL) {
            free(epHead->evs);
        }

        free(epHead);
    }

    return;
}

/**
 * epoll_create, epoll_create1
 *
 * epoll_create is not supported directly; it is implemented by calling epoll_create1,
 * so its 'size' parameter is ignored.
 *
 * This simplified epoll does not use a red-black tree, so when the returned fd is a
 * normal value (greater than 0), the allocated instance can manage at most
 * EPOLL_DEFAULT_SIZE fds.
 *
 * @param flags: not actually used
 * @return epoll fd on success; -1 on error
 */
int epoll_create1(int flags)
{
    (void)flags;
    int fd = -1;

    struct epoll_head *epHead = (struct epoll_head *)malloc(sizeof(struct epoll_head));
    if (epHead == NULL) {
        set_errno(ENOMEM);
        return fd;
    }

    /* The allocated instance can manage at most EPOLL_DEFAULT_SIZE fds */
    epHead->size = EPOLL_DEFAULT_SIZE;
    epHead->nodeCount = 0;
    epHead->evs = malloc(sizeof(struct epoll_event) * EPOLL_DEFAULT_SIZE);
    if (epHead->evs == NULL) {
        free(epHead);
        set_errno(ENOMEM);
        return fd;
    }

    /* Mark the fd bit as used and get a sysFd, so that close() can find this instance */
    (VOID)pthread_mutex_lock(&g_epollMutex);
    fd = EpollAllocSysFd(MAX_EPOLL_FD, epHead);
    if (fd == -1) {
        (VOID)pthread_mutex_unlock(&g_epollMutex);
        DoEpollClose(epHead);
        set_errno(EMFILE);
        return fd;
    }
    (VOID)pthread_mutex_unlock(&g_epollMutex);
    return fd;
}
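/*
 * Usage sketch (illustrative, not part of this implementation): creating and releasing
 * an epoll instance through this wrapper. As noted above, epoll_create() forwards to
 * epoll_create1(), and close() reaches epoll_close() for epoll fds.
 *
 *   int epfd = epoll_create1(0);
 *   if (epfd < 0) {
 *       perror("epoll_create1");   // errno is ENOMEM or EMFILE here
 *       return -1;
 *   }
 *   ...
 *   close(epfd);
 */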

/**
 * epoll_close,
 * called by close
 * @param epfd: epoll fd
 * @return 0 or -1
 */
int epoll_close(int epfd)
{
    struct epoll_head *epHead = NULL;

    (VOID)pthread_mutex_lock(&g_epollMutex);
    epHead = EpollGetDataBuff(epfd);
    if (epHead == NULL) {
        (VOID)pthread_mutex_unlock(&g_epollMutex);
        set_errno(EBADF);
        return -1;
    }

    DoEpollClose(epHead);
    int ret = EpollFreeSysFd(epfd);
    (VOID)pthread_mutex_unlock(&g_epollMutex);
    return ret;
}

int epoll_ctl(int epfd, int op, int fd, struct epoll_event *ev)
{
    struct epoll_head *epHead = NULL;
    int i;
    int ret = -1;

    (VOID)pthread_mutex_lock(&g_epollMutex);
    epHead = EpollGetDataBuff(epfd);
    if (epHead == NULL) {
        set_errno(EBADF);
        goto OUT_RELEASE;
    }

    if (ev == NULL) {
        set_errno(EINVAL);
        goto OUT_RELEASE;
    }

    switch (op) {
        case EPOLL_CTL_ADD:
            ret = CheckFdExist(epHead, fd);
            if (ret == -1) {
                set_errno(EEXIST);
                goto OUT_RELEASE;
            }

            if (epHead->nodeCount == EPOLL_DEFAULT_SIZE) {
                set_errno(ENOMEM);
                goto OUT_RELEASE;
            }

            epHead->evs[epHead->nodeCount].events = ev->events | POLLERR | POLLHUP;
            epHead->evs[epHead->nodeCount].data.fd = fd;
            epHead->nodeCount++;
            ret = 0;
            break;
        case EPOLL_CTL_DEL:
            for (i = 0; i < epHead->nodeCount; i++) {
                if (epHead->evs[i].data.fd != fd) {
                    continue;
                }

                if (i != epHead->nodeCount - 1) {
                    /* memmove_s sizes are in bytes: shift the remaining entries down by one */
                    (void)memmove_s(&epHead->evs[i], (epHead->size - i) * sizeof(struct epoll_event),
                                    &epHead->evs[i + 1],
                                    (epHead->nodeCount - i - 1) * sizeof(struct epoll_event));
                }
                epHead->nodeCount--;
                ret = 0;
                goto OUT_RELEASE;
            }
            set_errno(ENOENT);
            break;
        case EPOLL_CTL_MOD:
            for (i = 0; i < epHead->nodeCount; i++) {
                if (epHead->evs[i].data.fd == fd) {
                    epHead->evs[i].events = ev->events | POLLERR | POLLHUP;
                    ret = 0;
                    goto OUT_RELEASE;
                }
            }
            set_errno(ENOENT);
            break;
        default:
            set_errno(EINVAL);
            break;
    }

OUT_RELEASE:
    (VOID)pthread_mutex_unlock(&g_epollMutex);
    return ret;
}
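/*
 * Usage sketch (illustrative only): registering, modifying and removing a watched fd.
 * 'sockFd' is a hypothetical already-open descriptor. Note that this implementation
 * always adds POLLERR | POLLHUP to the requested events, rejects a NULL 'ev' even for
 * EPOLL_CTL_DEL, and tracks at most EPOLL_DEFAULT_SIZE fds per instance.
 *
 *   struct epoll_event ev;
 *   ev.events = EPOLLIN;
 *   ev.data.fd = sockFd;
 *   (void)epoll_ctl(epfd, EPOLL_CTL_ADD, sockFd, &ev);
 *
 *   ev.events = EPOLLIN | EPOLLOUT;
 *   (void)epoll_ctl(epfd, EPOLL_CTL_MOD, sockFd, &ev);
 *
 *   (void)epoll_ctl(epfd, EPOLL_CTL_DEL, sockFd, &ev);
 */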

int epoll_wait(int epfd, FAR struct epoll_event *evs, int maxevents, int timeout)
{
    struct epoll_head *epHead = NULL;
    int ret = -1;
    int counter;
    int i;
    struct pollfd *pFd = NULL;
    int pollSize;

    (VOID)pthread_mutex_lock(&g_epollMutex);
    epHead = EpollGetDataBuff(epfd);
    if (epHead == NULL) {
        set_errno(EBADF);
        goto OUT_RELEASE;
    }

    if ((maxevents <= 0) || (evs == NULL)) {
        set_errno(EINVAL);
        goto OUT_RELEASE;
    }

    if (maxevents > epHead->nodeCount) {
        pollSize = epHead->nodeCount;
    } else {
        pollSize = maxevents;
    }

    pFd = malloc(sizeof(struct pollfd) * pollSize);
    if (pFd == NULL) {
        set_errno(EINVAL);
        goto OUT_RELEASE;
    }

    for (i = 0; i < pollSize; i++) {
        pFd[i].fd = epHead->evs[i].data.fd;
        pFd[i].events = (short)epHead->evs[i].events;
    }

    ret = poll(pFd, pollSize, timeout);
    if (ret <= 0) {
        /* ret == 0 is a timeout; a negative ret keeps the errno set by poll() */
        free(pFd);
        goto OUT_RELEASE;
    }

    for (i = 0, counter = 0; i < ret && counter < pollSize; counter++) {
        if (pFd[counter].revents != 0) {
            evs[i].data.fd = pFd[counter].fd;
            evs[i].events = pFd[counter].revents;
            i++;
        }
    }

    free(pFd);
    ret = i;

OUT_RELEASE:
    (VOID)pthread_mutex_unlock(&g_epollMutex);
    return ret;
}
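/*
 * Usage sketch (illustrative only): a minimal wait loop on top of this poll()-backed
 * implementation. The events reported back are the poll() revents of the watched fds,
 * and 'HandleReadable' is a hypothetical application callback.
 *
 *   struct epoll_event outEvs[EPOLL_DEFAULT_SIZE];
 *   int n = epoll_wait(epfd, outEvs, EPOLL_DEFAULT_SIZE, 1000); // wait up to 1000 ms
 *   for (int i = 0; i < n; i++) {
 *       if (outEvs[i].events & EPOLLIN) {
 *           HandleReadable(outEvs[i].data.fd);
 *       }
 *   }
 */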