/*
 * Copyright (C) 2022 Huawei Device Co., Ltd.
 * Licensed under the Apache License, Version 2.0 (the "License");
 * you may not use this file except in compliance with the License.
 * You may obtain a copy of the License at
 *
 *     http://www.apache.org/licenses/LICENSE-2.0
 *
 * Unless required by applicable law or agreed to in writing, software
 * distributed under the License is distributed on an "AS IS" BASIS,
 * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
 * See the License for the specific language governing permissions and
 * limitations under the License.
 */

#include "epoll.h"
#include "spunge.h"
#include "spunge_app.h"
#include "socket_common.h"
#include "res.h"

#ifdef __cplusplus
extern "C" {
#endif

/*
 * Search for the file inside the eventpoll hash. It adds a usage count to
 * the returned item, so the caller must call ep_release_epitem()
 * after it has finished using the "struct EpItem".
 */
static struct EpItem *EpFind(struct EventPoll *ep, FILLP_INT fd)
{
    struct RbNode *rbp = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;
    struct EpItem *ret = FILLP_NULL_PTR;

    FILLP_UINT loopLimit = g_spunge->resConf.maxEpollItemNum;

    for (rbp = ep->rbr.rbNode; rbp && loopLimit; loopLimit--) {
        epi = EpItemEntryRbNode(rbp);
        if (fd > epi->fileDespcriptor) {
            rbp = rbp->rbRight;
        } else if (fd < epi->fileDespcriptor) {
            rbp = rbp->rbLeft;
        } else {
            /* Found it */
            ret = epi;
            break;
        }
    }

    return ret;
}
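
/*
 * Note: EpFind() walks ep->rbr keyed on the FillP fd; the walk is bounded by
 * resConf.maxEpollItemNum as a defensive limit so a corrupted tree cannot
 * spin forever, and FILLP_NULL_PTR is returned when the fd is not registered.
 */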

/*
 * insert epitem to eventpoll->rbr
 */
static void EpRbtreeInsert(struct EventPoll *ep, struct EpItem *epi)
{
    struct RbNode **p = &ep->rbr.rbNode;
    struct RbNode *parent = FILLP_NULL_PTR;
    struct EpItem *epic = FILLP_NULL_PTR;
    FILLP_UINT loopLimit = g_spunge->resConf.maxEpollItemNum;

    while (*p && loopLimit--) {
        parent = *p;
        epic = EpItemEntryRbNode(parent);
        if (epi->fileDespcriptor > epic->fileDespcriptor) {
            p = &parent->rbRight;
        } else {
            p = &parent->rbLeft;
        }
    }

    epi->rbn.rbLeft = epi->rbn.rbRight = FILLP_NULL_PTR;
    epi->rbn.rbParent = parent;

    epi->rbn.color = RB_RED;

    *p = &epi->rbn;

    FillpRbInsertColor(&epi->rbn, &ep->rbr);

    return;
}
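
/*
 * Note: EpRbtreeInsert() links the new item as a red leaf ordered by
 * fileDespcriptor and lets FillpRbInsertColor() rebalance the tree. Duplicate
 * fds are not expected here, because SpungeEpollCtl() checks with EpFind()
 * before the ADD path calls EpInsert().
 */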

/*
 * Add epitem to sock->epTaskList
 * epi is an application pointer
 */
static void EpollAddToSockWaitList(struct FtSocket *sock, struct EpItem *epi)
{
    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("Sem Wait fail");
        return;
    }
    HlistAddTail(&sock->epTaskList, &epi->sockWaitNode);
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);

    return;
}

/* Check and trigger the event when doing epoll ctl */
static void EpollCtlTriggleEvent(
    struct EventPoll *ep,
    struct FtSocket *sock,
    struct EpItem *epi)
{
    epi->revents = 0;
    if (SYS_ARCH_ATOMIC_READ(&sock->rcvEvent) > 0) {
        epi->revents |= SPUNGE_EPOLLIN;
    }

    if ((SYS_ARCH_ATOMIC_READ(&sock->sendEvent) != 0) && (SYS_ARCH_ATOMIC_READ(&sock->sendEventCount) > 0)) {
        epi->revents |= SPUNGE_EPOLLOUT;
    }

    epi->revents |= (FILLP_UINT32)sock->errEvent;
    epi->revents &= epi->event.events;

    if (epi->revents > 0) {
        EpSocketReady(ep, epi);
    }
}
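
/*
 * Note: the readiness mask is recomputed from scratch on every ctl call:
 * SPUNGE_EPOLLIN when receive events are pending, SPUNGE_EPOLLOUT when the
 * send event is set and sendEventCount is positive, plus any error events.
 * The result is masked with the registered interest set and, if non-zero,
 * the item is queued on the ready list via EpSocketReady().
 */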

static struct EpItem *EpollMallocEpitem(void)
{
    struct EpItem *epi = FILLP_NULL_PTR;
    FILLP_INT ret = DympAlloc(g_spunge->epitemPool, (void **)&epi, FILLP_FALSE);
    if ((ret != ERR_OK) || (epi == FILLP_NULL_PTR)) {
        FILLP_LOGERR("MP_MALLOC epoll failed.");
        return FILLP_NULL_PTR;
    }

    epi->rbn.rbParent = &(epi->rbn);
    epi->fileDespcriptor = -1;
    epi->ep = FILLP_NULL_PTR;
    epi->revents = FILLP_NULL_NUM;
    HLIST_INIT_NODE(&epi->rdlNode);
    HLIST_INIT_NODE(&epi->sockWaitNode);

    return epi;
}

/*
 * Modify the interest event mask by dropping an event if the new mask
 * has a match in the current file status.
 */
static FILLP_INT EpModify(
    struct EventPoll *ep,
    struct FtSocket *sock,
    struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Sem wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    (void)memcpy_s(&epi->event, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    EpollCtlTriggleEvent(ep, sock, epi);
    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);
    return FILLP_OK;
}

/*
 * Unlink the "struct EpItem" from all places it might have been hooked up.
 * Remove the epitem from eventpoll->rbn.
 *
 * Comment 1:
 *  It can happen that this one is called for an item already unlinked.
 *  The check protects us from doing a double unlink (crash).
 *
 * Comment 2:
 *  Clear the event mask for the unlinked item. This avoids item
 *  notifications being sent after the unlink operation from inside
 *  the kernel->userspace event transfer loop.
 *
 * Comment 3:
 *  At this point it is safe to do the job: unlink the item from our rb-tree.
 *  This operation, together with the check above, closes the door to
 *  double unlinks.
 *
 * Comment 4:
 *  If the item we are going to remove is inside the ready file descriptor
 *  list, we want to remove it from that list to avoid stale events.
 */
static FILLP_INT EpUnlink(struct EventPoll *ep, struct EpItem *epi)
{
    /* Comment 1 */
    if (epi->rbn.rbParent == &(epi->rbn)) {
        FILLP_LOGERR("struct EpItem already unlinked.");
        SET_ERRNO(FILLP_EINVAL);
        return ERR_FAILURE;
    }

    /* Comment 2 */
    epi->event.events = 0;

    /* Comment 3 */
    FillpRbErase(&epi->rbn, &ep->rbr);

    /* Comment 4 */
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Sem Wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }

    epi->revents = FILLP_NULL_NUM;
    EpDelRdlnode(ep, epi);

    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);

    return FILLP_OK;
}
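
/*
 * Note: an item that was never linked (or is already unlinked) is recognized
 * by rbn.rbParent pointing at itself, the state set up by EpollMallocEpitem();
 * that is what the Comment 1 check relies on. Clearing the event mask and
 * dropping the item from the ready list are done under appCoreSem so the core
 * side does not report the item again while it is being unlinked.
 */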

/*
 * Removes a "struct EpItem" from the eventpoll hash and deallocates
 * all the associated resources.
 * epi is an application pointer
 */
static FILLP_INT EpRemove(struct EventPoll *ep, struct EpItem *epi)
{
    FILLP_INT error;
    FILLP_INT fd;
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct HlistNode *node = FILLP_NULL_PTR;

    if ((ep == FILLP_NULL_PTR) || (epi == FILLP_NULL_PTR)) {
        FILLP_LOGERR("EpRemove: Invalid parameters passed.");
        SET_ERRNO(FILLP_EINVAL);
        return ERR_NULLPTR;
    }

    fd = epi->fileDespcriptor;

    /* For the given fd, validation is already done in the upper function
       SpungeEpollCtl, so there is no need to validate ori_sock again.

       FtEpollCtl->SpungeEpollCtl->EpRemove/EpInsert
    */
    sock = SockGetSocket(fd);
    if ((sock == FILLP_NULL_PTR) || (sock->allocState == SOCK_ALLOC_STATE_EPOLL)) {
        FILLP_LOGERR("EpRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_COMM;
    }

    if (SYS_ARCH_SEM_WAIT(&(sock->epollTaskListLock))) {
        FILLP_LOGERR("sem wait fail");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    node = HLIST_FIRST(&sock->epTaskList);
    while (node != FILLP_NULL_PTR) {
        if (node == &epi->sockWaitNode) {
            HlistDelete(&sock->epTaskList, node);
            break;
        }
        node = node->next;
    }
    (void)SYS_ARCH_SEM_POST(&(sock->epollTaskListLock));

    /* Really unlink the item from the hash */
    error = EpUnlink(ep, epi);
    if (error != ERR_OK) {
        return error;
    }

    DympFree(epi);

    return FILLP_OK;
}
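
/*
 * Note: removal is ordered so that the epitem is first detached from the
 * socket's epTaskList (under epollTaskListLock), then unlinked from the
 * rb-tree and ready list by EpUnlink(), and only then returned to the epitem
 * pool with DympFree().
 */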

static FILLP_INT EpGetEventsAndSignal(
    struct EventPoll *ep,
    struct SpungeEpollEvent *events,
    FILLP_INT maxEvents,
    FILLP_SLONG timeout)
{
    FILLP_INT eventCount = 0;
    struct HlistNode *node = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;

    if (SYS_ARCH_SEM_WAIT(&ep->appSem)) {
        FILLP_LOGERR("app-sem wait fail");
        return ERR_COMM;
    }
    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("core-sem wait fail");
        (void)SYS_ARCH_SEM_POST(&ep->appSem);
        return ERR_COMM;
    }
    node = HLIST_FIRST(&ep->rdList);
    while ((node != FILLP_NULL_PTR) && (eventCount < maxEvents)) {
        epi = EpItemEntryRdlNode(node);
        node = node->next;

        epi->revents &= epi->event.events;
        EpollUpdateEpEvent(epi);

        if (epi->revents > 0) {
            events[eventCount].events = epi->revents;
            (void)memcpy_s(&events[eventCount].data, sizeof(events[eventCount].data), &epi->event.data,
                sizeof(epi->event.data));
            eventCount++;
        }

        /* If no event is pending, or the item is edge-triggered, remove it from the ready list */
        if ((epi->revents == 0) || (epi->event.events & SPUNGE_EPOLLET)) {
            EpDelRdlnode(ep, epi);
        }
    }

    if ((timeout != 0) && (eventCount == 0)) {
        /* The caller will wait for a signal in this case, so reset the signal variable under the appCoreSem semaphore */
        (void)SYS_ARCH_ATOMIC_SET(&ep->semSignalled, 0);
    }

    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);
    (void)SYS_ARCH_SEM_POST(&ep->appSem);
    if (eventCount > 0) {
        FILLP_LOGDBG("Get eventCount:%d", eventCount);
    }
    return eventCount;
}
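
/*
 * Note: EpGetEventsAndSignal() drains the ready list under both appSem and
 * appCoreSem. Items whose effective event mask is empty, or that were
 * registered with SPUNGE_EPOLLET, are dropped from the ready list after being
 * visited; level-triggered items that still have pending events stay queued
 * so the next wait reports them again. semSignalled is cleared only when the
 * caller found nothing and is about to block (timeout != 0).
 */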

static FILLP_INT EpPoll(
    struct FtSocket *sock,
    struct SpungeEpollEvent *events,
    FILLP_INT maxEvents,
    FILLP_SLONG timeout)
{
    FILLP_INT eventCount = 0;
    FILLP_INT semTimedWait;
    FILLP_LLONG begintime = 0;
    FILLP_LLONG endtime;
    FILLP_UCHAR isTakenBeginTs = 0;
    FILLP_BOOL needLoopNun = FILLP_TRUE;
    FILLP_SLONG timeoutBkp = timeout;
    FILLP_SLONG timeoutWork = timeout;
    struct EventPoll *ep = sock->eventEpoll;

    /*
     * We don't have any available event to return to the caller.
     * We need to sleep here, and we will be woken up by
     * ep_poll_callback() when events become available.
     *
     * Here we do not acquire the rdlock because it is difficult to handle:
     * if we took it in this function and the timeout was -1, it would
     * deadlock, as the core thread could not get the lock and update the
     * ready list. Also, FtEpollWait runs in another thread and the check
     * here only reads and validates for NULL, hence the wait lock is not
     * acquired. Acquiring the lock here might also reduce performance.
     */
    while (needLoopNun == FILLP_TRUE) {
        if (sock->allocState == SOCK_ALLOC_STATE_EPOLL_TO_CLOSE) {
            FILLP_LOGERR("epFd will be destroyed, so return");
            return 0;
        }

        eventCount = EpGetEventsAndSignal(ep, events, maxEvents, timeoutBkp);
        if (eventCount) {
            break;
        }
        if (timeoutBkp == -1) {
            EPOLL_CPU_PAUSE();
            if (SYS_ARCH_SEM_WAIT(&ep->waitSem)) {
                FILLP_LOGERR("ep_wait fail");
                return 0;
            }
        } else if (timeoutBkp == 0) {
            break;
        } else { /* timed wait */
            if (isTakenBeginTs == 0) {
                begintime = SYS_ARCH_GET_CUR_TIME_LONGLONG(); /* microseconds */
                isTakenBeginTs = 1;
            }

            semTimedWait = SYS_ARCH_SEM_WAIT_TIMEOUT(&ep->waitSem, timeoutWork);
            endtime = SYS_ARCH_GET_CUR_TIME_LONGLONG();
            /* timeoutBkp is in milliseconds and SYS_ARCH_GET_CUR_TIME_LONGLONG() is in microseconds */
            if ((FILLP_UTILS_US2MS(endtime - begintime)) >= timeoutBkp) {
                /* Try once more in case an event has been posted, as we do not currently check why sem_wait returned */
                eventCount = EpGetEventsAndSignal(ep, events, maxEvents, 0);
                (void)semTimedWait;

                break;
            }

            timeoutWork = (FILLP_SLONG)(timeoutBkp - (FILLP_UTILS_US2MS(endtime - begintime)));
            continue;
        }
    }

    return eventCount;
}
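
/*
 * Note: EpPoll() implements the three timeout modes of epoll_wait. With
 * timeout == -1 it blocks on ep->waitSem until the core thread posts it; with
 * timeout == 0 it polls the ready list once; with a positive timeout it does
 * timed waits on waitSem and recomputes the remaining budget from the
 * microsecond timestamps (FILLP_UTILS_US2MS), performing one final poll when
 * the budget is exhausted.
 */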

static struct EventPoll *EpollMallocEventpoll()
{
    struct EventPoll *ep = FILLP_NULL_PTR;
    FILLP_INT ret = DympAlloc(g_spunge->eventpollPool, (void **)&ep, FILLP_FALSE);
    if ((ret != ERR_OK) || (ep == FILLP_NULL_PTR)) {
        FILLP_LOGERR("EpollMallocEventpoll: MP_MALLOC failed. \r\n");
        SET_ERRNO(FILLP_ENOMEM);
        return FILLP_NULL_PTR;
    }

    ret = SYS_ARCH_SEM_INIT(&ep->appSem, 1);
    if (ret != FILLP_OK) {
        FILLP_LOGERR("EpollMallocEventpoll:socket create epoll semaphore failed. ");
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    ret = SYS_ARCH_SEM_INIT(&ep->waitSem, 0);
    if (ret != FILLP_OK) {
        (void)SYS_ARCH_SEM_DESTROY(&ep->appSem);
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    ep->rbr.rbNode = FILLP_NULL_PTR;
    HLIST_INIT(&ep->rdList);
    ret = SYS_ARCH_SEM_INIT(&ep->appCoreSem, 1);
    if (ret != FILLP_OK) {
        (void)SYS_ARCH_SEM_DESTROY(&ep->waitSem);
        (void)SYS_ARCH_SEM_DESTROY(&ep->appSem);
        DympFree(ep);
        SET_ERRNO(FILLP_EFAULT);
        return FILLP_NULL_PTR;
    }

    (void)SYS_ARCH_ATOMIC_SET(&ep->semSignalled, 0);
    return ep;
}
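
/*
 * Note on the three synchronization objects created above: appSem serializes
 * application callers (the ctl/wait paths), appCoreSem protects the ready
 * list shared with the core thread, and waitSem is the counting semaphore a
 * blocked EpPoll() sleeps on. semSignalled is presumably used on the producer
 * side (not in this file) to avoid redundant posts of waitSem.
 */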

/*
 * Called by epoll_ctl with the "add" op
 */
static FILLP_INT EpInsert(
    struct EventPoll *ep,
    FILLP_CONST struct SpungeEpollEvent *event,
    FILLP_INT fd)
{
    struct EpItem *epi = FILLP_NULL_PTR;

    /* If the file is already "ready" we drop it inside the ready list.
       For the given fd, validation is already done in the upper function
       SpungeEpollCtl, so there is no need to validate ori_sock again.
       FtEpollCtl->SpungeEpollCtl->EpRemove/EpInsert
    */
    struct FtSocket *sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_EBADF);
        FILLP_LOGERR("SockGetSocket returns NULL, fillp_sock_id:%d", fd);
        return ERR_NO_SOCK;
    }

    if (sock->allocState == SOCK_ALLOC_STATE_EPOLL) {
        FILLP_LOGERR("Epoll socket not supported, fillp_sock_id:%d", fd);
        SET_ERRNO(FILLP_EBADF);
        return ERR_NO_SOCK;
    }

    epi = EpollMallocEpitem();

    if (epi == FILLP_NULL_PTR) {
        FILLP_LOGERR("EpollMallocEpitem returns NULL.");
        SET_ERRNO(FILLP_ENOMEM);
        return ERR_NULLPTR;
    }

    epi->ep = ep;
    (void)memcpy_s(&epi->event, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epi->fileDespcriptor = fd;

    EpRbtreeInsert(ep, epi);
    /* add to fd wait queue */
    EpollAddToSockWaitList(sock, epi);

    if (SYS_ARCH_SEM_WAIT(&ep->appCoreSem)) {
        FILLP_LOGERR("Fail to wait appCoreSem");
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }
    EpollCtlTriggleEvent(ep, sock, epi);
    (void)SYS_ARCH_SEM_POST(&ep->appCoreSem);

    return FILLP_OK;
}
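
/*
 * Note: EpInsert() links the new epitem into the rb-tree and the socket's
 * wait list before running the initial readiness check, so an event that is
 * already pending at ADD time is queued to the ready list immediately rather
 * than waiting for the next state change.
 */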

static struct FtSocket *SpungeGetEpollSocketByFd(FILLP_INT epFd)
{
    struct FtSocket *epollSock = SockGetSocket(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollCtl: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return FILLP_NULL_PTR;
    }

    if (SYS_ARCH_RWSEM_TRYRDWAIT(&epollSock->sockConnSem) != ERR_OK) {
        FILLP_LOGERR("Socket-%d state is changing,maybe closing ", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return FILLP_NULL_PTR;
    }

    if (epollSock->allocState != SOCK_ALLOC_STATE_EPOLL) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: epoll socket state is incorrect for epoll sock Id=%d , state=%d",
            epFd, epollSock->allocState);
        SET_ERRNO(FILLP_ENOTSOCK);
        return FILLP_NULL_PTR;
    }

    if (epollSock->eventEpoll == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: epollSock->eventEpoll is null. ");

        SET_ERRNO(FILLP_EINVAL);
        return FILLP_NULL_PTR;
    }

    return epollSock;
}

static FILLP_INT SpungeEpollCtlCheckSockValid(struct FtSocket *epollSock, struct FtSocket *sock, FILLP_INT fd)
{
    if (SYS_ARCH_RWSEM_TRYRDWAIT(&sock->sockConnSem) != ERR_OK) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("Socket-%d state is changing,maybe closing ", fd);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    if ((sock->allocState != SOCK_ALLOC_STATE_COMM) && (sock->allocState != SOCK_ALLOC_STATE_WAIT_TO_CLOSE)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: socket stat is wrong ");
        if (sock->allocState == SOCK_ALLOC_STATE_EPOLL) {
            SET_ERRNO(FILLP_EINVAL);
        } else {
            SET_ERRNO(FILLP_EBADF);
        }

        return -1;
    }

    if (SYS_ARCH_SEM_WAIT(&epollSock->eventEpoll->appSem)) {
        FILLP_LOGERR("sem-wait fail");
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    return ERR_OK;
}
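
/*
 * Note on lock order: the caller already holds the epoll socket's sockConnSem
 * read lock; this helper additionally takes the target socket's sockConnSem
 * read lock and then the event poll's appSem. On success all three stay held
 * and are released at the end of SpungeEpollCtl(); on the failure paths above
 * whatever was acquired is released here before returning.
 */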

static FILLP_INT SpungeEpollCtlHandleAddEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    FILLP_INT epFd,
    struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    FILLP_INT error = 0;
    struct SpungeEpollEvent epds;

    if (epi != FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_EEXIST);
        return -1;
    }

    /* An ft-socket can be registered with at most 10 epoll instances and no more;
       this value is controlled by compile-time configuration.
    */
    if (sock->associatedEpollInstanceIdx >= FILLP_NUM_OF_EPOLL_INSTANCE_SUPPORTED) {
        FILLP_LOGERR("already added too much socket, sock->associatedEpollInstanceIdx:%u",
            sock->associatedEpollInstanceIdx);
        SET_ERRNO(FILLP_ENOMEM);
        return -1;
    }

    (void)memset_s(&epds, sizeof(struct SpungeEpollEvent), 0, sizeof(struct SpungeEpollEvent));
    (void)memcpy_s(&epds, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epds.events |= ((FILLP_UINT32)SPUNGE_EPOLLERR | (FILLP_UINT32)SPUNGE_EPOLLHUP);

    error = EpInsert(epollSock->eventEpoll, &epds, sock->index);
    if (error != ERR_OK) {
        return -1;
    }
    (void)SYS_ARCH_ATOMIC_INC(&sock->epollWaiting, 1);

    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("tasklock fail");
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }
    sock->associatedEpollInstanceArr[sock->associatedEpollInstanceIdx++] = epFd;
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);

    return ERR_OK;
}
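
/*
 * Note: as with Linux epoll, SPUNGE_EPOLLERR and SPUNGE_EPOLLHUP are always
 * added to the interest set for ADD and MOD. On a successful ADD the socket's
 * epollWaiting counter is incremented and the epoll fd is recorded in
 * associatedEpollInstanceArr under epollTaskListLock.
 */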

static FILLP_INT SpungeEpollCtlHandleDelEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    FILLP_INT epFd,
    struct EpItem *epi)
{
    FILLP_INT error;

    if (epi == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_ENOENT);
        return -1;
    }

    error = EpRemove(epollSock->eventEpoll, epi);
    if (error != ERR_OK) {
        return -1;
    }
    (void)SYS_ARCH_ATOMIC_DEC(&sock->epollWaiting, 1);

    if (SYS_ARCH_SEM_WAIT(&sock->epollTaskListLock)) {
        FILLP_LOGERR("Wait epoll tasklist fail");
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }
    SpungeDelEpInstFromFtSocket(sock, epFd);
    (void)SYS_ARCH_SEM_POST(&sock->epollTaskListLock);

    return ERR_OK;
}

static FILLP_INT SpungeEpollCtlHandleModEvent(
    struct FtSocket *epollSock,
    struct FtSocket *sock,
    struct EpItem *epi,
    FILLP_CONST struct SpungeEpollEvent *event)
{
    struct SpungeEpollEvent epds;
    FILLP_INT error;

    if (epi == FILLP_NULL_PTR) {
        SET_ERRNO(FILLP_ENOENT);
        return -1;
    }

    (void)memset_s(&epds, sizeof(struct SpungeEpollEvent), 0, sizeof(struct SpungeEpollEvent));
    (void)memcpy_s(&epds, sizeof(struct SpungeEpollEvent), event, sizeof(struct SpungeEpollEvent));
    epds.events |= ((FILLP_UINT32)SPUNGE_EPOLLERR | (FILLP_UINT32)SPUNGE_EPOLLHUP);
    error = EpModify(epollSock->eventEpoll, sock, epi, &epds);
    if (error != ERR_OK) {
        return -1;
    }

    return ERR_OK;
}
663 
SpungeEpollCtlParaChk(FILLP_INT epFd,FILLP_INT op,FILLP_INT fd,FILLP_CONST struct SpungeEpollEvent * event)664 static FILLP_INT SpungeEpollCtlParaChk(FILLP_INT epFd, FILLP_INT op, FILLP_INT fd,
665     FILLP_CONST struct SpungeEpollEvent *event)
666 {
667     /* For SPUNGE_EPOLL_CTL_DEL: Old kernels do not check the 'event' NULL case */
668     if (((op == SPUNGE_EPOLL_CTL_ADD) || (op == SPUNGE_EPOLL_CTL_MOD)) && (event == FILLP_NULL_PTR)) {
669         FILLP_LOGERR("SpungeEpollCtl: 'event' param is NULL");
670         SET_ERRNO(FILLP_EFAULT);
671         return -1;
672     }
673 
674     if (event != FILLP_NULL_PTR) {
675         FILLP_LOGINF("epFd:%d,op:%d,fillp_sock_id:%d,event->events:%x,event->u64:%llx",
676             epFd, op, fd, event->events, event->data.u64);
677         FILLP_LOGINF("sizeof(event):%zu, sizeof(evnent->events):%zu, sizeof(data):%zu",
678             sizeof(*event), sizeof(event->events), sizeof(event->data));
679     } else {
680         FILLP_LOGWAR("epFd:%d,op:%d,fillp_sock_id:%d,event null", epFd, op, fd);
681     }
682     return 0;
683 }

FILLP_INT SpungeEpollCtl(FILLP_INT epFd, FILLP_INT op, FILLP_INT fd, FILLP_CONST struct SpungeEpollEvent *event)
{
    struct FtSocket *epollSock = FILLP_NULL_PTR;
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;
    FILLP_INT error;

    if (SpungeEpollCtlParaChk(epFd, op, fd, event) != 0) {
        return -1;
    }

    /* Get the epoll instance socket ID */
    epollSock = SpungeGetEpollSocketByFd(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        return -1;
    }

    sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollCtl: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return -1;
    }

    error = SpungeEpollCtlCheckSockValid(epollSock, sock, fd);
    if (error != ERR_OK) {
        return -1;
    }

    epi = EpFind(epollSock->eventEpoll, fd);

    switch (op) {
        case SPUNGE_EPOLL_CTL_ADD:
            error = SpungeEpollCtlHandleAddEvent(epollSock, sock, epFd, epi, event);
            break;
        case SPUNGE_EPOLL_CTL_DEL:
            error = SpungeEpollCtlHandleDelEvent(epollSock, sock, epFd, epi);
            break;
        case SPUNGE_EPOLL_CTL_MOD:
            error = SpungeEpollCtlHandleModEvent(epollSock, sock, epi, event);
            break;
        default:
            SET_ERRNO(FILLP_EINVAL);
            error = -1;
            break;
    }

    (void)SYS_ARCH_SEM_POST(&epollSock->eventEpoll->appSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
    FILLP_LOGDBG("return value:%d", error);
    return error;
}

FILLP_INT SpungeEpollFindRemove(FILLP_INT epFd, FILLP_INT fd)
{
    struct FtSocket *sock = FILLP_NULL_PTR;
    struct EpItem *epi = FILLP_NULL_PTR;

    /* Get the epoll instance socket ID */
    struct FtSocket *epollSock = SockGetSocket(epFd);
    if (epollSock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollFindRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_PARAM;
    }

    if (SYS_ARCH_RWSEM_TRYRDWAIT(&epollSock->sockConnSem) != ERR_OK) {
        FILLP_LOGERR("SpungeEpollFindRemove: Socket-%d state is changing,maybe closing", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return ERR_COMM;
    }

    if (epollSock->allocState != SOCK_ALLOC_STATE_EPOLL) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGWAR("SpungeEpollFindRemove: epoll socket state is incorrect for epoll sock Id=%d , state=%d\r\n",
            epFd, epollSock->allocState);
        return ERR_PARAM;
    }

    if (epollSock->eventEpoll == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollFindRemove: epollSock->eventEpoll is null.");
        return ERR_NULLPTR;
    }

    sock = SockGetSocket(fd);
    if (sock == FILLP_NULL_PTR) {
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        FILLP_LOGERR("SpungeEpollFindRemove: SockGetSocket failed.");
        SET_ERRNO(FILLP_EBADF);
        return ERR_PARAM;
    }

    if (SYS_ARCH_SEM_WAIT(&epollSock->eventEpoll->appSem)) {
        FILLP_LOGERR("Error to wait appSem");
        (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
        return ERR_COMM;
    }

    epi = EpFind(epollSock->eventEpoll, fd);
    if (epi != FILLP_NULL_PTR) {
        (void)EpRemove(epollSock->eventEpoll, epi);
        (void)SYS_ARCH_ATOMIC_DEC(&sock->epollWaiting, 1);
        SpungeDelEpInstFromFtSocket(sock, epFd);
    }

    (void)SYS_ARCH_SEM_POST(&epollSock->eventEpoll->appSem);
    (void)SYS_ARCH_RWSEM_RDPOST(&epollSock->sockConnSem);
    return ERR_OK;
}

FILLP_INT SpungeEpollWait(FILLP_INT epFd, struct SpungeEpollEvent *events, FILLP_INT maxEvents, FILLP_INT timeout)
{
    FILLP_INT num;
    struct FtSocket *sock;
    FILLP_INT ret;
    sock = SockGetSocket(epFd);
    if (sock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollWait: SockGetSocket failed. ");
        SET_ERRNO(FILLP_EBADF);
        return -1;
    }

    ret = SYS_ARCH_RWSEM_TRYRDWAIT(&sock->sockConnSem);
    if (ret != ERR_OK) {
        FILLP_LOGERR("Socket-%d state is changing,maybe closing", epFd);
        SET_ERRNO(FILLP_EBUSY);
        return -1;
    }

    if ((sock->allocState != SOCK_ALLOC_STATE_EPOLL) || (sock->eventEpoll == FILLP_NULL_PTR)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        FILLP_LOGERR("SpungeEpollWait: allocState is not epoll or eventEpoll is NULL. ");

        SET_ERRNO(FILLP_ENOTSOCK);
        return -1;
    }

    /* The maximum number of events must be greater than zero */
    if ((maxEvents <= 0) || (events == FILLP_NULL_PTR)) {
        (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
        FILLP_LOGERR("SpungeEpollWait: The maximum number of events must be greater than zero. ");
        SET_ERRNO(FILLP_EINVAL);
        return -1;
    }

    num = EpPoll(sock, events, maxEvents, timeout);

    (void)SYS_ARCH_RWSEM_RDPOST(&sock->sockConnSem);
    return num;
}

FILLP_INT SpungeEpollCreate(void)
{
    struct FtSocket *sock = SpungeAllocSock(SOCK_ALLOC_STATE_EPOLL);
    struct EventPoll *ep = FILLP_NULL_PTR;

    FILLP_LOGINF("create epoll");

    if (sock == FILLP_NULL_PTR) {
        FILLP_LOGERR("SpungeEpollCreate: alloc sock failed.");
        SET_ERRNO(FILLP_ENOMEM);
        return -1;
    }

    ep = EpollMallocEventpoll();
    if (ep == FILLP_NULL_PTR) {
        FILLP_LOGINF("Fail to alloc ep");
        sock->allocState = SOCK_ALLOC_STATE_FREE;
        SockFreeSocket(sock);
        return -1;
    }

    sock->eventEpoll = ep;
    sock->isListenSock = FILLP_FALSE;
    sock->isSockBind = FILLP_FALSE;

    (void)SYS_ARCH_ATOMIC_SET(&sock->rcvEvent, 0);
    (void)SYS_ARCH_ATOMIC_SET(&sock->sendEvent, 0);
    sock->errEvent = 0;

    (void)SYS_ARCH_ATOMIC_SET(&sock->epollWaiting, 0);
    HLIST_INIT(&sock->epTaskList);

    FILLP_LOGINF("create epoll return, epFd:%d", sock->index);
    return sock->index;
}
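
/*
 * Illustrative usage (sketch only): the public FtEpoll* wrappers are expected
 * to route into SpungeEpollCreate/SpungeEpollCtl/SpungeEpollWait. FtEpollCtl
 * and FtEpollWait are referenced in the comments above; FtEpollCreate is
 * assumed here as the application-facing counterpart of SpungeEpollCreate,
 * and sockFd stands for an already created FillP socket.
 *
 *     struct SpungeEpollEvent ev;
 *     struct SpungeEpollEvent ready[8];
 *     FILLP_INT epFd = FtEpollCreate();
 *
 *     (void)memset_s(&ev, sizeof(ev), 0, sizeof(ev));
 *     ev.events = SPUNGE_EPOLLIN;
 *     ev.data.u64 = (FILLP_UINT32)sockFd;
 *     (void)FtEpollCtl(epFd, SPUNGE_EPOLL_CTL_ADD, sockFd, &ev);
 *
 *     FILLP_INT n = FtEpollWait(epFd, ready, 8, 1000);
 *
 * A return value n > 0 means ready[0..n-1] hold the triggered events, which
 * can be matched back to sockets via the data field supplied at registration.
 */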

#ifdef __cplusplus
}
#endif