/*
 * libwebsockets - small server side websockets and web server implementation
 *
 * Copyright (C) 2010 - 2020 Andy Green <andy@warmcat.com>
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this software and associated documentation files (the "Software"), to
 * deal in the Software without restriction, including without limitation the
 * rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
 * sell copies of the Software, and to permit persons to whom the Software is
 * furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */

#include "private-lib-core.h"

int
_lws_change_pollfd(struct lws *wsi, int _and, int _or, struct lws_pollargs *pa)
{
#if !defined(LWS_WITH_LIBUV) && !defined(LWS_WITH_LIBEV) && \
    !defined(LWS_WITH_LIBEVENT) && !defined(LWS_WITH_GLIB)
	volatile struct lws_context_per_thread *vpt;
#endif
	struct lws_context_per_thread *pt;
	struct lws_context *context;
	int ret = 0, pa_events;
	struct lws_pollfd *pfd;
	int sampled_tid, tid;

	if (!wsi)
		return 0;

	assert(wsi->position_in_fds_table == LWS_NO_FDS_POS ||
	       wsi->position_in_fds_table >= 0);

	if (wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	if (((volatile struct lws *)wsi)->handling_pollout &&
	    !_and && _or == LWS_POLLOUT) {
		/*
		 * Happening alongside service thread handling POLLOUT.
		 * The danger is that when he is finished, he will disable
		 * POLLOUT, countermanding what we changed here.
		 *
		 * Instead of changing the fds, inform the service thread
		 * what happened, and ask it to leave POLLOUT active on exit
		 */
		((volatile struct lws *)wsi)->leave_pollout_active = 1;
		/*
		 * by definition the service thread is not in poll() wait,
		 * so there is no need to cancel service
		 */

		lwsl_debug("%s: using leave_pollout_active\n", __func__);

		return 0;
	}

	context = wsi->context;
	pt = &context->pt[(int)wsi->tsi];

	assert(wsi->position_in_fds_table < (int)pt->fds_count);

#if !defined(LWS_WITH_LIBUV) && !defined(LWS_WITH_LIBEV) && \
    !defined(LWS_WITH_LIBEVENT) && !defined(LWS_WITH_GLIB)
	/*
	 * This only applies when we use the default poll() event loop.
	 *
	 * BSD can revert pa->events at any time, when the kernel decides to
	 * exit from poll().  We can't protect against it using locking.
	 *
	 * Therefore we must check first if the service thread is in poll()
	 * wait; if so, we know we must be being called from a foreign thread,
	 * and we must keep a strictly ordered list of changes we made instead
	 * of trying to apply them, since poll(), which may exit at any time,
	 * would revert our changes.
	 *
	 * The plat code will apply them when it leaves the poll() wait
	 * before doing anything else.
	 */

	vpt = (volatile struct lws_context_per_thread *)pt;

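	/*
	 * Flag that a foreign thread is manipulating the foreign pfd
	 * list... after leaving poll() the plat service code spins while
	 * this is set, so the list can't be consumed while we are still
	 * editing it
	 */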
	vpt->foreign_spinlock = 1;
	lws_memory_barrier();

	if (vpt->inside_poll) {
		struct lws_foreign_thread_pollfd *ftp, **ftp1;
		/*
		 * We are certainly a foreign thread trying to change events
		 * while the service thread is in the poll() wait.
		 *
		 * Create a list of changes to be applied after poll() exit,
		 * instead of trying to apply them now.
		 */
		ftp = lws_malloc(sizeof(*ftp), "ftp");
		if (!ftp) {
			vpt->foreign_spinlock = 0;
			lws_memory_barrier();
			ret = -1;
			goto bail;
		}

		ftp->_and = _and;
		ftp->_or = _or;
		ftp->fd_index = wsi->position_in_fds_table;
		ftp->next = NULL;

		lws_pt_lock(pt, __func__);

		/* place at END of list to maintain order */
		ftp1 = (struct lws_foreign_thread_pollfd **)
						&vpt->foreign_pfd_list;
		while (*ftp1)
			ftp1 = &((*ftp1)->next);

		*ftp1 = ftp;
		vpt->foreign_spinlock = 0;
		lws_memory_barrier();

		lws_pt_unlock(pt);

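		/*
		 * wake the service thread out of its poll() wait, so the
		 * queued change gets applied promptly
		 */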
		lws_cancel_service_pt(wsi);

		return 0;
	}

	vpt->foreign_spinlock = 0;
	lws_memory_barrier();
#endif

#if !defined(__linux__)
	/* OSX couldn't see close on stdin pipe side otherwise */
	_or |= LWS_POLLHUP;
#endif

	pfd = &pt->fds[wsi->position_in_fds_table];
	pa->fd = wsi->desc.sockfd;
	lwsl_debug("%s: wsi %p: fd %d events %d -> %d\n", __func__, wsi,
		   pa->fd, pfd->events, (pfd->events & ~_and) | _or);
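	/* bits named in _and are dropped, bits named in _or are asserted */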
	pa->prev_events = pfd->events;
	pa->events = pfd->events = (pfd->events & ~_and) | _or;

	if (wsi->mux_substream)
		return 0;

#if defined(LWS_WITH_EXTERNAL_POLL)

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi,
					LWS_CALLBACK_CHANGE_MODE_POLL_FD,
					wsi->user_space, (void *)pa, 0)) {
		ret = -1;
		goto bail;
	}
#endif

	if (context->event_loop_ops->io) {
		if (_and & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_READ);

		if (_or & LWS_POLLIN)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_READ);

		if (_and & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_STOP | LWS_EV_WRITE);

		if (_or & LWS_POLLOUT)
			context->event_loop_ops->io(wsi,
					LWS_EV_START | LWS_EV_WRITE);
	}

	/*
	 * if we changed something in this pollfd...
	 *   ... and we're running in a different thread context
	 *       than the service thread...
	 *   ... and the service thread is waiting ...
	 * then cancel it to force a restart with our changed events
	 */
	pa_events = pa->prev_events != pa->events;

	if (pa_events) {
		if (lws_plat_change_pollfd(context, wsi, pfd)) {
			lwsl_info("%s failed\n", __func__);
			ret = -1;
			goto bail;
		}
		sampled_tid = pt->service_tid;
		if (sampled_tid && wsi->vhost) {
			tid = wsi->vhost->protocols[0].callback(wsi,
				LWS_CALLBACK_GET_THREAD_ID, NULL, NULL, 0);
			if (tid == -1) {
				ret = -1;
				goto bail;
			}
			if (tid != sampled_tid)
				lws_cancel_service_pt(wsi);
		}
	}

bail:
	return ret;
}
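
/*
 * Illustrative sketch only, not part of lws: an LWS_WITH_EXTERNAL_POLL
 * integration would typically mirror the notification above into its own
 * fd table from the protocols[0] callback, along these lines
 * (my_poller_set_events() is hypothetical):
 *
 *	case LWS_CALLBACK_CHANGE_MODE_POLL_FD: {
 *		struct lws_pollargs *pa = (struct lws_pollargs *)in;
 *
 *		my_poller_set_events(pa->fd, pa->events);
 *		break;
 *	}
 */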

#if defined(LWS_WITH_SERVER)
/*
 * Enable or disable listen sockets on this pt globally...
 * it's modulated according to the pt having space for a new accept.
 */
static void
lws_accept_modulation(struct lws_context *context,
		      struct lws_context_per_thread *pt, int allow)
{
	struct lws_vhost *vh = context->vhost_list;
	struct lws_pollargs pa1;

	while (vh) {
		if (vh->lserv_wsi) {
			if (allow)
				_lws_change_pollfd(vh->lserv_wsi,
						   0, LWS_POLLIN, &pa1);
			else
				_lws_change_pollfd(vh->lserv_wsi,
						   LWS_POLLIN, 0, &pa1);
		}
		vh = vh->vhost_next;
	}
}
#endif

#if defined(_DEBUG)
void
__dump_fds(struct lws_context_per_thread *pt, const char *s)
{
	unsigned int n;

	lwsl_warn("%s: fds_count %u, %s\n", __func__, pt->fds_count, s);

	for (n = 0; n < pt->fds_count; n++) {
		struct lws *wsi = wsi_from_fd(pt->context, pt->fds[n].fd);

		lwsl_warn("  %d: fd %d, wsi %p, pos_in_fds: %d\n",
			  n + 1, pt->fds[n].fd, wsi,
			  wsi ? wsi->position_in_fds_table : -1);
	}
}
#else
#define __dump_fds(x, y)
#endif

int
__insert_wsi_socket_into_fds(struct lws_context *context, struct lws *wsi)
{
#if defined(LWS_WITH_EXTERNAL_POLL)
	struct lws_pollargs pa = { wsi->desc.sockfd, LWS_POLLIN, 0 };
#endif
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	int ret = 0;

//	__dump_fds(pt, "pre insert");

	lwsl_debug("%s: %p: tsi=%d, sock=%d, pos-in-fds=%d\n",
		   __func__, wsi, wsi->tsi, wsi->desc.sockfd, pt->fds_count);

	if ((unsigned int)pt->fds_count >= context->fd_limit_per_thread) {
		lwsl_err("Too many fds (%d vs %d)\n", context->max_fds,
			 context->fd_limit_per_thread);
		return 1;
	}

#if !defined(_WIN32)
	if (!wsi->context->max_fds_unrelated_to_ulimit &&
	    wsi->desc.sockfd - lws_plat_socket_offset() >= context->max_fds) {
		lwsl_err("Socket fd %d is too high (%d) offset %d\n",
			 wsi->desc.sockfd, context->max_fds,
			 lws_plat_socket_offset());
		return 1;
	}
#endif

	assert(wsi);
	assert(wsi->event_pipe || wsi->vhost);
	assert(lws_socket_is_valid(wsi->desc.sockfd));

#if defined(LWS_WITH_EXTERNAL_POLL)

	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *) &pa, 1))
		return -1;
#endif

	if (insert_wsi(context, wsi))
		return -1;
	pt->count_conns++;
	wsi->position_in_fds_table = pt->fds_count;

	pt->fds[wsi->position_in_fds_table].fd = wsi->desc.sockfd;
	pt->fds[wsi->position_in_fds_table].events = LWS_POLLIN;
#if defined(LWS_WITH_EXTERNAL_POLL)
	pa.events = pt->fds[pt->fds_count].events;
#endif

	lws_plat_insert_socket_into_fds(context, wsi);
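	/* the plat helper above is expected to bump pt->fds_count */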

#if defined(LWS_WITH_EXTERNAL_POLL)

	/* external POLL support via protocol 0 */
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_ADD_POLL_FD,
					      wsi->user_space, (void *) &pa, 0))
		ret = -1;
#endif
#if defined(LWS_WITH_SERVER)
	/* if no more room, defeat accepts on this service thread */
	if ((unsigned int)pt->fds_count == context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 0);
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *)&pa, 1))
		ret = -1;
#endif

//	__dump_fds(pt, "post insert");

	return ret;
}

int
__remove_wsi_socket_from_fds(struct lws *wsi)
{
	struct lws_context *context = wsi->context;
#if defined(LWS_WITH_EXTERNAL_POLL)
	struct lws_pollargs pa = { wsi->desc.sockfd, 0, 0 };
#endif
	struct lws_context_per_thread *pt = &context->pt[(int)wsi->tsi];
	struct lws *end_wsi;
	int v, m, ret = 0;

//	__dump_fds(pt, "pre remove");

#if !defined(_WIN32)
	if (!wsi->context->max_fds_unrelated_to_ulimit &&
	    wsi->desc.sockfd - lws_plat_socket_offset() > context->max_fds) {
		lwsl_err("fd %d too high (%d)\n", wsi->desc.sockfd,
			 context->max_fds);

		return 1;
	}
#endif
#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->vhost && wsi->vhost->protocols &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *)&pa, 1))
		return -1;
#endif

	lws_same_vh_protocol_remove(wsi);

	/* the guy who is to be deleted's slot index in pt->fds */
	m = wsi->position_in_fds_table;

	/* these are the only valid possibilities for position_in_fds_table */
	assert(m == LWS_NO_FDS_POS || (m >= 0 &&
				       (unsigned int)m < pt->fds_count));

	if (context->event_loop_ops->io)
		context->event_loop_ops->io(wsi,
				LWS_EV_STOP | LWS_EV_READ | LWS_EV_WRITE |
				LWS_EV_PREPARE_DELETION);
/*
	lwsl_notice("%s: wsi=%p, skt=%d, fds pos=%d, end guy pos=%d, endfd=%d\n",
		    __func__, wsi, wsi->desc.sockfd, wsi->position_in_fds_table,
		    pt->fds_count, pt->fds[pt->fds_count - 1].fd); */

	if (m != LWS_NO_FDS_POS) {
		char fixup = 0;

		assert(pt->fds_count && (unsigned int)m != pt->fds_count);

		/* deletion guy's lws_lookup entry needs nuking */
		delete_from_fd(context, wsi->desc.sockfd);

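		/*
		 * pt->fds is kept dense: rather than shuffling everything
		 * down one, the last pollfd moves into the vacated slot
		 * (O(1)) and that wsi's cached index is fixed up below
		 */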
		if ((unsigned int)m != pt->fds_count - 1) {
			/* have the last guy take up the now vacant slot */
			pt->fds[m] = pt->fds[pt->fds_count - 1];
			fixup = 1;
		}

		pt->fds[pt->fds_count - 1].fd = -1;

		/* this decrements pt->fds_count */
		lws_plat_delete_socket_from_fds(context, wsi, m);
		pt->count_conns--;
		if (fixup) {
			v = (int) pt->fds[m].fd;
			/* old end guy's "position in fds table" is now the
			 * deletion guy's old one */
			end_wsi = wsi_from_fd(context, v);
			if (!end_wsi) {
				lwsl_err("no wsi for fd %d pos %d, "
					 "pt->fds_count=%d\n",
					 (int)pt->fds[m].fd, m, pt->fds_count);
				assert(0);
			} else
				end_wsi->position_in_fds_table = m;
		}

		/* removed wsi has no position any more */
		wsi->position_in_fds_table = LWS_NO_FDS_POS;
	}

#if defined(LWS_WITH_EXTERNAL_POLL)
	/* remove also from external POLL support via protocol 0 */
	if (lws_socket_is_valid(wsi->desc.sockfd) && wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_DEL_POLL_FD,
					      wsi->user_space, (void *) &pa, 0))
		ret = -1;
#endif

#if defined(LWS_WITH_SERVER)
	if (!context->being_destroyed &&
	    /* if this made some room, accept connects on this thread */
	    (unsigned int)pt->fds_count < context->fd_limit_per_thread - 1)
		lws_accept_modulation(context, pt, 1);
#endif

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *) &pa, 1))
		ret = -1;
#endif

//	__dump_fds(pt, "post remove");

	return ret;
}

int
__lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
	struct lws_context *context;
	struct lws_pollargs pa;
	int ret = 0;

	if (!wsi || (!wsi->protocol && !wsi->event_pipe) ||
	    wsi->position_in_fds_table == LWS_NO_FDS_POS)
		return 0;

	context = lws_get_context(wsi);
	if (!context)
		return 1;

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_LOCK_POLL,
					      wsi->user_space, (void *) &pa, 0))
		return -1;
#endif

	ret = _lws_change_pollfd(wsi, _and, _or, &pa);

#if defined(LWS_WITH_EXTERNAL_POLL)
	if (wsi->vhost &&
	    wsi->vhost->protocols[0].callback(wsi, LWS_CALLBACK_UNLOCK_POLL,
					      wsi->user_space, (void *) &pa, 0))
		ret = -1;
#endif

	return ret;
}

int
lws_change_pollfd(struct lws *wsi, int _and, int _or)
{
	struct lws_context_per_thread *pt;
	int ret = 0;

	pt = &wsi->context->pt[(int)wsi->tsi];

	lws_pt_lock(pt, __func__);
	ret = __lws_change_pollfd(wsi, _and, _or);
	lws_pt_unlock(pt);

	return ret;
}

int
lws_callback_on_writable(struct lws *wsi)
{
	struct lws_context_per_thread *pt;
	struct lws *w = wsi;

	if (lwsi_state(wsi) == LRS_SHUTDOWN)
		return 0;

	if (wsi->socket_is_permanently_unusable)
		return 0;

	pt = &wsi->context->pt[(int)wsi->tsi];

#if defined(LWS_WITH_DETAILED_LATENCY)
	if (!wsi->detlat.earliest_write_req)
		wsi->detlat.earliest_write_req = lws_now_usecs();
#endif

	lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB_REQ, 1);
#if defined(LWS_WITH_STATS)
	if (!wsi->active_writable_req_us) {
		wsi->active_writable_req_us = lws_now_usecs();
		lws_stats_bump(pt, LWSSTATS_C_WRITEABLE_CB_EFF_REQ, 1);
	}
#endif

	if (wsi->role_ops->callback_on_writable) {
		int q = wsi->role_ops->callback_on_writable(wsi);
		//lwsl_notice("%s: rops_cow says %d\n", __func__, q);
		if (q)
			return 1;
		w = lws_get_network_wsi(wsi);
	} else
		if (w->position_in_fds_table == LWS_NO_FDS_POS) {
			lwsl_debug("%s: failed to find socket %d\n", __func__,
				   wsi->desc.sockfd);
			return -1;
		}

	//lwsl_notice("%s: marking for POLLOUT %p (wsi %p)\n", __func__, w, wsi);

	if (__lws_change_pollfd(w, 0, LWS_POLLOUT))
		return -1;

	return 1;
}
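
/*
 * Illustrative sketch only: the usual pattern from user protocol code is
 * to request a writeable callback rather than writing directly, eg
 *
 *	case LWS_CALLBACK_RECEIVE:
 *		// stash outgoing data somewhere, then...
 *		lws_callback_on_writable(wsi);
 *		break;
 *
 *	case LWS_CALLBACK_SERVER_WRITEABLE:
 *		lws_write(wsi, buf + LWS_PRE, len, LWS_WRITE_TEXT);
 *		break;
 *
 * buf and len here are assumed to be managed by the user code.
 */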

/*
 * stitch protocol choice into the vh protocol linked list
 * We always insert ourselves at the start of the list
 *
 * X <-> B
 * X <-> pAn <-> pB
 *
 * Illegal to attach more than once without detach inbetween
 */
void
lws_same_vh_protocol_insert(struct lws *wsi, int n)
{
	lws_vhost_lock(wsi->vhost);

	lws_dll2_remove(&wsi->same_vh_protocol);
	lws_dll2_add_head(&wsi->same_vh_protocol,
			  &wsi->vhost->same_vh_protocol_owner[n]);

	wsi->bound_vhost_index = n;

	lws_vhost_unlock(wsi->vhost);
}

void
__lws_same_vh_protocol_remove(struct lws *wsi)
{
	if (wsi->vhost && wsi->vhost->same_vh_protocol_owner)
		lws_dll2_remove(&wsi->same_vh_protocol);
}

void
lws_same_vh_protocol_remove(struct lws *wsi)
{
	if (!wsi->vhost)
		return;

	lws_vhost_lock(wsi->vhost);

	__lws_same_vh_protocol_remove(wsi);

	lws_vhost_unlock(wsi->vhost);
}


int
lws_callback_on_writable_all_protocol_vhost(const struct lws_vhost *vhost,
					    const struct lws_protocols *protocol)
{
	struct lws *wsi;
	int n;

	if (protocol < vhost->protocols ||
	    protocol >= (vhost->protocols + vhost->count_protocols)) {
		lwsl_err("%s: protocol %p is not from vhost %p (%p - %p)\n",
			 __func__, protocol, vhost->protocols, vhost,
			 (vhost->protocols + vhost->count_protocols));

		return -1;
	}

	n = (int)(protocol - vhost->protocols);

	lws_start_foreach_dll_safe(struct lws_dll2 *, d, d1,
		   lws_dll2_get_head(&vhost->same_vh_protocol_owner[n])) {
		wsi = lws_container_of(d, struct lws, same_vh_protocol);

		assert(wsi->protocol == protocol);
		lws_callback_on_writable(wsi);

	} lws_end_foreach_dll_safe(d, d1);

	return 0;
}

int
lws_callback_on_writable_all_protocol(const struct lws_context *context,
				      const struct lws_protocols *protocol)
{
	struct lws_vhost *vhost;
	int n;

	if (!context)
		return 0;

	vhost = context->vhost_list;

	while (vhost) {
		for (n = 0; n < vhost->count_protocols; n++)
			if (protocol->callback ==
			    vhost->protocols[n].callback &&
			    !strcmp(protocol->name, vhost->protocols[n].name))
				break;
		if (n != vhost->count_protocols)
			lws_callback_on_writable_all_protocol_vhost(
					vhost, &vhost->protocols[n]);

		vhost = vhost->vhost_next;
	}

	return 0;
}
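
/*
 * Illustrative sketch only: to wake every connection bound to the same
 * protocol as wsi, eg after some shared state changed, user code can do
 * something like
 *
 *	lws_callback_on_writable_all_protocol(lws_get_context(wsi),
 *					      lws_get_protocol(wsi));
 *
 * using the public lws_get_context() / lws_get_protocol() accessors.
 */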