1 /*
2  * nghttp2 - HTTP/2 C Library
3  *
4  * Copyright (c) 2012 Tatsuhiro Tsujikawa
5  *
6  * Permission is hereby granted, free of charge, to any person obtaining
7  * a copy of this software and associated documentation files (the
8  * "Software"), to deal in the Software without restriction, including
9  * without limitation the rights to use, copy, modify, merge, publish,
10  * distribute, sublicense, and/or sell copies of the Software, and to
11  * permit persons to whom the Software is furnished to do so, subject to
12  * the following conditions:
13  *
14  * The above copyright notice and this permission notice shall be
15  * included in all copies or substantial portions of the Software.
16  *
17  * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18  * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19  * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20  * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21  * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22  * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23  * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24  */
25 #include "shrpx_connection_handler.h"
26 
27 #ifdef HAVE_UNISTD_H
28 #  include <unistd.h>
29 #endif // HAVE_UNISTD_H
30 #include <sys/types.h>
31 #include <sys/wait.h>
32 
33 #include <cerrno>
34 #include <thread>
35 #include <random>
36 
37 #include "shrpx_client_handler.h"
38 #include "shrpx_tls.h"
39 #include "shrpx_worker.h"
40 #include "shrpx_config.h"
41 #include "shrpx_http2_session.h"
42 #include "shrpx_connect_blocker.h"
43 #include "shrpx_downstream_connection.h"
44 #include "shrpx_accept_handler.h"
45 #include "shrpx_memcached_dispatcher.h"
46 #include "shrpx_signal.h"
47 #include "shrpx_log.h"
48 #include "xsi_strerror.h"
49 #include "util.h"
50 #include "template.h"
51 #include "ssl_compat.h"
52 
53 using namespace nghttp2;
54 
55 namespace shrpx {
56 
57 namespace {
58 void acceptor_disable_cb(struct ev_loop *loop, ev_timer *w, int revent) {
59   auto h = static_cast<ConnectionHandler *>(w->data);
60 
61   // If we are in the graceful shutdown period, we must not enable
62   // acceptors again.
63   if (h->get_graceful_shutdown()) {
64     return;
65   }
66 
67   h->enable_acceptor();
68 }
69 } // namespace
70 
71 namespace {
72 void ocsp_cb(struct ev_loop *loop, ev_timer *w, int revent) {
73   auto h = static_cast<ConnectionHandler *>(w->data);
74 
75   // If we are in the graceful shutdown period, we skip the OCSP query.
76   if (h->get_graceful_shutdown()) {
77     return;
78   }
79 
80   LOG(NOTICE) << "Start ocsp update";
81 
82   h->proceed_next_cert_ocsp();
83 }
84 } // namespace
85 
86 namespace {
87 void ocsp_read_cb(struct ev_loop *loop, ev_io *w, int revent) {
88   auto h = static_cast<ConnectionHandler *>(w->data);
89 
90   h->read_ocsp_chunk();
91 }
92 } // namespace
93 
94 namespace {
95 void ocsp_chld_cb(struct ev_loop *loop, ev_child *w, int revent) {
96   auto h = static_cast<ConnectionHandler *>(w->data);
97 
98   h->handle_ocsp_complete();
99 }
100 } // namespace
101 
102 namespace {
103 void thread_join_async_cb(struct ev_loop *loop, ev_async *w, int revent) {
104   ev_break(loop);
105 }
106 } // namespace
107 
108 namespace {
109 void serial_event_async_cb(struct ev_loop *loop, ev_async *w, int revent) {
110   auto h = static_cast<ConnectionHandler *>(w->data);
111 
112   h->handle_serial_event();
113 }
114 } // namespace
115 
116 ConnectionHandler::ConnectionHandler(struct ev_loop *loop, std::mt19937 &gen)
117     :
118 #ifdef ENABLE_HTTP3
119       quic_ipc_fd_(-1),
120 #endif // ENABLE_HTTP3
121       gen_(gen),
122       single_worker_(nullptr),
123       loop_(loop),
124 #ifdef HAVE_NEVERBLEED
125       nb_(nullptr),
126 #endif // HAVE_NEVERBLEED
127       tls_ticket_key_memcached_get_retry_count_(0),
128       tls_ticket_key_memcached_fail_count_(0),
129       worker_round_robin_cnt_(get_config()->api.enabled ? 1 : 0),
130       graceful_shutdown_(false),
131       enable_acceptor_on_ocsp_completion_(false) {
132   ev_timer_init(&disable_acceptor_timer_, acceptor_disable_cb, 0., 0.);
133   disable_acceptor_timer_.data = this;
134 
135   ev_timer_init(&ocsp_timer_, ocsp_cb, 0., 0.);
136   ocsp_timer_.data = this;
137 
138   ev_io_init(&ocsp_.rev, ocsp_read_cb, -1, EV_READ);
139   ocsp_.rev.data = this;
140 
141   ev_async_init(&thread_join_asyncev_, thread_join_async_cb);
142 
143   ev_async_init(&serial_event_asyncev_, serial_event_async_cb);
144   serial_event_asyncev_.data = this;
145 
146   ev_async_start(loop_, &serial_event_asyncev_);
147 
148   ev_child_init(&ocsp_.chldev, ocsp_chld_cb, 0, 0);
149   ocsp_.chldev.data = this;
150 
151   ocsp_.next = 0;
152   ocsp_.proc.rfd = -1;
153 
154   reset_ocsp();
155 }
156 
157 ConnectionHandler::~ConnectionHandler() {
158   ev_child_stop(loop_, &ocsp_.chldev);
159   ev_async_stop(loop_, &serial_event_asyncev_);
160   ev_async_stop(loop_, &thread_join_asyncev_);
161   ev_io_stop(loop_, &ocsp_.rev);
162   ev_timer_stop(loop_, &ocsp_timer_);
163   ev_timer_stop(loop_, &disable_acceptor_timer_);
164 
165 #ifdef ENABLE_HTTP3
166   for (auto ssl_ctx : quic_all_ssl_ctx_) {
167     if (ssl_ctx == nullptr) {
168       continue;
169     }
170 
171     auto tls_ctx_data =
172         static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
173     delete tls_ctx_data;
174     SSL_CTX_free(ssl_ctx);
175   }
176 #endif // ENABLE_HTTP3
177 
178   for (auto ssl_ctx : all_ssl_ctx_) {
179     auto tls_ctx_data =
180         static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
181     delete tls_ctx_data;
182     SSL_CTX_free(ssl_ctx);
183   }
184 
185   // Free workers before destroying ev_loop
186   workers_.clear();
187 
188   for (auto loop : worker_loops_) {
189     ev_loop_destroy(loop);
190   }
191 }
192 
193 void ConnectionHandler::set_ticket_keys_to_worker(
194     const std::shared_ptr<TicketKeys> &ticket_keys) {
195   for (auto &worker : workers_) {
196     worker->set_ticket_keys(ticket_keys);
197   }
198 }
199 
200 void ConnectionHandler::worker_reopen_log_files() {
201   for (auto &worker : workers_) {
202     WorkerEvent wev{};
203 
204     wev.type = WorkerEventType::REOPEN_LOG;
205 
206     worker->send(std::move(wev));
207   }
208 }
209 
210 void ConnectionHandler::worker_replace_downstream(
211     std::shared_ptr<DownstreamConfig> downstreamconf) {
212   for (auto &worker : workers_) {
213     WorkerEvent wev{};
214 
215     wev.type = WorkerEventType::REPLACE_DOWNSTREAM;
216     wev.downstreamconf = downstreamconf;
217 
218     worker->send(std::move(wev));
219   }
220 }
221 
222 int ConnectionHandler::create_single_worker() {
223   cert_tree_ = tls::create_cert_lookup_tree();
224   auto sv_ssl_ctx = tls::setup_server_ssl_context(
225       all_ssl_ctx_, indexed_ssl_ctx_, cert_tree_.get()
226 #ifdef HAVE_NEVERBLEED
227                                           ,
228       nb_
229 #endif // HAVE_NEVERBLEED
230   );
231 
232 #ifdef ENABLE_HTTP3
233   quic_cert_tree_ = tls::create_cert_lookup_tree();
234   auto quic_sv_ssl_ctx = tls::setup_quic_server_ssl_context(
235       quic_all_ssl_ctx_, quic_indexed_ssl_ctx_, quic_cert_tree_.get()
236 #  ifdef HAVE_NEVERBLEED
237                                                     ,
238       nb_
239 #  endif // HAVE_NEVERBLEED
240   );
241 #endif // ENABLE_HTTP3
242 
243   auto cl_ssl_ctx = tls::setup_downstream_client_ssl_context(
244 #ifdef HAVE_NEVERBLEED
245       nb_
246 #endif // HAVE_NEVERBLEED
247   );
248 
249   if (cl_ssl_ctx) {
250     all_ssl_ctx_.push_back(cl_ssl_ctx);
251 #ifdef ENABLE_HTTP3
252     quic_all_ssl_ctx_.push_back(nullptr);
253 #endif // ENABLE_HTTP3
254   }
255 
256   auto config = get_config();
257   auto &tlsconf = config->tls;
258 
259   SSL_CTX *session_cache_ssl_ctx = nullptr;
260   {
261     auto &memcachedconf = config->tls.session_cache.memcached;
262     if (memcachedconf.tls) {
263       session_cache_ssl_ctx = tls::create_ssl_client_context(
264 #ifdef HAVE_NEVERBLEED
265           nb_,
266 #endif // HAVE_NEVERBLEED
267           tlsconf.cacert, memcachedconf.cert_file,
268           memcachedconf.private_key_file);
269       all_ssl_ctx_.push_back(session_cache_ssl_ctx);
270 #ifdef ENABLE_HTTP3
271       quic_all_ssl_ctx_.push_back(nullptr);
272 #endif // ENABLE_HTTP3
273     }
274   }
275 
276 #if defined(ENABLE_HTTP3) && defined(HAVE_LIBBPF)
277   quic_bpf_refs_.resize(config->conn.quic_listener.addrs.size());
278 #endif // ENABLE_HTTP3 && HAVE_LIBBPF
279 
280 #ifdef ENABLE_HTTP3
281   assert(worker_ids_.size() == 1);
282   const auto &wid = worker_ids_[0];
283 #endif // ENABLE_HTTP3
284 
285   single_worker_ = std::make_unique<Worker>(
286       loop_, sv_ssl_ctx, cl_ssl_ctx, session_cache_ssl_ctx, cert_tree_.get(),
287 #ifdef ENABLE_HTTP3
288       quic_sv_ssl_ctx, quic_cert_tree_.get(), wid,
289 #  ifdef HAVE_LIBBPF
290       /* index = */ 0,
291 #  endif // HAVE_LIBBPF
292 #endif   // ENABLE_HTTP3
293       ticket_keys_, this, config->conn.downstream);
294 #ifdef HAVE_MRUBY
295   if (single_worker_->create_mruby_context() != 0) {
296     return -1;
297   }
298 #endif // HAVE_MRUBY
299 
300 #ifdef ENABLE_HTTP3
301   if (single_worker_->setup_quic_server_socket() != 0) {
302     return -1;
303   }
304 #endif // ENABLE_HTTP3
305 
306   return 0;
307 }
308 
309 int ConnectionHandler::create_worker_thread(size_t num) {
310 #ifndef NOTHREADS
311   assert(workers_.size() == 0);
312 
313   cert_tree_ = tls::create_cert_lookup_tree();
314   auto sv_ssl_ctx = tls::setup_server_ssl_context(
315       all_ssl_ctx_, indexed_ssl_ctx_, cert_tree_.get()
316 #  ifdef HAVE_NEVERBLEED
317                                           ,
318       nb_
319 #  endif // HAVE_NEVERBLEED
320   );
321 
322 #  ifdef ENABLE_HTTP3
323   quic_cert_tree_ = tls::create_cert_lookup_tree();
324   auto quic_sv_ssl_ctx = tls::setup_quic_server_ssl_context(
325       quic_all_ssl_ctx_, quic_indexed_ssl_ctx_, quic_cert_tree_.get()
326 #    ifdef HAVE_NEVERBLEED
327                                                     ,
328       nb_
329 #    endif // HAVE_NEVERBLEED
330   );
331 #  endif // ENABLE_HTTP3
332 
333   auto cl_ssl_ctx = tls::setup_downstream_client_ssl_context(
334 #  ifdef HAVE_NEVERBLEED
335       nb_
336 #  endif // HAVE_NEVERBLEED
337   );
338 
339   if (cl_ssl_ctx) {
340     all_ssl_ctx_.push_back(cl_ssl_ctx);
341 #  ifdef ENABLE_HTTP3
342     quic_all_ssl_ctx_.push_back(nullptr);
343 #  endif // ENABLE_HTTP3
344   }
345 
346   auto config = get_config();
347   auto &tlsconf = config->tls;
348   auto &apiconf = config->api;
349 
350 #  if defined(ENABLE_HTTP3) && defined(HAVE_LIBBPF)
351   quic_bpf_refs_.resize(config->conn.quic_listener.addrs.size());
352 #  endif // ENABLE_HTTP3 && HAVE_LIBBPF
353 
354   // We have a dedicated worker for API request processing.
355   if (apiconf.enabled) {
356     ++num;
357   }
358 
359   SSL_CTX *session_cache_ssl_ctx = nullptr;
360   {
361     auto &memcachedconf = config->tls.session_cache.memcached;
362 
363     if (memcachedconf.tls) {
364       session_cache_ssl_ctx = tls::create_ssl_client_context(
365 #  ifdef HAVE_NEVERBLEED
366           nb_,
367 #  endif // HAVE_NEVERBLEED
368           tlsconf.cacert, memcachedconf.cert_file,
369           memcachedconf.private_key_file);
370       all_ssl_ctx_.push_back(session_cache_ssl_ctx);
371 #  ifdef ENABLE_HTTP3
372       quic_all_ssl_ctx_.push_back(nullptr);
373 #  endif // ENABLE_HTTP3
374     }
375   }
376 
377 #  ifdef ENABLE_HTTP3
378   assert(worker_ids_.size() == num);
379 #  endif // ENABLE_HTTP3
380 
381   for (size_t i = 0; i < num; ++i) {
382     auto loop = ev_loop_new(config->ev_loop_flags);
383 
384 #  ifdef ENABLE_HTTP3
385     const auto &wid = worker_ids_[i];
386 #  endif // ENABLE_HTTP3
387 
388     auto worker = std::make_unique<Worker>(
389         loop, sv_ssl_ctx, cl_ssl_ctx, session_cache_ssl_ctx, cert_tree_.get(),
390 #  ifdef ENABLE_HTTP3
391         quic_sv_ssl_ctx, quic_cert_tree_.get(), wid,
392 #    ifdef HAVE_LIBBPF
393         i,
394 #    endif // HAVE_LIBBPF
395 #  endif   // ENABLE_HTTP3
396         ticket_keys_, this, config->conn.downstream);
397 #  ifdef HAVE_MRUBY
398     if (worker->create_mruby_context() != 0) {
399       return -1;
400     }
401 #  endif // HAVE_MRUBY
402 
403 #  ifdef ENABLE_HTTP3
404     if ((!apiconf.enabled || i != 0) &&
405         worker->setup_quic_server_socket() != 0) {
406       return -1;
407     }
408 #  endif // ENABLE_HTTP3
409 
410     workers_.push_back(std::move(worker));
411     worker_loops_.push_back(loop);
412 
413     LLOG(NOTICE, this) << "Created worker thread #" << workers_.size() - 1;
414   }
415 
416   for (auto &worker : workers_) {
417     worker->run_async();
418   }
419 
420 #endif // NOTHREADS
421 
422   return 0;
423 }
424 
425 void ConnectionHandler::join_worker() {
426 #ifndef NOTHREADS
427   int n = 0;
428 
429   if (LOG_ENABLED(INFO)) {
430     LLOG(INFO, this) << "Waiting for worker thread to join: n="
431                      << workers_.size();
432   }
433 
434   for (auto &worker : workers_) {
435     worker->wait();
436     if (LOG_ENABLED(INFO)) {
437       LLOG(INFO, this) << "Thread #" << n << " joined";
438     }
439     ++n;
440   }
441 #endif // NOTHREADS
442 }
443 
444 void ConnectionHandler::graceful_shutdown_worker() {
445   if (single_worker_) {
446     return;
447   }
448 
449   if (LOG_ENABLED(INFO)) {
450     LLOG(INFO, this) << "Sending graceful shutdown signal to worker";
451   }
452 
453   for (auto &worker : workers_) {
454     WorkerEvent wev{};
455     wev.type = WorkerEventType::GRACEFUL_SHUTDOWN;
456 
457     worker->send(std::move(wev));
458   }
459 
460 #ifndef NOTHREADS
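  // Worker threads are joined off the event loop: the std::async task below
  // blocks in join_worker() and then signals thread_join_asyncev_, whose
  // callback simply calls ev_break() on this loop.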
461   ev_async_start(loop_, &thread_join_asyncev_);
462 
463   thread_join_fut_ = std::async(std::launch::async, [this]() {
464     (void)reopen_log_files(get_config()->logging);
465     join_worker();
466     ev_async_send(get_loop(), &thread_join_asyncev_);
467     delete_log_config();
468   });
469 #endif // NOTHREADS
470 }
471 
472 int ConnectionHandler::handle_connection(int fd, sockaddr *addr, int addrlen,
473                                          const UpstreamAddr *faddr) {
474   if (LOG_ENABLED(INFO)) {
475     LLOG(INFO, this) << "Accepted connection from "
476                      << util::numeric_name(addr, addrlen) << ", fd=" << fd;
477   }
478 
479   auto config = get_config();
480 
481   if (single_worker_) {
482     auto &upstreamconf = config->conn.upstream;
483     if (single_worker_->get_worker_stat()->num_connections >=
484         upstreamconf.worker_connections) {
485 
486       if (LOG_ENABLED(INFO)) {
487         LLOG(INFO, this) << "Too many connections >="
488                          << upstreamconf.worker_connections;
489       }
490 
491       close(fd);
492       return -1;
493     }
494 
495     auto client =
496         tls::accept_connection(single_worker_.get(), fd, addr, addrlen, faddr);
497     if (!client) {
498       LLOG(ERROR, this) << "ClientHandler creation failed";
499 
500       close(fd);
501       return -1;
502     }
503 
504     return 0;
505   }
506 
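  // Multi-worker dispatch: connections on the API frontend always go to
  // worker #0, which is reserved for API processing; all other connections
  // are distributed round-robin over the remaining workers (the counter
  // wraps back to 1 instead of 0 while the API is enabled).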
507   Worker *worker;
508 
509   if (faddr->alt_mode == UpstreamAltMode::API) {
510     worker = workers_[0].get();
511 
512     if (LOG_ENABLED(INFO)) {
513       LOG(INFO) << "Dispatch connection to API worker #0";
514     }
515   } else {
516     worker = workers_[worker_round_robin_cnt_].get();
517 
518     if (LOG_ENABLED(INFO)) {
519       LOG(INFO) << "Dispatch connection to worker #" << worker_round_robin_cnt_;
520     }
521 
522     if (++worker_round_robin_cnt_ == workers_.size()) {
523       auto &apiconf = config->api;
524 
525       if (apiconf.enabled) {
526         worker_round_robin_cnt_ = 1;
527       } else {
528         worker_round_robin_cnt_ = 0;
529       }
530     }
531   }
532 
533   WorkerEvent wev{};
534   wev.type = WorkerEventType::NEW_CONNECTION;
535   wev.client_fd = fd;
536   memcpy(&wev.client_addr, addr, addrlen);
537   wev.client_addrlen = addrlen;
538   wev.faddr = faddr;
539 
540   worker->send(std::move(wev));
541 
542   return 0;
543 }
544 
545 struct ev_loop *ConnectionHandler::get_loop() const { return loop_; }
546 
547 Worker *ConnectionHandler::get_single_worker() const {
548   return single_worker_.get();
549 }
550 
551 void ConnectionHandler::add_acceptor(std::unique_ptr<AcceptHandler> h) {
552   acceptors_.push_back(std::move(h));
553 }
554 
555 void ConnectionHandler::delete_acceptor() { acceptors_.clear(); }
556 
557 void ConnectionHandler::enable_acceptor() {
558   for (auto &a : acceptors_) {
559     a->enable();
560   }
561 }
562 
563 void ConnectionHandler::disable_acceptor() {
564   for (auto &a : acceptors_) {
565     a->disable();
566   }
567 }
568 
569 void ConnectionHandler::sleep_acceptor(ev_tstamp t) {
570   if (t == 0. || ev_is_active(&disable_acceptor_timer_)) {
571     return;
572   }
573 
574   disable_acceptor();
575 
576   ev_timer_set(&disable_acceptor_timer_, t, 0.);
577   ev_timer_start(loop_, &disable_acceptor_timer_);
578 }
579 
580 void ConnectionHandler::accept_pending_connection() {
581   for (auto &a : acceptors_) {
582     a->accept_connection();
583   }
584 }
585 
586 void ConnectionHandler::set_ticket_keys(
587     std::shared_ptr<TicketKeys> ticket_keys) {
588   ticket_keys_ = std::move(ticket_keys);
589   if (single_worker_) {
590     single_worker_->set_ticket_keys(ticket_keys_);
591   }
592 }
593 
594 const std::shared_ptr<TicketKeys> &ConnectionHandler::get_ticket_keys() const {
595   return ticket_keys_;
596 }
597 
598 void ConnectionHandler::set_graceful_shutdown(bool f) {
599   graceful_shutdown_ = f;
600   if (single_worker_) {
601     single_worker_->set_graceful_shutdown(f);
602   }
603 }
604 
605 bool ConnectionHandler::get_graceful_shutdown() const {
606   return graceful_shutdown_;
607 }
608 
609 void ConnectionHandler::cancel_ocsp_update() {
610   enable_acceptor_on_ocsp_completion_ = false;
611   ev_timer_stop(loop_, &ocsp_timer_);
612 
613   if (ocsp_.proc.pid == 0) {
614     return;
615   }
616 
617   int rv;
618 
619   rv = kill(ocsp_.proc.pid, SIGTERM);
620   if (rv != 0) {
621     auto error = errno;
622     LOG(ERROR) << "Could not send signal to OCSP query process: errno="
623                << error;
624   }
625 
626   while ((rv = waitpid(ocsp_.proc.pid, nullptr, 0)) == -1 && errno == EINTR)
627     ;
628   if (rv == -1) {
629     auto error = errno;
630     LOG(ERROR) << "Error occurred while we were waiting for the completion of "
631                   "OCSP query process: errno="
632                << error;
633   }
634 }
635 
636 // Inspired by the h2o_read_command function from the h2o project:
637 // https://github.com/h2o/h2o
638 int ConnectionHandler::start_ocsp_update(const char *cert_file) {
639   int rv;
640 
641   if (LOG_ENABLED(INFO)) {
642     LOG(INFO) << "Start ocsp update for " << cert_file;
643   }
644 
645   assert(!ev_is_active(&ocsp_.rev));
646   assert(!ev_is_active(&ocsp_.chldev));
647 
648   char *const argv[] = {
649       const_cast<char *>(
650           get_config()->tls.ocsp.fetch_ocsp_response_file.data()),
651       const_cast<char *>(cert_file), nullptr};
652 
653   Process proc;
654   rv = exec_read_command(proc, argv);
655   if (rv != 0) {
656     return -1;
657   }
658 
659   ocsp_.proc = proc;
660 
661   ev_io_set(&ocsp_.rev, ocsp_.proc.rfd, EV_READ);
662   ev_io_start(loop_, &ocsp_.rev);
663 
664   ev_child_set(&ocsp_.chldev, ocsp_.proc.pid, 0);
665   ev_child_start(loop_, &ocsp_.chldev);
666 
667   return 0;
668 }
669 
670 void ConnectionHandler::read_ocsp_chunk() {
671   std::array<uint8_t, 4_k> buf;
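  // Drain the pipe in 4 KiB chunks until the read would block (EAGAIN) or
  // the child closes its end (read() returns 0), accumulating the OCSP
  // response emitted by the fetch-ocsp-response command into ocsp_.resp.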
672   for (;;) {
673     ssize_t n;
674     while ((n = read(ocsp_.proc.rfd, buf.data(), buf.size())) == -1 &&
675            errno == EINTR)
676       ;
677 
678     if (n == -1) {
679       if (errno == EAGAIN || errno == EWOULDBLOCK) {
680         return;
681       }
682       auto error = errno;
683       LOG(WARN) << "Reading from ocsp query command failed: errno=" << error;
684       ocsp_.error = error;
685 
686       break;
687     }
688 
689     if (n == 0) {
690       break;
691     }
692 
693     std::copy_n(std::begin(buf), n, std::back_inserter(ocsp_.resp));
694   }
695 
696   ev_io_stop(loop_, &ocsp_.rev);
697 }
698 
699 void ConnectionHandler::handle_ocsp_complete() {
700   ev_io_stop(loop_, &ocsp_.rev);
701   ev_child_stop(loop_, &ocsp_.chldev);
702 
703   assert(ocsp_.next < all_ssl_ctx_.size());
704 #ifdef ENABLE_HTTP3
705   assert(all_ssl_ctx_.size() == quic_all_ssl_ctx_.size());
706 #endif // ENABLE_HTTP3
707 
708   auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
709   auto tls_ctx_data =
710       static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
711 
712   auto rstatus = ocsp_.chldev.rstatus;
713   auto status = WEXITSTATUS(rstatus);
714   if (ocsp_.error || !WIFEXITED(rstatus) || status != 0) {
715     LOG(WARN) << "ocsp query command for " << tls_ctx_data->cert_file
716               << " failed: error=" << ocsp_.error << ", rstatus=" << log::hex
717               << rstatus << log::dec << ", status=" << status;
718     ++ocsp_.next;
719     proceed_next_cert_ocsp();
720     return;
721   }
722 
723   if (LOG_ENABLED(INFO)) {
724     LOG(INFO) << "ocsp update for " << tls_ctx_data->cert_file
725               << " finished successfully";
726   }
727 
728   auto config = get_config();
729   auto &tlsconf = config->tls;
730 
731   if (tlsconf.ocsp.no_verify ||
732       tls::verify_ocsp_response(ssl_ctx, ocsp_.resp.data(),
733                                 ocsp_.resp.size()) == 0) {
734 #ifdef ENABLE_HTTP3
735     // quic_all_ssl_ctx_ holds SSL_CTXs for the same certificates as
736     // all_ssl_ctx_.  Some entries may be missing there; in that case we
737     // get nullptr.
738     auto quic_ssl_ctx = quic_all_ssl_ctx_[ocsp_.next];
739     if (quic_ssl_ctx) {
740       auto quic_tls_ctx_data = static_cast<tls::TLSContextData *>(
741           SSL_CTX_get_app_data(quic_ssl_ctx));
742 #  ifdef HAVE_ATOMIC_STD_SHARED_PTR
743       quic_tls_ctx_data->ocsp_data.store(
744           std::make_shared<std::vector<uint8_t>>(ocsp_.resp),
745           std::memory_order_release);
746 #  else  // !HAVE_ATOMIC_STD_SHARED_PTR
747       std::lock_guard<std::mutex> g(quic_tls_ctx_data->mu);
748       quic_tls_ctx_data->ocsp_data =
749           std::make_shared<std::vector<uint8_t>>(ocsp_.resp);
750 #  endif // !HAVE_ATOMIC_STD_SHARED_PTR
751     }
752 #endif // ENABLE_HTTP3
753 
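    // Publish the new response so that worker threads reading ocsp_data
    // concurrently observe a complete vector: an atomic shared_ptr store
    // where available, otherwise a mutex-guarded update.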
754 #ifdef HAVE_ATOMIC_STD_SHARED_PTR
755     tls_ctx_data->ocsp_data.store(
756         std::make_shared<std::vector<uint8_t>>(std::move(ocsp_.resp)),
757         std::memory_order_release);
758 #else  // !HAVE_ATOMIC_STD_SHARED_PTR
759     std::lock_guard<std::mutex> g(tls_ctx_data->mu);
760     tls_ctx_data->ocsp_data =
761         std::make_shared<std::vector<uint8_t>>(std::move(ocsp_.resp));
762 #endif // !HAVE_ATOMIC_STD_SHARED_PTR
763   }
764 
765   ++ocsp_.next;
766   proceed_next_cert_ocsp();
767 }
768 
769 void ConnectionHandler::reset_ocsp() {
770   if (ocsp_.proc.rfd != -1) {
771     close(ocsp_.proc.rfd);
772   }
773 
774   ocsp_.proc.rfd = -1;
775   ocsp_.proc.pid = 0;
776   ocsp_.error = 0;
777   ocsp_.resp = std::vector<uint8_t>();
778 }
779 
780 void ConnectionHandler::proceed_next_cert_ocsp() {
781   for (;;) {
782     reset_ocsp();
783     if (ocsp_.next == all_ssl_ctx_.size()) {
784       ocsp_.next = 0;
785       // All OCSP responses have been updated; schedule the next update.
786       ev_timer_set(&ocsp_timer_, get_config()->tls.ocsp.update_interval, 0.);
787       ev_timer_start(loop_, &ocsp_timer_);
788 
789       if (enable_acceptor_on_ocsp_completion_) {
790         enable_acceptor_on_ocsp_completion_ = false;
791         enable_acceptor();
792       }
793 
794       return;
795     }
796 
797     auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
798     auto tls_ctx_data =
799         static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
800 
801     // client SSL_CTX is also included in all_ssl_ctx_, but has no
802     // tls_ctx_data.
803     if (!tls_ctx_data) {
804       ++ocsp_.next;
805       continue;
806     }
807 
808     auto cert_file = tls_ctx_data->cert_file;
809 
810     if (start_ocsp_update(cert_file) != 0) {
811       ++ocsp_.next;
812       continue;
813     }
814 
815     break;
816   }
817 }
818 
819 void ConnectionHandler::set_tls_ticket_key_memcached_dispatcher(
820     std::unique_ptr<MemcachedDispatcher> dispatcher) {
821   tls_ticket_key_memcached_dispatcher_ = std::move(dispatcher);
822 }
823 
824 MemcachedDispatcher *
825 ConnectionHandler::get_tls_ticket_key_memcached_dispatcher() const {
826   return tls_ticket_key_memcached_dispatcher_.get();
827 }
828 
829 // Use a backoff algorithm similar to the one described in
830 // https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md
831 namespace {
832 constexpr size_t MAX_BACKOFF_EXP = 10;
833 constexpr auto MULTIPLIER = 3.2;
834 constexpr auto JITTER = 0.2;
835 } // namespace
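// For illustration, with the constants above: after the 3rd consecutive
// failure the base delay is 3.2^3 ~= 32.8 seconds, and the +/-20% jitter
// spreads the actual retry into roughly 26-39 seconds.  The exponent is
// capped at MAX_BACKOFF_EXP, so the delay stops growing after 10 failures.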
836 
837 void ConnectionHandler::on_tls_ticket_key_network_error(ev_timer *w) {
838   if (++tls_ticket_key_memcached_get_retry_count_ >=
839       get_config()->tls.ticket.memcached.max_retry) {
840     LOG(WARN) << "Memcached: tls ticket get retry all failed "
841               << tls_ticket_key_memcached_get_retry_count_ << " times.";
842 
843     on_tls_ticket_key_not_found(w);
844     return;
845   }
846 
847   auto base_backoff = util::int_pow(
848       MULTIPLIER,
849       std::min(MAX_BACKOFF_EXP, tls_ticket_key_memcached_get_retry_count_));
850   auto dist = std::uniform_real_distribution<>(-JITTER * base_backoff,
851                                                JITTER * base_backoff);
852 
853   auto backoff = base_backoff + dist(gen_);
854 
855   LOG(WARN)
856       << "Memcached: tls ticket get failed due to network error, retrying in "
857       << backoff << " seconds";
858 
859   ev_timer_set(w, backoff, 0.);
860   ev_timer_start(loop_, w);
861 }
862 
863 void ConnectionHandler::on_tls_ticket_key_not_found(ev_timer *w) {
864   tls_ticket_key_memcached_get_retry_count_ = 0;
865 
866   if (++tls_ticket_key_memcached_fail_count_ >=
867       get_config()->tls.ticket.memcached.max_fail) {
868     LOG(WARN) << "Memcached: could not get tls ticket; disable tls ticket";
869 
870     tls_ticket_key_memcached_fail_count_ = 0;
871 
872     set_ticket_keys(nullptr);
873     set_ticket_keys_to_worker(nullptr);
874   }
875 
876   LOG(WARN) << "Memcached: tls ticket get failed, schedule next";
877   schedule_next_tls_ticket_key_memcached_get(w);
878 }
879 
880 void ConnectionHandler::on_tls_ticket_key_get_success(
881     const std::shared_ptr<TicketKeys> &ticket_keys, ev_timer *w) {
882   LOG(NOTICE) << "Memcached: tls ticket get success";
883 
884   tls_ticket_key_memcached_get_retry_count_ = 0;
885   tls_ticket_key_memcached_fail_count_ = 0;
886 
887   schedule_next_tls_ticket_key_memcached_get(w);
888 
889   if (!ticket_keys || ticket_keys->keys.empty()) {
890     LOG(WARN) << "Memcached: tls ticket keys are empty; tls ticket disabled";
891     set_ticket_keys(nullptr);
892     set_ticket_keys_to_worker(nullptr);
893     return;
894   }
895 
896   if (LOG_ENABLED(INFO)) {
897     LOG(INFO) << "ticket keys get done";
898     LOG(INFO) << 0 << " enc+dec: "
899               << util::format_hex(ticket_keys->keys[0].data.name);
900     for (size_t i = 1; i < ticket_keys->keys.size(); ++i) {
901       auto &key = ticket_keys->keys[i];
902       LOG(INFO) << i << " dec: " << util::format_hex(key.data.name);
903     }
904   }
905 
906   set_ticket_keys(ticket_keys);
907   set_ticket_keys_to_worker(ticket_keys);
908 }
909 
910 void ConnectionHandler::schedule_next_tls_ticket_key_memcached_get(
911     ev_timer *w) {
912   ev_timer_set(w, get_config()->tls.ticket.memcached.interval, 0.);
913   ev_timer_start(loop_, w);
914 }
915 
916 SSL_CTX *ConnectionHandler::create_tls_ticket_key_memcached_ssl_ctx() {
917   auto config = get_config();
918   auto &tlsconf = config->tls;
919   auto &memcachedconf = config->tls.ticket.memcached;
920 
921   auto ssl_ctx = tls::create_ssl_client_context(
922 #ifdef HAVE_NEVERBLEED
923       nb_,
924 #endif // HAVE_NEVERBLEED
925       tlsconf.cacert, memcachedconf.cert_file, memcachedconf.private_key_file);
926 
927   all_ssl_ctx_.push_back(ssl_ctx);
928 #ifdef ENABLE_HTTP3
929   quic_all_ssl_ctx_.push_back(nullptr);
930 #endif // ENABLE_HTTP3
931 
932   return ssl_ctx;
933 }
934 
935 #ifdef HAVE_NEVERBLEED
936 void ConnectionHandler::set_neverbleed(neverbleed_t *nb) { nb_ = nb; }
937 #endif // HAVE_NEVERBLEED
938 
939 void ConnectionHandler::handle_serial_event() {
940   std::vector<SerialEvent> q;
941   {
942     std::lock_guard<std::mutex> g(serial_event_mu_);
943     q.swap(serial_events_);
944   }
945 
946   for (auto &sev : q) {
947     switch (sev.type) {
948     case SerialEventType::REPLACE_DOWNSTREAM:
949       // Make sure that no worker uses
950       // get_config()->conn.downstream
951       mod_config()->conn.downstream = sev.downstreamconf;
952 
953       if (single_worker_) {
954         single_worker_->replace_downstream_config(sev.downstreamconf);
955 
956         break;
957       }
958 
959       worker_replace_downstream(sev.downstreamconf);
960 
961       break;
962     default:
963       break;
964     }
965   }
966 }
967 
968 void ConnectionHandler::send_replace_downstream(
969     const std::shared_ptr<DownstreamConfig> &downstreamconf) {
970   send_serial_event(
971       SerialEvent(SerialEventType::REPLACE_DOWNSTREAM, downstreamconf));
972 }
973 
974 void ConnectionHandler::send_serial_event(SerialEvent ev) {
975   {
976     std::lock_guard<std::mutex> g(serial_event_mu_);
977 
978     serial_events_.push_back(std::move(ev));
979   }
980 
981   ev_async_send(loop_, &serial_event_asyncev_);
982 }
983 
984 SSL_CTX *ConnectionHandler::get_ssl_ctx(size_t idx) const {
985   return all_ssl_ctx_[idx];
986 }
987 
988 const std::vector<SSL_CTX *> &
989 ConnectionHandler::get_indexed_ssl_ctx(size_t idx) const {
990   return indexed_ssl_ctx_[idx];
991 }
992 
993 #ifdef ENABLE_HTTP3
994 const std::vector<SSL_CTX *> &
995 ConnectionHandler::get_quic_indexed_ssl_ctx(size_t idx) const {
996   return quic_indexed_ssl_ctx_[idx];
997 }
998 #endif // ENABLE_HTTP3
999 
1000 void ConnectionHandler::set_enable_acceptor_on_ocsp_completion(bool f) {
1001   enable_acceptor_on_ocsp_completion_ = f;
1002 }
1003 
1004 #ifdef ENABLE_HTTP3
1005 int ConnectionHandler::forward_quic_packet(const UpstreamAddr *faddr,
1006                                            const Address &remote_addr,
1007                                            const Address &local_addr,
1008                                            const ngtcp2_pkt_info &pi,
1009                                            const WorkerID &wid,
1010                                            std::span<const uint8_t> data) {
1011   assert(!get_config()->single_thread);
1012 
1013   auto worker = find_worker(wid);
1014   if (worker == nullptr) {
1015     return -1;
1016   }
1017 
1018   WorkerEvent wev{};
1019   wev.type = WorkerEventType::QUIC_PKT_FORWARD;
1020   wev.quic_pkt = std::make_unique<QUICPacket>(faddr->index, remote_addr,
1021                                               local_addr, pi, data);
1022 
1023   worker->send(std::move(wev));
1024 
1025   return 0;
1026 }
1027 
1028 void ConnectionHandler::set_quic_keying_materials(
1029     std::shared_ptr<QUICKeyingMaterials> qkms) {
1030   quic_keying_materials_ = std::move(qkms);
1031 }
1032 
1033 const std::shared_ptr<QUICKeyingMaterials> &
1034 ConnectionHandler::get_quic_keying_materials() const {
1035   return quic_keying_materials_;
1036 }
1037 
1038 void ConnectionHandler::set_worker_ids(std::vector<WorkerID> worker_ids) {
1039   worker_ids_ = std::move(worker_ids);
1040 }
1041 
1042 namespace {
1043 ssize_t find_worker_index(const std::vector<WorkerID> &worker_ids,
1044                           const WorkerID &wid) {
1045   assert(!worker_ids.empty());
1046 
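  // Every entry in worker_ids belongs to this worker process, so a Worker ID
  // matches only if its server and worker_process fields equal those of the
  // first entry; the thread field is then a direct index into the vector.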
1047   if (wid.server != worker_ids[0].server ||
1048       wid.worker_process != worker_ids[0].worker_process ||
1049       wid.thread >= worker_ids.size()) {
1050     return -1;
1051   }
1052 
1053   return wid.thread;
1054 }
1055 } // namespace
1056 
1057 Worker *ConnectionHandler::find_worker(const WorkerID &wid) const {
1058   auto idx = find_worker_index(worker_ids_, wid);
1059   if (idx == -1) {
1060     return nullptr;
1061   }
1062 
1063   return workers_[idx].get();
1064 }
1065 
1066 QUICLingeringWorkerProcess *
1067 ConnectionHandler::match_quic_lingering_worker_process_worker_id(
1068     const WorkerID &wid) {
1069   for (auto &lwps : quic_lingering_worker_processes_) {
1070     if (find_worker_index(lwps.worker_ids, wid) != -1) {
1071       return &lwps;
1072     }
1073   }
1074 
1075   return nullptr;
1076 }
1077 
1078 #  ifdef HAVE_LIBBPF
1079 std::vector<BPFRef> &ConnectionHandler::get_quic_bpf_refs() {
1080   return quic_bpf_refs_;
1081 }
1082 
1083 void ConnectionHandler::unload_bpf_objects() {
1084   LOG(NOTICE) << "Unloading BPF objects";
1085 
1086   for (auto &ref : quic_bpf_refs_) {
1087     if (ref.obj == nullptr) {
1088       continue;
1089     }
1090 
1091     bpf_object__close(ref.obj);
1092 
1093     ref.obj = nullptr;
1094   }
1095 }
1096 #  endif // HAVE_LIBBPF
1097 
1098 void ConnectionHandler::set_quic_ipc_fd(int fd) { quic_ipc_fd_ = fd; }
1099 
1100 void ConnectionHandler::set_quic_lingering_worker_processes(
1101     const std::vector<QUICLingeringWorkerProcess> &quic_lwps) {
1102   quic_lingering_worker_processes_ = quic_lwps;
1103 }
1104 
1105 int ConnectionHandler::forward_quic_packet_to_lingering_worker_process(
1106     QUICLingeringWorkerProcess *quic_lwp, const Address &remote_addr,
1107     const Address &local_addr, const ngtcp2_pkt_info &pi,
1108     std::span<const uint8_t> data) {
1109   std::array<uint8_t, 512> header;
1110 
1111   assert(header.size() >= 1 + 1 + 1 + 1 + sizeof(sockaddr_storage) * 2);
1112   assert(remote_addr.len > 0);
1113   assert(local_addr.len > 0);
1114 
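  // Serialize the header in the wire format that quic_ipc_read() parses:
  // TYPE, REMOTE_ADDRLEN - 1, REMOTE_ADDR, LOCAL_ADDRLEN - 1, LOCAL_ADDR,
  // ECN; the datagram payload follows as the second iovec.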
1115   auto p = header.data();
1116 
1117   *p++ = static_cast<uint8_t>(QUICIPCType::DGRAM_FORWARD);
1118   *p++ = static_cast<uint8_t>(remote_addr.len - 1);
1119   p = std::copy_n(reinterpret_cast<const uint8_t *>(&remote_addr.su),
1120                   remote_addr.len, p);
1121   *p++ = static_cast<uint8_t>(local_addr.len - 1);
1122   p = std::copy_n(reinterpret_cast<const uint8_t *>(&local_addr.su),
1123                   local_addr.len, p);
1124   *p++ = pi.ecn;
1125 
1126   iovec msg_iov[] = {
1127       {
1128           .iov_base = header.data(),
1129           .iov_len = static_cast<size_t>(p - header.data()),
1130       },
1131       {
1132           .iov_base = const_cast<uint8_t *>(data.data()),
1133           .iov_len = data.size(),
1134       },
1135   };
1136 
1137   msghdr msg{};
1138   msg.msg_iov = msg_iov;
1139   msg.msg_iovlen = array_size(msg_iov);
1140 
1141   ssize_t nwrite;
1142 
1143   while ((nwrite = sendmsg(quic_lwp->quic_ipc_fd, &msg, 0)) == -1 &&
1144          errno == EINTR)
1145     ;
1146 
1147   if (nwrite == -1) {
1148     std::array<char, STRERROR_BUFSIZE> errbuf;
1149 
1150     auto error = errno;
1151     LOG(ERROR) << "Failed to send QUIC IPC message: "
1152                << xsi_strerror(error, errbuf.data(), errbuf.size());
1153 
1154     return -1;
1155   }
1156 
1157   return 0;
1158 }
1159 
1160 int ConnectionHandler::quic_ipc_read() {
1161   std::array<uint8_t, 65536> buf;
1162 
1163   ssize_t nread;
1164 
1165   while ((nread = recv(quic_ipc_fd_, buf.data(), buf.size(), 0)) == -1 &&
1166          errno == EINTR)
1167     ;
1168 
1169   if (nread == -1) {
1170     std::array<char, STRERROR_BUFSIZE> errbuf;
1171 
1172     auto error = errno;
1173     LOG(ERROR) << "Failed to read data from QUIC IPC channel: "
1174                << xsi_strerror(error, errbuf.data(), errbuf.size());
1175 
1176     return -1;
1177   }
1178 
1179   if (nread == 0) {
1180     return 0;
1181   }
1182 
1183   size_t len = 1 + 1 + 1 + 1;
1184 
1185   // Wire format:
1186   // TYPE(1) REMOTE_ADDRLEN(1) REMOTE_ADDR(N) LOCAL_ADDRLEN(1) LOCAL_ADDR(N)
1187   // ECN(1) DGRAM_PAYLOAD(N)
1188   //
1189   // When encoding, REMOTE_ADDRLEN and LOCAL_ADDRLEN are decremented
1190   // by 1.
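  // As a rough size example (assuming typical platforms where
  // sizeof(sockaddr_in) == 16): an IPv4/IPv4 forward carries a
  // 1 + 1 + 16 + 1 + 16 + 1 = 36 byte header before the datagram payload.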
1191   if (static_cast<size_t>(nread) < len) {
1192     return 0;
1193   }
1194 
1195   auto p = buf.data();
1196   if (*p != static_cast<uint8_t>(QUICIPCType::DGRAM_FORWARD)) {
1197     LOG(ERROR) << "Unknown QUICIPCType: " << static_cast<uint32_t>(*p);
1198 
1199     return -1;
1200   }
1201 
1202   ++p;
1203 
1204   auto pkt = std::make_unique<QUICPacket>();
1205 
1206   auto remote_addrlen = static_cast<size_t>(*p++) + 1;
1207   if (remote_addrlen > sizeof(sockaddr_storage)) {
1208     LOG(ERROR) << "The length of remote address is too large: "
1209                << remote_addrlen;
1210 
1211     return -1;
1212   }
1213 
1214   len += remote_addrlen;
1215 
1216   if (static_cast<size_t>(nread) < len) {
1217     LOG(ERROR) << "Insufficient QUIC IPC message length";
1218 
1219     return -1;
1220   }
1221 
1222   pkt->remote_addr.len = remote_addrlen;
1223   memcpy(&pkt->remote_addr.su, p, remote_addrlen);
1224 
1225   p += remote_addrlen;
1226 
1227   auto local_addrlen = static_cast<size_t>(*p++) + 1;
1228   if (local_addrlen > sizeof(sockaddr_storage)) {
1229     LOG(ERROR) << "The length of local address is too large: " << local_addrlen;
1230 
1231     return -1;
1232   }
1233 
1234   len += local_addrlen;
1235 
1236   if (static_cast<size_t>(nread) < len) {
1237     LOG(ERROR) << "Insufficient QUIC IPC message length";
1238 
1239     return -1;
1240   }
1241 
1242   pkt->local_addr.len = local_addrlen;
1243   memcpy(&pkt->local_addr.su, p, local_addrlen);
1244 
1245   p += local_addrlen;
1246 
1247   pkt->pi.ecn = *p++;
1248 
1249   auto datalen = nread - (p - buf.data());
1250 
1251   pkt->data.assign(p, p + datalen);
1252 
1253   // At the moment, UpstreamAddr index is unknown.
1254   pkt->upstream_addr_index = static_cast<size_t>(-1);
1255 
1256   ngtcp2_version_cid vc;
1257 
1258   auto rv = ngtcp2_pkt_decode_version_cid(&vc, p, datalen, SHRPX_QUIC_SCIDLEN);
1259   if (rv < 0) {
1260     LOG(ERROR) << "ngtcp2_pkt_decode_version_cid: " << ngtcp2_strerror(rv);
1261 
1262     return -1;
1263   }
1264 
1265   if (vc.dcidlen != SHRPX_QUIC_SCIDLEN) {
1266     LOG(ERROR) << "DCID length is invalid";
1267     return -1;
1268   }
1269 
1270   if (single_worker_) {
1271     auto faddr = single_worker_->find_quic_upstream_addr(pkt->local_addr);
1272     if (faddr == nullptr) {
1273       LOG(ERROR) << "No suitable upstream address found";
1274 
1275       return 0;
1276     }
1277 
1278     auto quic_conn_handler = single_worker_->get_quic_connection_handler();
1279 
1280     // Ignore return value
1281     quic_conn_handler->handle_packet(faddr, pkt->remote_addr, pkt->local_addr,
1282                                      pkt->pi, pkt->data);
1283 
1284     return 0;
1285   }
1286 
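  // In multi-worker mode the Worker ID is carried, encrypted, inside the
  // packet's DCID; decrypt it with the current keying material to locate
  // the worker that owns this connection.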
1287   auto &qkm = quic_keying_materials_->keying_materials.front();
1288 
1289   ConnectionID decrypted_dcid;
1290 
1291   if (decrypt_quic_connection_id(decrypted_dcid,
1292                                  vc.dcid + SHRPX_QUIC_CID_WORKER_ID_OFFSET,
1293                                  qkm.cid_decryption_ctx) != 0) {
1294     return -1;
1295   }
1296 
1297   auto worker = find_worker(decrypted_dcid.worker);
1298   if (worker == nullptr) {
1299     if (LOG_ENABLED(INFO)) {
1300       LOG(INFO) << "No worker to match Worker ID";
1301     }
1302 
1303     return 0;
1304   }
1305 
1306   WorkerEvent wev{
1307       .type = WorkerEventType::QUIC_PKT_FORWARD,
1308       .quic_pkt = std::move(pkt),
1309   };
1310 
1311   worker->send(std::move(wev));
1312 
1313   return 0;
1314 }
1315 #endif // ENABLE_HTTP3
1316 
1317 } // namespace shrpx
1318