1 /*
2 * nghttp2 - HTTP/2 C Library
3 *
4 * Copyright (c) 2012 Tatsuhiro Tsujikawa
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining
7 * a copy of this software and associated documentation files (the
8 * "Software"), to deal in the Software without restriction, including
9 * without limitation the rights to use, copy, modify, merge, publish,
10 * distribute, sublicense, and/or sell copies of the Software, and to
11 * permit persons to whom the Software is furnished to do so, subject to
12 * the following conditions:
13 *
14 * The above copyright notice and this permission notice shall be
15 * included in all copies or substantial portions of the Software.
16 *
17 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
18 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
19 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
20 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
21 * LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
22 * OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
23 * WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
24 */
25 #include "shrpx_connection_handler.h"
26
27 #ifdef HAVE_UNISTD_H
28 # include <unistd.h>
29 #endif // HAVE_UNISTD_H
30 #include <sys/types.h>
31 #include <sys/wait.h>
32
33 #include <cerrno>
34 #include <thread>
35 #include <random>
36
37 #include "shrpx_client_handler.h"
38 #include "shrpx_tls.h"
39 #include "shrpx_worker.h"
40 #include "shrpx_config.h"
41 #include "shrpx_http2_session.h"
42 #include "shrpx_connect_blocker.h"
43 #include "shrpx_downstream_connection.h"
44 #include "shrpx_accept_handler.h"
45 #include "shrpx_memcached_dispatcher.h"
46 #include "shrpx_signal.h"
47 #include "shrpx_log.h"
48 #include "xsi_strerror.h"
49 #include "util.h"
50 #include "template.h"
51 #include "ssl_compat.h"
52
53 using namespace nghttp2;
54
55 namespace shrpx {
56
57 namespace {
acceptor_disable_cb(struct ev_loop * loop,ev_timer * w,int revent)58 void acceptor_disable_cb(struct ev_loop *loop, ev_timer *w, int revent) {
59 auto h = static_cast<ConnectionHandler *>(w->data);
60
61 // If we are in graceful shutdown period, we must not enable
62 // acceptors again.
63 if (h->get_graceful_shutdown()) {
64 return;
65 }
66
67 h->enable_acceptor();
68 }
69 } // namespace
70
71 namespace {
ocsp_cb(struct ev_loop * loop,ev_timer * w,int revent)72 void ocsp_cb(struct ev_loop *loop, ev_timer *w, int revent) {
73 auto h = static_cast<ConnectionHandler *>(w->data);
74
75 // If we are in graceful shutdown period, we won't do ocsp query.
76 if (h->get_graceful_shutdown()) {
77 return;
78 }
79
80 LOG(NOTICE) << "Start ocsp update";
81
82 h->proceed_next_cert_ocsp();
83 }
84 } // namespace
85
86 namespace {
ocsp_read_cb(struct ev_loop * loop,ev_io * w,int revent)87 void ocsp_read_cb(struct ev_loop *loop, ev_io *w, int revent) {
88 auto h = static_cast<ConnectionHandler *>(w->data);
89
90 h->read_ocsp_chunk();
91 }
92 } // namespace
93
94 namespace {
ocsp_chld_cb(struct ev_loop * loop,ev_child * w,int revent)95 void ocsp_chld_cb(struct ev_loop *loop, ev_child *w, int revent) {
96 auto h = static_cast<ConnectionHandler *>(w->data);
97
98 h->handle_ocsp_complete();
99 }
100 } // namespace
101
102 namespace {
thread_join_async_cb(struct ev_loop * loop,ev_async * w,int revent)103 void thread_join_async_cb(struct ev_loop *loop, ev_async *w, int revent) {
104 ev_break(loop);
105 }
106 } // namespace
107
108 namespace {
serial_event_async_cb(struct ev_loop * loop,ev_async * w,int revent)109 void serial_event_async_cb(struct ev_loop *loop, ev_async *w, int revent) {
110 auto h = static_cast<ConnectionHandler *>(w->data);
111
112 h->handle_serial_event();
113 }
114 } // namespace
115
ConnectionHandler::ConnectionHandler(struct ev_loop *loop, std::mt19937 &gen)
  :
#ifdef ENABLE_HTTP3
    quic_ipc_fd_(-1),
#endif // ENABLE_HTTP3
    gen_(gen),
    single_worker_(nullptr),
    loop_(loop),
#ifdef HAVE_NEVERBLEED
    nb_(nullptr),
#endif // HAVE_NEVERBLEED
    tls_ticket_key_memcached_get_retry_count_(0),
    tls_ticket_key_memcached_fail_count_(0),
    worker_round_robin_cnt_(get_config()->api.enabled ? 1 : 0),
    graceful_shutdown_(false),
    enable_acceptor_on_ocsp_completion_(false) {
  ev_timer_init(&disable_acceptor_timer_, acceptor_disable_cb, 0., 0.);
  disable_acceptor_timer_.data = this;

  ev_timer_init(&ocsp_timer_, ocsp_cb, 0., 0.);
  ocsp_timer_.data = this;

  ev_io_init(&ocsp_.rev, ocsp_read_cb, -1, EV_READ);
  ocsp_.rev.data = this;

  ev_async_init(&thread_join_asyncev_, thread_join_async_cb);

  ev_async_init(&serial_event_asyncev_, serial_event_async_cb);
  serial_event_asyncev_.data = this;

  ev_async_start(loop_, &serial_event_asyncev_);

  ev_child_init(&ocsp_.chldev, ocsp_chld_cb, 0, 0);
  ocsp_.chldev.data = this;

  ocsp_.next = 0;
  ocsp_.proc.rfd = -1;

  reset_ocsp();
}

ConnectionHandler::~ConnectionHandler() {
  ev_child_stop(loop_, &ocsp_.chldev);
  ev_async_stop(loop_, &serial_event_asyncev_);
  ev_async_stop(loop_, &thread_join_asyncev_);
  ev_io_stop(loop_, &ocsp_.rev);
  ev_timer_stop(loop_, &ocsp_timer_);
  ev_timer_stop(loop_, &disable_acceptor_timer_);

#ifdef ENABLE_HTTP3
  for (auto ssl_ctx : quic_all_ssl_ctx_) {
    if (ssl_ctx == nullptr) {
      continue;
    }

    auto tls_ctx_data =
        static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
    delete tls_ctx_data;
    SSL_CTX_free(ssl_ctx);
  }
#endif // ENABLE_HTTP3

  for (auto ssl_ctx : all_ssl_ctx_) {
    auto tls_ctx_data =
        static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));
    delete tls_ctx_data;
    SSL_CTX_free(ssl_ctx);
  }

  // Free workers before destroying ev_loop
  workers_.clear();

  for (auto loop : worker_loops_) {
    ev_loop_destroy(loop);
  }
}

void ConnectionHandler::set_ticket_keys_to_worker(
    const std::shared_ptr<TicketKeys> &ticket_keys) {
  for (auto &worker : workers_) {
    worker->set_ticket_keys(ticket_keys);
  }
}

void ConnectionHandler::worker_reopen_log_files() {
  for (auto &worker : workers_) {
    WorkerEvent wev{};

    wev.type = WorkerEventType::REOPEN_LOG;

    worker->send(std::move(wev));
  }
}

void ConnectionHandler::worker_replace_downstream(
    std::shared_ptr<DownstreamConfig> downstreamconf) {
  for (auto &worker : workers_) {
    WorkerEvent wev{};

    wev.type = WorkerEventType::REPLACE_DOWNSTREAM;
    wev.downstreamconf = downstreamconf;

    worker->send(std::move(wev));
  }
}

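// Sets up the server, client, and QUIC TLS contexts and creates a
// single Worker that shares this handler's event loop.  This path is
// used when nghttpx runs with a single worker.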
int ConnectionHandler::create_single_worker() {
  cert_tree_ = tls::create_cert_lookup_tree();
  auto sv_ssl_ctx = tls::setup_server_ssl_context(
      all_ssl_ctx_, indexed_ssl_ctx_, cert_tree_.get()
#ifdef HAVE_NEVERBLEED
      ,
      nb_
#endif // HAVE_NEVERBLEED
  );

#ifdef ENABLE_HTTP3
  quic_cert_tree_ = tls::create_cert_lookup_tree();
  auto quic_sv_ssl_ctx = tls::setup_quic_server_ssl_context(
      quic_all_ssl_ctx_, quic_indexed_ssl_ctx_, quic_cert_tree_.get()
#  ifdef HAVE_NEVERBLEED
      ,
      nb_
#  endif // HAVE_NEVERBLEED
  );
#endif // ENABLE_HTTP3

  auto cl_ssl_ctx = tls::setup_downstream_client_ssl_context(
#ifdef HAVE_NEVERBLEED
      nb_
#endif // HAVE_NEVERBLEED
  );

  if (cl_ssl_ctx) {
    all_ssl_ctx_.push_back(cl_ssl_ctx);
#ifdef ENABLE_HTTP3
    quic_all_ssl_ctx_.push_back(nullptr);
#endif // ENABLE_HTTP3
  }

  auto config = get_config();
  auto &tlsconf = config->tls;

  SSL_CTX *session_cache_ssl_ctx = nullptr;
  {
    auto &memcachedconf = config->tls.session_cache.memcached;
    if (memcachedconf.tls) {
      session_cache_ssl_ctx = tls::create_ssl_client_context(
#ifdef HAVE_NEVERBLEED
          nb_,
#endif // HAVE_NEVERBLEED
          tlsconf.cacert, memcachedconf.cert_file,
          memcachedconf.private_key_file);
      all_ssl_ctx_.push_back(session_cache_ssl_ctx);
#ifdef ENABLE_HTTP3
      quic_all_ssl_ctx_.push_back(nullptr);
#endif // ENABLE_HTTP3
    }
  }

#if defined(ENABLE_HTTP3) && defined(HAVE_LIBBPF)
  quic_bpf_refs_.resize(config->conn.quic_listener.addrs.size());
#endif // ENABLE_HTTP3 && HAVE_LIBBPF

#ifdef ENABLE_HTTP3
  assert(worker_ids_.size() == 1);
  const auto &wid = worker_ids_[0];
#endif // ENABLE_HTTP3

  single_worker_ = std::make_unique<Worker>(
      loop_, sv_ssl_ctx, cl_ssl_ctx, session_cache_ssl_ctx, cert_tree_.get(),
#ifdef ENABLE_HTTP3
      quic_sv_ssl_ctx, quic_cert_tree_.get(), wid,
#  ifdef HAVE_LIBBPF
      /* index = */ 0,
#  endif // HAVE_LIBBPF
#endif // ENABLE_HTTP3
      ticket_keys_, this, config->conn.downstream);
#ifdef HAVE_MRUBY
  if (single_worker_->create_mruby_context() != 0) {
    return -1;
  }
#endif // HAVE_MRUBY

#ifdef ENABLE_HTTP3
  if (single_worker_->setup_quic_server_socket() != 0) {
    return -1;
  }
#endif // ENABLE_HTTP3

  return 0;
}

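// Creates |num| worker threads, each with its own event loop, and
// starts them.  When the API endpoint is enabled, one extra worker is
// created and dedicated to API request processing.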
int ConnectionHandler::create_worker_thread(size_t num) {
#ifndef NOTHREADS
  assert(workers_.size() == 0);

  cert_tree_ = tls::create_cert_lookup_tree();
  auto sv_ssl_ctx = tls::setup_server_ssl_context(
      all_ssl_ctx_, indexed_ssl_ctx_, cert_tree_.get()
#  ifdef HAVE_NEVERBLEED
      ,
      nb_
#  endif // HAVE_NEVERBLEED
  );

#  ifdef ENABLE_HTTP3
  quic_cert_tree_ = tls::create_cert_lookup_tree();
  auto quic_sv_ssl_ctx = tls::setup_quic_server_ssl_context(
      quic_all_ssl_ctx_, quic_indexed_ssl_ctx_, quic_cert_tree_.get()
#    ifdef HAVE_NEVERBLEED
      ,
      nb_
#    endif // HAVE_NEVERBLEED
  );
#  endif // ENABLE_HTTP3

  auto cl_ssl_ctx = tls::setup_downstream_client_ssl_context(
#  ifdef HAVE_NEVERBLEED
      nb_
#  endif // HAVE_NEVERBLEED
  );

  if (cl_ssl_ctx) {
    all_ssl_ctx_.push_back(cl_ssl_ctx);
#  ifdef ENABLE_HTTP3
    quic_all_ssl_ctx_.push_back(nullptr);
#  endif // ENABLE_HTTP3
  }

  auto config = get_config();
  auto &tlsconf = config->tls;
  auto &apiconf = config->api;

#  if defined(ENABLE_HTTP3) && defined(HAVE_LIBBPF)
  quic_bpf_refs_.resize(config->conn.quic_listener.addrs.size());
#  endif // ENABLE_HTTP3 && HAVE_LIBBPF

  // We have a dedicated worker for API request processing.
  if (apiconf.enabled) {
    ++num;
  }

  SSL_CTX *session_cache_ssl_ctx = nullptr;
  {
    auto &memcachedconf = config->tls.session_cache.memcached;

    if (memcachedconf.tls) {
      session_cache_ssl_ctx = tls::create_ssl_client_context(
#  ifdef HAVE_NEVERBLEED
          nb_,
#  endif // HAVE_NEVERBLEED
          tlsconf.cacert, memcachedconf.cert_file,
          memcachedconf.private_key_file);
      all_ssl_ctx_.push_back(session_cache_ssl_ctx);
#  ifdef ENABLE_HTTP3
      quic_all_ssl_ctx_.push_back(nullptr);
#  endif // ENABLE_HTTP3
    }
  }

#  ifdef ENABLE_HTTP3
  assert(worker_ids_.size() == num);
#  endif // ENABLE_HTTP3

  for (size_t i = 0; i < num; ++i) {
    auto loop = ev_loop_new(config->ev_loop_flags);

#  ifdef ENABLE_HTTP3
    const auto &wid = worker_ids_[i];
#  endif // ENABLE_HTTP3

    auto worker = std::make_unique<Worker>(
        loop, sv_ssl_ctx, cl_ssl_ctx, session_cache_ssl_ctx, cert_tree_.get(),
#  ifdef ENABLE_HTTP3
        quic_sv_ssl_ctx, quic_cert_tree_.get(), wid,
#    ifdef HAVE_LIBBPF
        i,
#    endif // HAVE_LIBBPF
#  endif // ENABLE_HTTP3
        ticket_keys_, this, config->conn.downstream);
#  ifdef HAVE_MRUBY
    if (worker->create_mruby_context() != 0) {
      return -1;
    }
#  endif // HAVE_MRUBY

#  ifdef ENABLE_HTTP3
    if ((!apiconf.enabled || i != 0) &&
        worker->setup_quic_server_socket() != 0) {
      return -1;
    }
#  endif // ENABLE_HTTP3

    workers_.push_back(std::move(worker));
    worker_loops_.push_back(loop);

    LLOG(NOTICE, this) << "Created worker thread #" << workers_.size() - 1;
  }

  for (auto &worker : workers_) {
    worker->run_async();
  }

#endif // NOTHREADS

  return 0;
}

void ConnectionHandler::join_worker() {
#ifndef NOTHREADS
  int n = 0;

  if (LOG_ENABLED(INFO)) {
    LLOG(INFO, this) << "Waiting for worker thread to join: n="
                     << workers_.size();
  }

  for (auto &worker : workers_) {
    worker->wait();
    if (LOG_ENABLED(INFO)) {
      LLOG(INFO, this) << "Thread #" << n << " joined";
    }
    ++n;
  }
#endif // NOTHREADS
}

void ConnectionHandler::graceful_shutdown_worker() {
  if (single_worker_) {
    return;
  }

  if (LOG_ENABLED(INFO)) {
    LLOG(INFO, this) << "Sending graceful shutdown signal to worker";
  }

  for (auto &worker : workers_) {
    WorkerEvent wev{};
    wev.type = WorkerEventType::GRACEFUL_SHUTDOWN;

    worker->send(std::move(wev));
  }

#ifndef NOTHREADS
  ev_async_start(loop_, &thread_join_asyncev_);

  thread_join_fut_ = std::async(std::launch::async, [this]() {
    (void)reopen_log_files(get_config()->logging);
    join_worker();
    ev_async_send(get_loop(), &thread_join_asyncev_);
    delete_log_config();
  });
#endif // NOTHREADS
}

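// Dispatches a newly accepted connection.  With a single worker, the
// connection is handled on this thread's event loop; otherwise it is
// handed off to a worker thread.  When the API endpoint is enabled,
// worker #0 is reserved for API requests and the round-robin counter
// skips it for regular connections.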
int ConnectionHandler::handle_connection(int fd, sockaddr *addr, int addrlen,
                                         const UpstreamAddr *faddr) {
  if (LOG_ENABLED(INFO)) {
    LLOG(INFO, this) << "Accepted connection from "
                     << util::numeric_name(addr, addrlen) << ", fd=" << fd;
  }

  auto config = get_config();

  if (single_worker_) {
    auto &upstreamconf = config->conn.upstream;
    if (single_worker_->get_worker_stat()->num_connections >=
        upstreamconf.worker_connections) {
      if (LOG_ENABLED(INFO)) {
        LLOG(INFO, this) << "Too many connections >="
                         << upstreamconf.worker_connections;
      }

      close(fd);
      return -1;
    }

    auto client =
        tls::accept_connection(single_worker_.get(), fd, addr, addrlen, faddr);
    if (!client) {
      LLOG(ERROR, this) << "ClientHandler creation failed";

      close(fd);
      return -1;
    }

    return 0;
  }

  Worker *worker;

  if (faddr->alt_mode == UpstreamAltMode::API) {
    worker = workers_[0].get();

    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Dispatch connection to API worker #0";
    }
  } else {
    worker = workers_[worker_round_robin_cnt_].get();

    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "Dispatch connection to worker #" << worker_round_robin_cnt_;
    }

    if (++worker_round_robin_cnt_ == workers_.size()) {
      auto &apiconf = config->api;

      if (apiconf.enabled) {
        worker_round_robin_cnt_ = 1;
      } else {
        worker_round_robin_cnt_ = 0;
      }
    }
  }

  WorkerEvent wev{};
  wev.type = WorkerEventType::NEW_CONNECTION;
  wev.client_fd = fd;
  memcpy(&wev.client_addr, addr, addrlen);
  wev.client_addrlen = addrlen;
  wev.faddr = faddr;

  worker->send(std::move(wev));

  return 0;
}

struct ev_loop *ConnectionHandler::get_loop() const { return loop_; }

Worker *ConnectionHandler::get_single_worker() const {
  return single_worker_.get();
}

void ConnectionHandler::add_acceptor(std::unique_ptr<AcceptHandler> h) {
  acceptors_.push_back(std::move(h));
}

void ConnectionHandler::delete_acceptor() { acceptors_.clear(); }

void ConnectionHandler::enable_acceptor() {
  for (auto &a : acceptors_) {
    a->enable();
  }
}

void ConnectionHandler::disable_acceptor() {
  for (auto &a : acceptors_) {
    a->disable();
  }
}

void ConnectionHandler::sleep_acceptor(ev_tstamp t) {
  if (t == 0. || ev_is_active(&disable_acceptor_timer_)) {
    return;
  }

  disable_acceptor();

  ev_timer_set(&disable_acceptor_timer_, t, 0.);
  ev_timer_start(loop_, &disable_acceptor_timer_);
}

void ConnectionHandler::accept_pending_connection() {
  for (auto &a : acceptors_) {
    a->accept_connection();
  }
}

void ConnectionHandler::set_ticket_keys(
    std::shared_ptr<TicketKeys> ticket_keys) {
  ticket_keys_ = std::move(ticket_keys);
  if (single_worker_) {
    single_worker_->set_ticket_keys(ticket_keys_);
  }
}

const std::shared_ptr<TicketKeys> &ConnectionHandler::get_ticket_keys() const {
  return ticket_keys_;
}

void ConnectionHandler::set_graceful_shutdown(bool f) {
  graceful_shutdown_ = f;
  if (single_worker_) {
    single_worker_->set_graceful_shutdown(f);
  }
}

bool ConnectionHandler::get_graceful_shutdown() const {
  return graceful_shutdown_;
}

void ConnectionHandler::cancel_ocsp_update() {
  enable_acceptor_on_ocsp_completion_ = false;
  ev_timer_stop(loop_, &ocsp_timer_);

  if (ocsp_.proc.pid == 0) {
    return;
  }

  int rv;

  rv = kill(ocsp_.proc.pid, SIGTERM);
  if (rv != 0) {
    auto error = errno;
    LOG(ERROR) << "Could not send signal to OCSP query process: errno="
               << error;
  }

  while ((rv = waitpid(ocsp_.proc.pid, nullptr, 0)) == -1 && errno == EINTR)
    ;
  if (rv == -1) {
    auto error = errno;
    LOG(ERROR) << "Error occurred while we were waiting for the completion of "
                  "OCSP query process: errno="
               << error;
  }
}

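// Spawns the fetch-ocsp-response script for |cert_file| and starts
// watching both its stdout pipe (ocsp_.rev) and its termination
// (ocsp_.chldev).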
// inspired by h2o_read_command function from h2o project:
// https://github.com/h2o/h2o
int ConnectionHandler::start_ocsp_update(const char *cert_file) {
  int rv;

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "Start ocsp update for " << cert_file;
  }

  assert(!ev_is_active(&ocsp_.rev));
  assert(!ev_is_active(&ocsp_.chldev));

  char *const argv[] = {
      const_cast<char *>(get_config()->tls.ocsp.fetch_ocsp_response_file.data()),
      const_cast<char *>(cert_file), nullptr};

  Process proc;
  rv = exec_read_command(proc, argv);
  if (rv != 0) {
    return -1;
  }

  ocsp_.proc = proc;

  ev_io_set(&ocsp_.rev, ocsp_.proc.rfd, EV_READ);
  ev_io_start(loop_, &ocsp_.rev);

  ev_child_set(&ocsp_.chldev, ocsp_.proc.pid, 0);
  ev_child_start(loop_, &ocsp_.chldev);

  return 0;
}

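// Drains the pipe connected to the OCSP query process, appending the
// bytes read to ocsp_.resp until the read would block or EOF/error is
// reached.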
void ConnectionHandler::read_ocsp_chunk() {
  std::array<uint8_t, 4_k> buf;
  for (;;) {
    ssize_t n;
    while ((n = read(ocsp_.proc.rfd, buf.data(), buf.size())) == -1 &&
           errno == EINTR)
      ;

    if (n == -1) {
      if (errno == EAGAIN || errno == EWOULDBLOCK) {
        return;
      }
      auto error = errno;
      LOG(WARN) << "Reading from ocsp query command failed: errno=" << error;
      ocsp_.error = error;

      break;
    }

    if (n == 0) {
      break;
    }

    std::copy_n(std::begin(buf), n, std::back_inserter(ocsp_.resp));
  }

  ev_io_stop(loop_, &ocsp_.rev);
}

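// Invoked when the OCSP query process exits.  On success, the fetched
// response is installed into the TLSContextData of the current SSL_CTX
// (and its QUIC counterpart, if any); on failure the certificate is
// skipped.  Either way, the update proceeds to the next certificate.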
void ConnectionHandler::handle_ocsp_complete() {
  ev_io_stop(loop_, &ocsp_.rev);
  ev_child_stop(loop_, &ocsp_.chldev);

  assert(ocsp_.next < all_ssl_ctx_.size());
#ifdef ENABLE_HTTP3
  assert(all_ssl_ctx_.size() == quic_all_ssl_ctx_.size());
#endif // ENABLE_HTTP3

  auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
  auto tls_ctx_data =
      static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));

  auto rstatus = ocsp_.chldev.rstatus;
  auto status = WEXITSTATUS(rstatus);
  if (ocsp_.error || !WIFEXITED(rstatus) || status != 0) {
    LOG(WARN) << "ocsp query command for " << tls_ctx_data->cert_file
              << " failed: error=" << ocsp_.error << ", rstatus=" << log::hex
              << rstatus << log::dec << ", status=" << status;
    ++ocsp_.next;
    proceed_next_cert_ocsp();
    return;
  }

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "ocsp update for " << tls_ctx_data->cert_file
              << " finished successfully";
  }

  auto config = get_config();
  auto &tlsconf = config->tls;

  if (tlsconf.ocsp.no_verify ||
      tls::verify_ocsp_response(ssl_ctx, ocsp_.resp.data(),
                                ocsp_.resp.size()) == 0) {
#ifdef ENABLE_HTTP3
    // We also keep a list of SSL_CTX with the same certificates in
    // quic_all_ssl_ctx_.  Some entries there may be missing, in which
    // case we get nullptr.
    auto quic_ssl_ctx = quic_all_ssl_ctx_[ocsp_.next];
    if (quic_ssl_ctx) {
      auto quic_tls_ctx_data =
          static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(quic_ssl_ctx));
#  ifdef HAVE_ATOMIC_STD_SHARED_PTR
      quic_tls_ctx_data->ocsp_data.store(
          std::make_shared<std::vector<uint8_t>>(ocsp_.resp),
          std::memory_order_release);
#  else // !HAVE_ATOMIC_STD_SHARED_PTR
      std::lock_guard<std::mutex> g(quic_tls_ctx_data->mu);
      quic_tls_ctx_data->ocsp_data =
          std::make_shared<std::vector<uint8_t>>(ocsp_.resp);
#  endif // !HAVE_ATOMIC_STD_SHARED_PTR
    }
#endif // ENABLE_HTTP3

#ifdef HAVE_ATOMIC_STD_SHARED_PTR
    tls_ctx_data->ocsp_data.store(
        std::make_shared<std::vector<uint8_t>>(std::move(ocsp_.resp)),
        std::memory_order_release);
#else // !HAVE_ATOMIC_STD_SHARED_PTR
    std::lock_guard<std::mutex> g(tls_ctx_data->mu);
    tls_ctx_data->ocsp_data =
        std::make_shared<std::vector<uint8_t>>(std::move(ocsp_.resp));
#endif // !HAVE_ATOMIC_STD_SHARED_PTR
  }

  ++ocsp_.next;
  proceed_next_cert_ocsp();
}

void ConnectionHandler::reset_ocsp() {
  if (ocsp_.proc.rfd != -1) {
    close(ocsp_.proc.rfd);
  }

  ocsp_.proc.rfd = -1;
  ocsp_.proc.pid = 0;
  ocsp_.error = 0;
  ocsp_.resp = std::vector<uint8_t>();
}

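// Finds the next SSL_CTX that carries certificate data and starts its
// OCSP update.  Once all contexts have been processed, the periodic
// OCSP timer is re-armed and, if requested, the acceptors are enabled
// again.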
void ConnectionHandler::proceed_next_cert_ocsp() {
  for (;;) {
    reset_ocsp();
    if (ocsp_.next == all_ssl_ctx_.size()) {
      ocsp_.next = 0;
      // We have updated all OCSP responses; schedule the next update.
      ev_timer_set(&ocsp_timer_, get_config()->tls.ocsp.update_interval, 0.);
      ev_timer_start(loop_, &ocsp_timer_);

      if (enable_acceptor_on_ocsp_completion_) {
        enable_acceptor_on_ocsp_completion_ = false;
        enable_acceptor();
      }

      return;
    }

    auto ssl_ctx = all_ssl_ctx_[ocsp_.next];
    auto tls_ctx_data =
        static_cast<tls::TLSContextData *>(SSL_CTX_get_app_data(ssl_ctx));

    // client SSL_CTX is also included in all_ssl_ctx_, but has no
    // tls_ctx_data.
    if (!tls_ctx_data) {
      ++ocsp_.next;
      continue;
    }

    auto cert_file = tls_ctx_data->cert_file;

    if (start_ocsp_update(cert_file) != 0) {
      ++ocsp_.next;
      continue;
    }

    break;
  }
}

void ConnectionHandler::set_tls_ticket_key_memcached_dispatcher(
    std::unique_ptr<MemcachedDispatcher> dispatcher) {
  tls_ticket_key_memcached_dispatcher_ = std::move(dispatcher);
}

MemcachedDispatcher *
ConnectionHandler::get_tls_ticket_key_memcached_dispatcher() const {
  return tls_ticket_key_memcached_dispatcher_.get();
}

// Use a backoff algorithm similar to the one described in
// https://github.com/grpc/grpc/blob/master/doc/connection-backoff.md
namespace {
constexpr size_t MAX_BACKOFF_EXP = 10;
constexpr auto MULTIPLIER = 3.2;
constexpr auto JITTER = 0.2;
} // namespace

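// Computes the retry delay as MULTIPLIER^min(retry_count, MAX_BACKOFF_EXP)
// seconds plus uniform random jitter of up to +/-JITTER of that base
// value, then re-arms |w| with it.  Once the retry limit is reached,
// the lookup is treated as "not found".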
void ConnectionHandler::on_tls_ticket_key_network_error(ev_timer *w) {
  if (++tls_ticket_key_memcached_get_retry_count_ >=
      get_config()->tls.ticket.memcached.max_retry) {
    LOG(WARN) << "Memcached: tls ticket get retry all failed "
              << tls_ticket_key_memcached_get_retry_count_ << " times.";

    on_tls_ticket_key_not_found(w);
    return;
  }

  auto base_backoff = util::int_pow(
      MULTIPLIER,
      std::min(MAX_BACKOFF_EXP, tls_ticket_key_memcached_get_retry_count_));
  auto dist = std::uniform_real_distribution<>(-JITTER * base_backoff,
                                               JITTER * base_backoff);

  auto backoff = base_backoff + dist(gen_);

  LOG(WARN)
      << "Memcached: tls ticket get failed due to network error, retrying in "
      << backoff << " seconds";

  ev_timer_set(w, backoff, 0.);
  ev_timer_start(loop_, w);
}

void ConnectionHandler::on_tls_ticket_key_not_found(ev_timer *w) {
  tls_ticket_key_memcached_get_retry_count_ = 0;

  if (++tls_ticket_key_memcached_fail_count_ >=
      get_config()->tls.ticket.memcached.max_fail) {
    LOG(WARN) << "Memcached: could not get tls ticket; disable tls ticket";

    tls_ticket_key_memcached_fail_count_ = 0;

    set_ticket_keys(nullptr);
    set_ticket_keys_to_worker(nullptr);
  }

  LOG(WARN) << "Memcached: tls ticket get failed, schedule next";
  schedule_next_tls_ticket_key_memcached_get(w);
}

void ConnectionHandler::on_tls_ticket_key_get_success(
    const std::shared_ptr<TicketKeys> &ticket_keys, ev_timer *w) {
  LOG(NOTICE) << "Memcached: tls ticket get success";

  tls_ticket_key_memcached_get_retry_count_ = 0;
  tls_ticket_key_memcached_fail_count_ = 0;

  schedule_next_tls_ticket_key_memcached_get(w);

  if (!ticket_keys || ticket_keys->keys.empty()) {
    LOG(WARN) << "Memcached: tls ticket keys are empty; tls ticket disabled";
    set_ticket_keys(nullptr);
    set_ticket_keys_to_worker(nullptr);
    return;
  }

  if (LOG_ENABLED(INFO)) {
    LOG(INFO) << "ticket keys get done";
    LOG(INFO) << 0 << " enc+dec: "
              << util::format_hex(ticket_keys->keys[0].data.name);
    for (size_t i = 1; i < ticket_keys->keys.size(); ++i) {
      auto &key = ticket_keys->keys[i];
      LOG(INFO) << i << " dec: " << util::format_hex(key.data.name);
    }
  }

  set_ticket_keys(ticket_keys);
  set_ticket_keys_to_worker(ticket_keys);
}

void ConnectionHandler::schedule_next_tls_ticket_key_memcached_get(
    ev_timer *w) {
  ev_timer_set(w, get_config()->tls.ticket.memcached.interval, 0.);
  ev_timer_start(loop_, w);
}

SSL_CTX *ConnectionHandler::create_tls_ticket_key_memcached_ssl_ctx() {
  auto config = get_config();
  auto &tlsconf = config->tls;
  auto &memcachedconf = config->tls.ticket.memcached;

  auto ssl_ctx = tls::create_ssl_client_context(
#ifdef HAVE_NEVERBLEED
      nb_,
#endif // HAVE_NEVERBLEED
      tlsconf.cacert, memcachedconf.cert_file, memcachedconf.private_key_file);

  all_ssl_ctx_.push_back(ssl_ctx);
#ifdef ENABLE_HTTP3
  quic_all_ssl_ctx_.push_back(nullptr);
#endif // ENABLE_HTTP3

  return ssl_ctx;
}

#ifdef HAVE_NEVERBLEED
void ConnectionHandler::set_neverbleed(neverbleed_t *nb) { nb_ = nb; }
#endif // HAVE_NEVERBLEED

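// Runs on this handler's event loop in response to serial_event_asyncev_
// and processes the events queued by send_serial_event(), so that
// configuration replacements are applied serially.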
void ConnectionHandler::handle_serial_event() {
  std::vector<SerialEvent> q;
  {
    std::lock_guard<std::mutex> g(serial_event_mu_);
    q.swap(serial_events_);
  }

  for (auto &sev : q) {
    switch (sev.type) {
    case SerialEventType::REPLACE_DOWNSTREAM:
      // Make sure that none of the workers use
      // get_config()->conn.downstream
      mod_config()->conn.downstream = sev.downstreamconf;

      if (single_worker_) {
        single_worker_->replace_downstream_config(sev.downstreamconf);

        break;
      }

      worker_replace_downstream(sev.downstreamconf);

      break;
    default:
      break;
    }
  }
}

void ConnectionHandler::send_replace_downstream(
    const std::shared_ptr<DownstreamConfig> &downstreamconf) {
  send_serial_event(
      SerialEvent(SerialEventType::REPLACE_DOWNSTREAM, downstreamconf));
}

void ConnectionHandler::send_serial_event(SerialEvent ev) {
  {
    std::lock_guard<std::mutex> g(serial_event_mu_);

    serial_events_.push_back(std::move(ev));
  }

  ev_async_send(loop_, &serial_event_asyncev_);
}

SSL_CTX *ConnectionHandler::get_ssl_ctx(size_t idx) const {
  return all_ssl_ctx_[idx];
}

const std::vector<SSL_CTX *> &
ConnectionHandler::get_indexed_ssl_ctx(size_t idx) const {
  return indexed_ssl_ctx_[idx];
}

#ifdef ENABLE_HTTP3
const std::vector<SSL_CTX *> &
ConnectionHandler::get_quic_indexed_ssl_ctx(size_t idx) const {
  return quic_indexed_ssl_ctx_[idx];
}
#endif // ENABLE_HTTP3

void ConnectionHandler::set_enable_acceptor_on_ocsp_completion(bool f) {
  enable_acceptor_on_ocsp_completion_ = f;
}

#ifdef ENABLE_HTTP3
int ConnectionHandler::forward_quic_packet(const UpstreamAddr *faddr,
                                           const Address &remote_addr,
                                           const Address &local_addr,
                                           const ngtcp2_pkt_info &pi,
                                           const WorkerID &wid,
                                           std::span<const uint8_t> data) {
  assert(!get_config()->single_thread);

  auto worker = find_worker(wid);
  if (worker == nullptr) {
    return -1;
  }

  WorkerEvent wev{};
  wev.type = WorkerEventType::QUIC_PKT_FORWARD;
  wev.quic_pkt = std::make_unique<QUICPacket>(faddr->index, remote_addr,
                                              local_addr, pi, data);

  worker->send(std::move(wev));

  return 0;
}

void ConnectionHandler::set_quic_keying_materials(
    std::shared_ptr<QUICKeyingMaterials> qkms) {
  quic_keying_materials_ = std::move(qkms);
}

const std::shared_ptr<QUICKeyingMaterials> &
ConnectionHandler::get_quic_keying_materials() const {
  return quic_keying_materials_;
}

void ConnectionHandler::set_worker_ids(std::vector<WorkerID> worker_ids) {
  worker_ids_ = std::move(worker_ids);
}

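// Returns the index of |wid| within |worker_ids|, or -1 if |wid|
// belongs to a different server instance or worker process.  The
// thread field of the Worker ID is used directly as the index.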
namespace {
ssize_t find_worker_index(const std::vector<WorkerID> &worker_ids,
                          const WorkerID &wid) {
  assert(!worker_ids.empty());

  if (wid.server != worker_ids[0].server ||
      wid.worker_process != worker_ids[0].worker_process ||
      wid.thread >= worker_ids.size()) {
    return -1;
  }

  return wid.thread;
}
} // namespace

Worker *ConnectionHandler::find_worker(const WorkerID &wid) const {
  auto idx = find_worker_index(worker_ids_, wid);
  if (idx == -1) {
    return nullptr;
  }

  return workers_[idx].get();
}

QUICLingeringWorkerProcess *
ConnectionHandler::match_quic_lingering_worker_process_worker_id(
    const WorkerID &wid) {
  for (auto &lwps : quic_lingering_worker_processes_) {
    if (find_worker_index(lwps.worker_ids, wid) != -1) {
      return &lwps;
    }
  }

  return nullptr;
}

#  ifdef HAVE_LIBBPF
std::vector<BPFRef> &ConnectionHandler::get_quic_bpf_refs() {
  return quic_bpf_refs_;
}

void ConnectionHandler::unload_bpf_objects() {
  LOG(NOTICE) << "Unloading BPF objects";

  for (auto &ref : quic_bpf_refs_) {
    if (ref.obj == nullptr) {
      continue;
    }

    bpf_object__close(ref.obj);

    ref.obj = nullptr;
  }
}
#  endif // HAVE_LIBBPF

void ConnectionHandler::set_quic_ipc_fd(int fd) { quic_ipc_fd_ = fd; }

void ConnectionHandler::set_quic_lingering_worker_processes(
    const std::vector<QUICLingeringWorkerProcess> &quic_lwps) {
  quic_lingering_worker_processes_ = quic_lwps;
}

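// Serializes |remote_addr|, |local_addr| and the ECN bits from |pi|
// into a small header and sends it, together with the datagram
// payload, over the lingering worker process' QUIC IPC socket in a
// single sendmsg() call.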
int ConnectionHandler::forward_quic_packet_to_lingering_worker_process(
    QUICLingeringWorkerProcess *quic_lwp, const Address &remote_addr,
    const Address &local_addr, const ngtcp2_pkt_info &pi,
    std::span<const uint8_t> data) {
  std::array<uint8_t, 512> header;

  assert(header.size() >= 1 + 1 + 1 + 1 + sizeof(sockaddr_storage) * 2);
  assert(remote_addr.len > 0);
  assert(local_addr.len > 0);

  auto p = header.data();

  *p++ = static_cast<uint8_t>(QUICIPCType::DGRAM_FORWARD);
  *p++ = static_cast<uint8_t>(remote_addr.len - 1);
  p = std::copy_n(reinterpret_cast<const uint8_t *>(&remote_addr.su),
                  remote_addr.len, p);
  *p++ = static_cast<uint8_t>(local_addr.len - 1);
  p = std::copy_n(reinterpret_cast<const uint8_t *>(&local_addr.su),
                  local_addr.len, p);
  *p++ = pi.ecn;

  iovec msg_iov[] = {
      {
          .iov_base = header.data(),
          .iov_len = static_cast<size_t>(p - header.data()),
      },
      {
          .iov_base = const_cast<uint8_t *>(data.data()),
          .iov_len = data.size(),
      },
  };

  msghdr msg{};
  msg.msg_iov = msg_iov;
  msg.msg_iovlen = array_size(msg_iov);

  ssize_t nwrite;

  while ((nwrite = sendmsg(quic_lwp->quic_ipc_fd, &msg, 0)) == -1 &&
         errno == EINTR)
    ;

  if (nwrite == -1) {
    std::array<char, STRERROR_BUFSIZE> errbuf;

    auto error = errno;
    LOG(ERROR) << "Failed to send QUIC IPC message: "
               << xsi_strerror(error, errbuf.data(), errbuf.size());

    return -1;
  }

  return 0;
}

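// Reads one datagram from the QUIC IPC channel, decodes the addresses
// and ECN from the header (see the wire format comment below), and
// either handles the packet directly in single worker mode or routes
// it to the worker identified by the Worker ID recovered from the
// packet's DCID.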
int ConnectionHandler::quic_ipc_read() {
  std::array<uint8_t, 65536> buf;

  ssize_t nread;

  while ((nread = recv(quic_ipc_fd_, buf.data(), buf.size(), 0)) == -1 &&
         errno == EINTR)
    ;

  if (nread == -1) {
    std::array<char, STRERROR_BUFSIZE> errbuf;

    auto error = errno;
    LOG(ERROR) << "Failed to read data from QUIC IPC channel: "
               << xsi_strerror(error, errbuf.data(), errbuf.size());

    return -1;
  }

  if (nread == 0) {
    return 0;
  }

  size_t len = 1 + 1 + 1 + 1;

  // Wire format:
  // TYPE(1) REMOTE_ADDRLEN(1) REMOTE_ADDR(N) LOCAL_ADDRLEN(1) LOCAL_ADDR(N)
  // ECN(1) DGRAM_PAYLOAD(N)
  //
  // When encoding, REMOTE_ADDRLEN and LOCAL_ADDRLEN are decremented
  // by 1.
  if (static_cast<size_t>(nread) < len) {
    return 0;
  }

  auto p = buf.data();
  if (*p != static_cast<uint8_t>(QUICIPCType::DGRAM_FORWARD)) {
    LOG(ERROR) << "Unknown QUICIPCType: " << static_cast<uint32_t>(*p);

    return -1;
  }

  ++p;

  auto pkt = std::make_unique<QUICPacket>();

  auto remote_addrlen = static_cast<size_t>(*p++) + 1;
  if (remote_addrlen > sizeof(sockaddr_storage)) {
    LOG(ERROR) << "The length of remote address is too large: "
               << remote_addrlen;

    return -1;
  }

  len += remote_addrlen;

  if (static_cast<size_t>(nread) < len) {
    LOG(ERROR) << "Insufficient QUIC IPC message length";

    return -1;
  }

  pkt->remote_addr.len = remote_addrlen;
  memcpy(&pkt->remote_addr.su, p, remote_addrlen);

  p += remote_addrlen;

  auto local_addrlen = static_cast<size_t>(*p++) + 1;
  if (local_addrlen > sizeof(sockaddr_storage)) {
    LOG(ERROR) << "The length of local address is too large: " << local_addrlen;

    return -1;
  }

  len += local_addrlen;

  if (static_cast<size_t>(nread) < len) {
    LOG(ERROR) << "Insufficient QUIC IPC message length";

    return -1;
  }

  pkt->local_addr.len = local_addrlen;
  memcpy(&pkt->local_addr.su, p, local_addrlen);

  p += local_addrlen;

  pkt->pi.ecn = *p++;

  auto datalen = nread - (p - buf.data());

  pkt->data.assign(p, p + datalen);

  // At the moment, UpstreamAddr index is unknown.
  pkt->upstream_addr_index = static_cast<size_t>(-1);

  ngtcp2_version_cid vc;

  auto rv = ngtcp2_pkt_decode_version_cid(&vc, p, datalen, SHRPX_QUIC_SCIDLEN);
  if (rv < 0) {
    LOG(ERROR) << "ngtcp2_pkt_decode_version_cid: " << ngtcp2_strerror(rv);

    return -1;
  }

  if (vc.dcidlen != SHRPX_QUIC_SCIDLEN) {
    LOG(ERROR) << "DCID length is invalid";
    return -1;
  }

  if (single_worker_) {
    auto faddr = single_worker_->find_quic_upstream_addr(pkt->local_addr);
    if (faddr == nullptr) {
      LOG(ERROR) << "No suitable upstream address found";

      return 0;
    }

    auto quic_conn_handler = single_worker_->get_quic_connection_handler();

    // Ignore return value
    quic_conn_handler->handle_packet(faddr, pkt->remote_addr, pkt->local_addr,
                                     pkt->pi, pkt->data);

    return 0;
  }

  auto &qkm = quic_keying_materials_->keying_materials.front();

  ConnectionID decrypted_dcid;

  if (decrypt_quic_connection_id(decrypted_dcid,
                                 vc.dcid + SHRPX_QUIC_CID_WORKER_ID_OFFSET,
                                 qkm.cid_decryption_ctx) != 0) {
    return -1;
  }

  auto worker = find_worker(decrypted_dcid.worker);
  if (worker == nullptr) {
    if (LOG_ENABLED(INFO)) {
      LOG(INFO) << "No worker to match Worker ID";
    }

    return 0;
  }

  WorkerEvent wev{
      .type = WorkerEventType::QUIC_PKT_FORWARD,
      .quic_pkt = std::move(pkt),
  };

  worker->send(std::move(wev));

  return 0;
}
#endif // ENABLE_HTTP3

} // namespace shrpx