1
2 /* Copyright 1998 by the Massachusetts Institute of Technology.
3 * Copyright (C) 2004-2017 by Daniel Stenberg
4 *
5 * Permission to use, copy, modify, and distribute this
6 * software and its documentation for any purpose and without
7 * fee is hereby granted, provided that the above copyright
8 * notice appear in all copies and that both that copyright
9 * notice and this permission notice appear in supporting
10 * documentation, and that the name of M.I.T. not be used in
11 * advertising or publicity pertaining to distribution of the
12 * software without specific, written prior permission.
13 * M.I.T. makes no representations about the suitability of
14 * this software for any purpose. It is provided "as is"
15 * without express or implied warranty.
16 */
17
18 #include "ares_setup.h"
19
20 #ifdef HAVE_SYS_UIO_H
21 # include <sys/uio.h>
22 #endif
23 #ifdef HAVE_NETINET_IN_H
24 # include <netinet/in.h>
25 #endif
26 #ifdef HAVE_NETINET_TCP_H
27 # include <netinet/tcp.h>
28 #endif
29 #ifdef HAVE_NETDB_H
30 # include <netdb.h>
31 #endif
32 #ifdef HAVE_ARPA_INET_H
33 # include <arpa/inet.h>
34 #endif
35 #ifdef HAVE_ARPA_NAMESER_H
36 # include <arpa/nameser.h>
37 #else
38 # include "nameser.h"
39 #endif
40 #ifdef HAVE_ARPA_NAMESER_COMPAT_H
41 # include <arpa/nameser_compat.h>
42 #endif
43
44 #ifdef HAVE_STRINGS_H
45 # include <strings.h>
46 #endif
47 #ifdef HAVE_SYS_IOCTL_H
48 # include <sys/ioctl.h>
49 #endif
50 #ifdef NETWARE
51 # include <sys/filio.h>
52 #endif
53
54 #include <assert.h>
55 #include <fcntl.h>
56 #include <limits.h>
57
58 #include "ares.h"
59 #include "ares_dns.h"
60 #include "ares_nowarn.h"
61 #include "ares_private.h"
62
63
64 static int try_again(int errnum);
65 static void write_tcp_data(ares_channel channel, fd_set *write_fds,
66 ares_socket_t write_fd, struct timeval *now);
67 static void read_tcp_data(ares_channel channel, fd_set *read_fds,
68 ares_socket_t read_fd, struct timeval *now);
69 static void read_udp_packets(ares_channel channel, fd_set *read_fds,
70 ares_socket_t read_fd, struct timeval *now);
71 static void advance_tcp_send_queue(ares_channel channel, int whichserver,
72 ares_ssize_t num_bytes);
73 static void process_timeouts(ares_channel channel, struct timeval *now);
74 static void process_broken_connections(ares_channel channel,
75 struct timeval *now);
76 static void process_answer(ares_channel channel, unsigned char *abuf,
77 int alen, int whichserver, int tcp,
78 struct timeval *now);
79 static void handle_error(ares_channel channel, int whichserver,
80 struct timeval *now);
81 static void skip_server(ares_channel channel, struct query *query,
82 int whichserver);
83 static void next_server(ares_channel channel, struct query *query,
84 struct timeval *now);
85 static int open_tcp_socket(ares_channel channel, struct server_state *server);
86 static int open_udp_socket(ares_channel channel, struct server_state *server);
87 static int same_questions(const unsigned char *qbuf, int qlen,
88 const unsigned char *abuf, int alen);
89 static int same_address(struct sockaddr *sa, struct ares_addr *aa);
90 static void end_query(ares_channel channel, struct query *query, int status,
91 unsigned char *abuf, int alen);
92
/* Return 1 (true) when 'now' is exactly the 'check' time or later,
 * 0 otherwise. */
int ares__timedout(struct timeval *now,
                   struct timeval *check)
{
  long diff_sec = (long)(now->tv_sec - check->tv_sec);

  if (diff_sec != 0)
    return (diff_sec > 0) ? 1 : 0;

  /* Whole seconds match; the microsecond fields decide. */
  return (now->tv_usec >= check->tv_usec) ? 1 : 0;
}
107
/* Advance *now by 'millisecs' milliseconds, keeping tv_usec normalized
 * to the [0, 1000000) range. */
static void timeadd(struct timeval *now, int millisecs)
{
  long extra_usec = (long)(millisecs % 1000) * 1000;

  now->tv_sec += millisecs / 1000;
  now->tv_usec += extra_usec;

  /* extra_usec < 1000000 and tv_usec started < 1000000, so a single
   * carry is always enough. */
  if (now->tv_usec >= 1000000) {
    now->tv_usec -= 1000000;
    now->tv_sec += 1;
  }
}
119
/*
 * generic process function
 *
 * Common driver behind ares_process() and ares_process_fd(): snapshot the
 * current time once, then run every processing stage.  Each stage accepts
 * either fd_sets (select-style callers pass read_fds/write_fds) or explicit
 * descriptors (event-loop callers pass read_fd/write_fd), with the unused
 * form set to NULL / ARES_SOCKET_BAD.
 */
static void processfds(ares_channel channel,
                       fd_set *read_fds, ares_socket_t read_fd,
                       fd_set *write_fds, ares_socket_t write_fd)
{
  struct timeval now = ares__tvnow();

  /* Flush pending TCP writes first so responses read below can already be
   * answers to just-sent queries, then drain readable sockets, then handle
   * per-query timeouts and finally tear down any connection flagged broken
   * during the earlier stages. */
  write_tcp_data(channel, write_fds, write_fd, &now);
  read_tcp_data(channel, read_fds, read_fd, &now);
  read_udp_packets(channel, read_fds, read_fd, &now);
  process_timeouts(channel, &now);
  process_broken_connections(channel, &now);
}
135
/* Something interesting happened on the wire, or there was a timeout.
 * See what's up and respond accordingly.
 *
 * select()-style entry point: the caller passes the fd_sets that came back
 * from select(); explicit per-fd arguments are disabled (ARES_SOCKET_BAD).
 */
void ares_process(ares_channel channel, fd_set *read_fds, fd_set *write_fds)
{
  processfds(channel, read_fds, ARES_SOCKET_BAD, write_fds, ARES_SOCKET_BAD);
}
143
/* Something interesting happened on the wire, or there was a timeout.
 * See what's up and respond accordingly.
 *
 * Event-loop-style entry point: the caller names the specific descriptor(s)
 * that became ready instead of passing fd_sets (which are passed as NULL).
 */
void ares_process_fd(ares_channel channel,
                     ares_socket_t read_fd, /* use ARES_SOCKET_BAD or valid
                                               file descriptors */
                     ares_socket_t write_fd)
{
  processfds(channel, NULL, read_fd, NULL, write_fd);
}
154
155
/* Return 1 if the specified error number describes a readiness error, or 0
 * otherwise. This is mostly for HP-UX, which could return EAGAIN or
 * EWOULDBLOCK. See this man page
 *
 * http://devrsrc1.external.hp.com/STKS/cgi-bin/man2html?
 * manpage=/usr/share/man/man2.Z/send.2
 */
static int try_again(int errnum)
{
#if !defined EWOULDBLOCK && !defined EAGAIN
#error "Neither EWOULDBLOCK nor EAGAIN defined"
#endif
#ifdef EWOULDBLOCK
  if (errnum == EWOULDBLOCK)
    return 1;
#endif
#if defined EAGAIN && EAGAIN != EWOULDBLOCK
  if (errnum == EAGAIN)
    return 1;
#endif
  return 0;
}
181
socket_writev(ares_channel channel,ares_socket_t s,const struct iovec * vec,int len)182 static ares_ssize_t socket_writev(ares_channel channel, ares_socket_t s, const struct iovec * vec, int len)
183 {
184 if (channel->sock_funcs)
185 return channel->sock_funcs->asendv(s, vec, len, channel->sock_func_cb_data);
186
187 return writev(s, vec, len);
188 }
189
socket_write(ares_channel channel,ares_socket_t s,const void * data,size_t len)190 static ares_ssize_t socket_write(ares_channel channel, ares_socket_t s, const void * data, size_t len)
191 {
192 if (channel->sock_funcs)
193 {
194 struct iovec vec;
195 vec.iov_base = (void*)data;
196 vec.iov_len = len;
197 return channel->sock_funcs->asendv(s, &vec, 1, channel->sock_func_cb_data);
198 }
199 return swrite(s, data, len);
200 }
201
/* If any TCP sockets select true for writing, write out queued data
 * we have for them.
 *
 * A server is acted on only when it has queued requests, a live TCP
 * socket that is not flagged broken, and that socket is marked writable
 * either in write_fds (select-style) or as write_fd (event-loop style).
 */
static void write_tcp_data(ares_channel channel,
                           fd_set *write_fds,
                           ares_socket_t write_fd,
                           struct timeval *now)
{
  struct server_state *server;
  struct send_request *sendreq;
  struct iovec *vec;
  int i;
  ares_ssize_t scount;  /* bytes written on the single-buffer fallback path */
  ares_ssize_t wcount;  /* bytes written on the writev fast path */
  size_t n;

  if(!write_fds && (write_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure server has data to send and is selected in write_fds or
         write_fd. */
      server = &channel->servers[i];
      if (!server->qhead || server->tcp_socket == ARES_SOCKET_BAD ||
          server->is_broken)
        continue;

      if(write_fds) {
        if(!FD_ISSET(server->tcp_socket, write_fds))
          continue;
      }
      else {
        if(server->tcp_socket != write_fd)
          continue;
      }

      if(write_fds)
        /* If there's an error and we close this socket, then open
         * another with the same fd to talk to another server, then we
         * don't want to think that it was the new socket that was
         * ready. This is not disastrous, but is likely to result in
         * extra system calls and confusion. */
        FD_CLR(server->tcp_socket, write_fds);

      /* Count the number of send queue items. */
      n = 0;
      for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
        n++;

      /* Allocate iovecs so we can send all our data at once. */
      vec = ares_malloc(n * sizeof(struct iovec));
      if (vec)
        {
          /* Fill in the iovecs and send. */
          n = 0;
          for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
            {
              vec[n].iov_base = (char *) sendreq->data;
              vec[n].iov_len = sendreq->len;
              n++;
            }
          wcount = socket_writev(channel, server->tcp_socket, vec, (int)n);
          ares_free(vec);
          if (wcount < 0)
            {
              /* EWOULDBLOCK/EAGAIN just means "try later"; any other
               * error breaks the connection to this server. */
              if (!try_again(SOCKERRNO))
                handle_error(channel, i, now);
              continue;
            }

          /* Advance the send queue by as many bytes as we sent. */
          advance_tcp_send_queue(channel, i, wcount);
        }
      else
        {
          /* Can't allocate iovecs; just send the first request. */
          sendreq = server->qhead;

          scount = socket_write(channel, server->tcp_socket, sendreq->data, sendreq->len);
          if (scount < 0)
            {
              if (!try_again(SOCKERRNO))
                handle_error(channel, i, now);
              continue;
            }

          /* Advance the send queue by as many bytes as we sent. */
          advance_tcp_send_queue(channel, i, scount);
        }
    }
}
295
/* Consume the given number of bytes from the head of the TCP send queue.
 *
 * Called after a successful write with the byte count the kernel actually
 * accepted; fully-sent requests are popped and freed, and a partially-sent
 * head request has its data pointer/length adjusted for the next attempt.
 */
static void advance_tcp_send_queue(ares_channel channel, int whichserver,
                                   ares_ssize_t num_bytes)
{
  struct send_request *sendreq;
  struct server_state *server = &channel->servers[whichserver];
  while (num_bytes > 0) {
    sendreq = server->qhead;
    if ((size_t)num_bytes >= sendreq->len) {
      /* This request went out in full: pop it and release its storage
       * (data_storage is only set when the owning query ended early and
       * the request got its own copy of the packet). */
      num_bytes -= sendreq->len;
      server->qhead = sendreq->next;
      if (sendreq->data_storage)
        ares_free(sendreq->data_storage);
      ares_free(sendreq);
      if (server->qhead == NULL) {
        /* Queue drained: tell the state callback we no longer need
         * writability notifications on this socket. */
        SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 0);
        server->qtail = NULL;

        /* qhead is NULL so we cannot continue this loop */
        break;
      }
    }
    else {
      /* Partial write: advance past the bytes already sent. */
      sendreq->data += num_bytes;
      sendreq->len -= num_bytes;
      num_bytes = 0;
    }
  }
}
325
/* Receive a datagram on socket s, capturing the sender address in
 * *from/*from_len.  User-installed socket functions take priority;
 * otherwise use recvfrom(), or plain sread() on platforms without it
 * (in which case the sender address is not filled in). */
static ares_ssize_t socket_recvfrom(ares_channel channel,
                                    ares_socket_t s,
                                    void * data,
                                    size_t data_len,
                                    int flags,
                                    struct sockaddr *from,
                                    ares_socklen_t *from_len)
{
  if (channel->sock_funcs)
    return channel->sock_funcs->arecvfrom(s, data, data_len,
                                          flags, from, from_len,
                                          channel->sock_func_cb_data);

#ifdef HAVE_RECVFROM
  return recvfrom(s, data, data_len, flags, from, from_len);
#else
  return sread(s, data, data_len);
#endif
}
345
socket_recv(ares_channel channel,ares_socket_t s,void * data,size_t data_len)346 static ares_ssize_t socket_recv(ares_channel channel,
347 ares_socket_t s,
348 void * data,
349 size_t data_len)
350 {
351 if (channel->sock_funcs)
352 return channel->sock_funcs->arecvfrom(s, data, data_len, 0, 0, 0,
353 channel->sock_func_cb_data);
354
355 return sread(s, data, data_len);
356 }
357
/* If any TCP socket selects true for reading, read some data,
 * allocate a buffer if we finish reading the length word, and process
 * a packet if we finish reading one.
 *
 * DNS-over-TCP frames each message with a 2-byte big-endian length
 * prefix; tcp_lenbuf/tcp_lenbuf_pos track progress through that prefix
 * and tcp_buffer/tcp_buffer_pos through the message body, so a message
 * may be assembled across many calls.
 */
static void read_tcp_data(ares_channel channel, fd_set *read_fds,
                          ares_socket_t read_fd, struct timeval *now)
{
  struct server_state *server;
  int i;
  ares_ssize_t count;

  if(!read_fds && (read_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure the server has a socket and is selected in read_fds. */
      server = &channel->servers[i];
      if (server->tcp_socket == ARES_SOCKET_BAD || server->is_broken)
        continue;

      if(read_fds) {
        if(!FD_ISSET(server->tcp_socket, read_fds))
          continue;
      }
      else {
        if(server->tcp_socket != read_fd)
          continue;
      }

      if(read_fds)
        /* If there's an error and we close this socket, then open another
         * with the same fd to talk to another server, then we don't want to
         * think that it was the new socket that was ready. This is not
         * disastrous, but is likely to result in extra system calls and
         * confusion. */
        FD_CLR(server->tcp_socket, read_fds);

      if (server->tcp_lenbuf_pos != 2)
        {
          /* We haven't yet read a length word, so read that (or
           * what's left to read of it).
           */
          count = socket_recv(channel, server->tcp_socket,
                              server->tcp_lenbuf + server->tcp_lenbuf_pos,
                              2 - server->tcp_lenbuf_pos);
          if (count <= 0)
            {
              /* 0 means orderly close by the peer; -1 with a non-retryable
               * errno means a real failure.  Either way the connection is
               * unusable. */
              if (!(count == -1 && try_again(SOCKERRNO)))
                handle_error(channel, i, now);
              continue;
            }

          server->tcp_lenbuf_pos += (int)count;
          if (server->tcp_lenbuf_pos == 2)
            {
              /* We finished reading the length word. Decode the
               * length and allocate a buffer for the data.
               */
              server->tcp_length = server->tcp_lenbuf[0] << 8
                | server->tcp_lenbuf[1];
              server->tcp_buffer = ares_malloc(server->tcp_length);
              if (!server->tcp_buffer) {
                handle_error(channel, i, now);
                return; /* bail out on malloc failure. TODO: make this
                           function return error codes */
              }
              server->tcp_buffer_pos = 0;
            }
        }
      else
        {
          /* Read data into the allocated buffer. */
          count = socket_recv(channel, server->tcp_socket,
                              server->tcp_buffer + server->tcp_buffer_pos,
                              server->tcp_length - server->tcp_buffer_pos);
          if (count <= 0)
            {
              if (!(count == -1 && try_again(SOCKERRNO)))
                handle_error(channel, i, now);
              continue;
            }

          server->tcp_buffer_pos += (int)count;
          if (server->tcp_buffer_pos == server->tcp_length)
            {
              /* We finished reading this answer; process it and
               * prepare to read another length word.
               */
              process_answer(channel, server->tcp_buffer, server->tcp_length,
                             i, 1, now);
              ares_free(server->tcp_buffer);
              server->tcp_buffer = NULL;
              server->tcp_lenbuf_pos = 0;
              server->tcp_buffer_pos = 0;
            }
        }
    }
}
458
/* If any UDP sockets select true for reading, process them.
 *
 * For each readable UDP server socket, drain as many datagrams as are
 * immediately available and hand each to process_answer().
 */
static void read_udp_packets(ares_channel channel, fd_set *read_fds,
                             ares_socket_t read_fd, struct timeval *now)
{
  struct server_state *server;
  int i;
  ares_ssize_t count;
  unsigned char buf[MAXENDSSZ + 1];
#ifdef HAVE_RECVFROM
  ares_socklen_t fromlen;
  union {
    struct sockaddr sa;
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } from;
#endif
  /* NOTE(review): from/fromlen are declared only under HAVE_RECVFROM yet
   * referenced unconditionally in the recv call below — presumably this
   * file is only built with HAVE_RECVFROM defined, or an #ifdef was lost;
   * verify against the build configuration. */

  if(!read_fds && (read_fd == ARES_SOCKET_BAD))
    /* no possible action */
    return;

  for (i = 0; i < channel->nservers; i++)
    {
      /* Make sure the server has a socket and is selected in read_fds. */
      server = &channel->servers[i];

      if (server->udp_socket == ARES_SOCKET_BAD || server->is_broken)
        continue;

      if(read_fds) {
        if(!FD_ISSET(server->udp_socket, read_fds))
          continue;
      }
      else {
        if(server->udp_socket != read_fd)
          continue;
      }

      if(read_fds)
        /* If there's an error and we close this socket, then open
         * another with the same fd to talk to another server, then we
         * don't want to think that it was the new socket that was
         * ready. This is not disastrous, but is likely to result in
         * extra system calls and confusion. */
        FD_CLR(server->udp_socket, read_fds);

      /* To reduce event loop overhead, read and process as many
       * packets as we can. */
      do {
        if (server->udp_socket == ARES_SOCKET_BAD)
          /* Socket was torn down by an earlier iteration (e.g. via
           * handle_error inside process_answer); force loop exit. */
          count = 0;

        else {
          if (server->addr.family == AF_INET)
            fromlen = sizeof(from.sa4);
          else
            fromlen = sizeof(from.sa6);
          count = socket_recvfrom(channel, server->udp_socket, (void *)buf,
                                  sizeof(buf), 0, &from.sa, &fromlen);
        }

        if (count == -1 && try_again(SOCKERRNO))
          /* No more data right now; 'continue' re-tests the loop
           * condition (count is -1), which ends the drain loop. */
          continue;
        else if (count <= 0)
          handle_error(channel, i, now);
#ifdef HAVE_RECVFROM
        else if (!same_address(&from.sa, &server->addr))
          /* The address the response comes from does not match the address we
           * sent the request to. Someone may be attempting to perform a cache
           * poisoning attack. */
          break;
#endif
        else
          process_answer(channel, buf, (int)count, i, 0, now);
      } while (count > 0);
    }
}
536
/* If any queries have timed out, note the timeout and move them on.
 *
 * Queries are bucketed by the second their deadline falls in
 * (queries_by_timeout, ARES_TIMEOUT_TABLE_SIZE buckets), so only buckets
 * between the last processed second and "now" need scanning.
 */
static void process_timeouts(ares_channel channel, struct timeval *now)
{
  time_t t; /* the time of the timeouts we're processing */
  struct query *query;
  struct list_node* list_head;
  struct list_node* list_node;

  /* Process all the timeouts that have fired since the last time we processed
   * timeouts. If things are going well, then we'll have hundreds/thousands of
   * queries that fall into future buckets, and only a handful of requests
   * that fall into the "now" bucket, so this should be quite quick.
   */
  for (t = channel->last_timeout_processed; t <= now->tv_sec; t++)
    {
      list_head = &(channel->queries_by_timeout[t % ARES_TIMEOUT_TABLE_SIZE]);
      for (list_node = list_head->next; list_node != list_head; )
        {
          query = list_node->data;
          list_node = list_node->next; /* in case the query gets deleted */
          /* A bucket can also hold queries whose deadline is a full table
           * period away; ares__timedout() filters those out.  tv_sec == 0
           * marks a query with no armed timeout. */
          if (query->timeout.tv_sec && ares__timedout(now, &query->timeout))
            {
              query->error_status = ARES_ETIMEOUT;
              ++query->timeouts;
              next_server(channel, query, now);
            }
        }
    }
  channel->last_timeout_processed = now->tv_sec;
}
567
568 /* Handle an answer from a server. */
process_answer(ares_channel channel,unsigned char * abuf,int alen,int whichserver,int tcp,struct timeval * now)569 static void process_answer(ares_channel channel, unsigned char *abuf,
570 int alen, int whichserver, int tcp,
571 struct timeval *now)
572 {
573 int tc, rcode, packetsz;
574 unsigned short id;
575 struct query *query;
576 struct list_node* list_head;
577 struct list_node* list_node;
578
579 /* If there's no room in the answer for a header, we can't do much
580 * with it. */
581 if (alen < HFIXEDSZ)
582 return;
583
584 /* Grab the query ID, truncate bit, and response code from the packet. */
585 id = DNS_HEADER_QID(abuf);
586 tc = DNS_HEADER_TC(abuf);
587 rcode = DNS_HEADER_RCODE(abuf);
588
589 /* Find the query corresponding to this packet. The queries are
590 * hashed/bucketed by query id, so this lookup should be quick. Note that
591 * both the query id and the questions must be the same; when the query id
592 * wraps around we can have multiple outstanding queries with the same query
593 * id, so we need to check both the id and question.
594 */
595 query = NULL;
596 list_head = &(channel->queries_by_qid[id % ARES_QID_TABLE_SIZE]);
597 for (list_node = list_head->next; list_node != list_head;
598 list_node = list_node->next)
599 {
600 struct query *q = list_node->data;
601 if ((q->qid == id) && same_questions(q->qbuf, q->qlen, abuf, alen))
602 {
603 query = q;
604 break;
605 }
606 }
607 if (!query)
608 return;
609
610 packetsz = PACKETSZ;
611 /* If we use EDNS and server answers with one of these RCODES, the protocol
612 * extension is not understood by the responder. We must retry the query
613 * without EDNS enabled.
614 */
615 if (channel->flags & ARES_FLAG_EDNS)
616 {
617 packetsz = channel->ednspsz;
618 if (rcode == NOTIMP || rcode == FORMERR || rcode == SERVFAIL)
619 {
620 int qlen = (query->tcplen - 2) - EDNSFIXEDSZ;
621 channel->flags ^= ARES_FLAG_EDNS;
622 query->tcplen -= EDNSFIXEDSZ;
623 query->qlen -= EDNSFIXEDSZ;
624 query->tcpbuf[0] = (unsigned char)((qlen >> 8) & 0xff);
625 query->tcpbuf[1] = (unsigned char)(qlen & 0xff);
626 DNS_HEADER_SET_ARCOUNT(query->tcpbuf + 2, 0);
627 query->tcpbuf = ares_realloc(query->tcpbuf, query->tcplen);
628 query->qbuf = query->tcpbuf + 2;
629 ares__send_query(channel, query, now);
630 return;
631 }
632 }
633
634 /* If we got a truncated UDP packet and are not ignoring truncation,
635 * don't accept the packet, and switch the query to TCP if we hadn't
636 * done so already.
637 */
638 if ((tc || alen > packetsz) && !tcp && !(channel->flags & ARES_FLAG_IGNTC))
639 {
640 if (!query->using_tcp)
641 {
642 query->using_tcp = 1;
643 ares__send_query(channel, query, now);
644 }
645 return;
646 }
647
648 /* Limit alen to PACKETSZ if we aren't using TCP (only relevant if we
649 * are ignoring truncation.
650 */
651 if (alen > packetsz && !tcp)
652 alen = packetsz;
653
654 /* If we aren't passing through all error packets, discard packets
655 * with SERVFAIL, NOTIMP, or REFUSED response codes.
656 */
657 if (!(channel->flags & ARES_FLAG_NOCHECKRESP))
658 {
659 if (rcode == SERVFAIL || rcode == NOTIMP || rcode == REFUSED)
660 {
661 skip_server(channel, query, whichserver);
662 if (query->server == whichserver)
663 next_server(channel, query, now);
664 return;
665 }
666 }
667
668 end_query(channel, query, ARES_SUCCESS, abuf, alen);
669 }
670
671 /* Close all the connections that are no longer usable. */
process_broken_connections(ares_channel channel,struct timeval * now)672 static void process_broken_connections(ares_channel channel,
673 struct timeval *now)
674 {
675 int i;
676 for (i = 0; i < channel->nservers; i++)
677 {
678 struct server_state *server = &channel->servers[i];
679 if (server->is_broken)
680 {
681 handle_error(channel, i, now);
682 }
683 }
684 }
685
/* Swap the contents of two lists
 *
 * The lists are circular doubly-linked lists with sentinel head nodes, so
 * a plain struct swap is not enough: the prev/next pointers of each list's
 * first and last real nodes must be re-aimed at the other head.  An empty
 * list is re-initialized instead (its sentinel points at itself, and those
 * self-pointers must reference the new head, not the old one).
 */
static void swap_lists(struct list_node* head_a,
                       struct list_node* head_b)
{
  int is_a_empty = ares__is_list_empty(head_a);
  int is_b_empty = ares__is_list_empty(head_b);
  /* Copy both sentinels up front so each can be installed into the other
   * head regardless of write order below. */
  struct list_node old_a = *head_a;
  struct list_node old_b = *head_b;

  if (is_a_empty) {
    ares__init_list_head(head_b);
  } else {
    *head_b = old_a;
    old_a.next->prev = head_b;
    old_a.prev->next = head_b;
  }
  if (is_b_empty) {
    ares__init_list_head(head_a);
  } else {
    *head_a = old_b;
    old_b.next->prev = head_a;
    old_b.prev->next = head_a;
  }
}
710
/* Tear down the connection to a server that failed and re-route every
 * query that was in flight to it.
 *
 * whichserver is the index into channel->servers of the failed server.
 */
static void handle_error(ares_channel channel, int whichserver,
                         struct timeval *now)
{
  struct server_state *server;
  struct query *query;
  struct list_node list_head;
  struct list_node* list_node;

  server = &channel->servers[whichserver];

  /* Reset communications with this server. */
  ares__close_sockets(channel, server);

  /* Tell all queries talking to this server to move on and not try this
   * server again. We steal the current list of queries that were in-flight to
   * this server, since when we call next_server this can cause the queries to
   * be re-sent to this server, which will re-insert these queries in that
   * same server->queries_to_server list.
   */
  ares__init_list_head(&list_head);
  swap_lists(&list_head, &(server->queries_to_server));
  for (list_node = list_head.next; list_node != &list_head; )
    {
      query = list_node->data;
      list_node = list_node->next; /* in case the query gets deleted */
      assert(query->server == whichserver);
      skip_server(channel, query, whichserver);
      next_server(channel, query, now);
    }
  /* Each query should have removed itself from our temporary list as
   * it re-sent itself or finished up...
   */
  assert(ares__is_list_empty(&list_head));
}
745
/* Record that this server misbehaved for this query.
 *
 * The given server gave us problems with this query, so if we have the
 * luxury of using other servers, then let's skip the potentially broken
 * server and just use the others. If we only have one server and we need to
 * retry then we should just go ahead and re-use that server, since it's our
 * only hope; perhaps we just got unlucky, and retrying will work (eg, the
 * server timed out our TCP connection just as we were sending another
 * request).
 */
static void skip_server(ares_channel channel, struct query *query,
                        int whichserver)
{
  if (channel->nservers <= 1)
    return; /* sole server: it stays eligible no matter what */

  query->server_info[whichserver].skip_server = 1;
}
762
/* Move a query on to its next eligible server attempt, or fail it when
 * all attempts are exhausted. */
static void next_server(ares_channel channel, struct query *query,
                        struct timeval *now)
{
  /* We need to try each server channel->tries times. We have channel->nservers
   * servers to try. In total, we need to do channel->nservers * channel->tries
   * attempts. Use query->try to remember how many times we already attempted
   * this query. Use modular arithmetic to find the next server to try. */
  while (++(query->try_count) < (channel->nservers * channel->tries))
    {
      struct server_state *server;

      /* Move on to the next server. */
      query->server = (query->server + 1) % channel->nservers;
      server = &channel->servers[query->server];

      /* We don't want to use this server if (1) we decided this connection is
       * broken, and thus about to be closed, (2) we've decided to skip this
       * server because of earlier errors we encountered, or (3) we already
       * sent this query over this exact connection.
       */
      if (!server->is_broken &&
           !query->server_info[query->server].skip_server &&
           !(query->using_tcp &&
             (query->server_info[query->server].tcp_connection_generation ==
              server->tcp_connection_generation)))
        {
           ares__send_query(channel, query, now);
           return;
        }

      /* You might think that with TCP we only need one try. However, even
       * when using TCP, servers can time-out our connection just as we're
       * sending a request, or close our connection because they die, or never
       * send us a reply because they get wedged or tickle a bug that drops
       * our request.
       */
    }

  /* If we are here, all attempts to perform query failed. */
  end_query(channel, query, query->error_status, NULL, 0);
}
804
/* Transmit a query to its currently-assigned server (TCP or UDP), arm its
 * timeout, and (re)register it in the channel's bookkeeping lists.
 *
 * On any transmission failure the query is moved to the next server via
 * skip_server()/next_server() rather than failed outright.
 */
void ares__send_query(ares_channel channel, struct query *query,
                      struct timeval *now)
{
  struct send_request *sendreq;
  struct server_state *server;
  int timeplus;  /* per-attempt timeout in milliseconds */

  server = &channel->servers[query->server];
  if (query->using_tcp)
    {
      /* Make sure the TCP socket for this server is set up and queue
       * a send request.
       */
      if (server->tcp_socket == ARES_SOCKET_BAD)
        {
          if (open_tcp_socket(channel, server) == -1)
            {
              skip_server(channel, query, query->server);
              next_server(channel, query, now);
              return;
            }
        }
      sendreq = ares_malloc(sizeof(struct send_request));
      if (!sendreq)
        {
        end_query(channel, query, ARES_ENOMEM, NULL, 0);
          return;
        }
      memset(sendreq, 0, sizeof(struct send_request));
      /* To make the common case fast, we avoid copies by using the query's
       * tcpbuf for as long as the query is alive. In the rare case where the
       * query ends while it's queued for transmission, then we give the
       * sendreq its own copy of the request packet and put it in
       * sendreq->data_storage.
       */
      sendreq->data_storage = NULL;
      sendreq->data = query->tcpbuf;
      sendreq->len = query->tcplen;
      sendreq->owner_query = query;
      sendreq->next = NULL;
      /* Append to the server's send queue; an empty queue also means we
       * must start asking for write-readiness on this socket. */
      if (server->qtail)
        server->qtail->next = sendreq;
      else
        {
          SOCK_STATE_CALLBACK(channel, server->tcp_socket, 1, 1);
          server->qhead = sendreq;
        }
      server->qtail = sendreq;
      /* Remember which incarnation of the connection carried this query so
       * next_server() can avoid re-sending on the exact same connection. */
      query->server_info[query->server].tcp_connection_generation =
        server->tcp_connection_generation;
    }
  else
    {
      if (server->udp_socket == ARES_SOCKET_BAD)
        {
          if (open_udp_socket(channel, server) == -1)
            {
              skip_server(channel, query, query->server);
              next_server(channel, query, now);
              return;
            }
        }
      if (socket_write(channel, server->udp_socket, query->qbuf, query->qlen) == -1)
        {
          /* FIXME: Handle EAGAIN here since it likely can happen. */
          skip_server(channel, query, query->server);
          next_server(channel, query, now);
          return;
        }
    }

  /* For each trip through the entire server list, double the channel's
   * assigned timeout, avoiding overflow. If channel->timeout is negative,
   * leave it as-is, even though that should be impossible here.
   */
  timeplus = channel->timeout;
  {
    /* How many times do we want to double it? Presume sane values here. */
    const int shift = query->try_count / channel->nservers;

    /* Is there enough room to shift timeplus left that many times?
     *
     * To find out, confirm that all of the bits we'll shift away are zero.
     * Stop considering a shift if we get to the point where we could shift
     * a 1 into the sign bit (i.e. when shift is within two of the bit
     * count).
     *
     * This has the side benefit of leaving negative numbers unchanged.
     */
    if(shift <= (int)(sizeof(int) * CHAR_BIT - 1)
       && (timeplus >> (sizeof(int) * CHAR_BIT - 1 - shift)) == 0)
      {
        timeplus <<= shift;
      }
  }

  query->timeout = *now;
  timeadd(&query->timeout, timeplus);
  /* Keep track of queries bucketed by timeout, so we can process
   * timeout events quickly.
   */
  ares__remove_from_list(&(query->queries_by_timeout));
  ares__insert_in_list(
    &(query->queries_by_timeout),
    &(channel->queries_by_timeout[query->timeout.tv_sec %
                                  ARES_TIMEOUT_TABLE_SIZE]));

  /* Keep track of queries bucketed by server, so we can process server
   * errors quickly.
   */
  ares__remove_from_list(&(query->queries_to_server));
  ares__insert_in_list(&(query->queries_to_server),
                       &(server->queries_to_server));
}
919
/*
 * setsocknonblock sets the given socket to either blocking or non-blocking
 * mode based on the 'nonblock' boolean argument. This function is highly
 * portable.
 *
 * Returns 0 on success (or unconditionally when blocking sockets are
 * requested at build time); otherwise the underlying fcntl/ioctl result.
 * Exactly one platform branch below is compiled in.
 */
static int setsocknonblock(ares_socket_t sockfd,    /* operate on this */
                           int nonblock   /* TRUE or FALSE */)
{
#if defined(USE_BLOCKING_SOCKETS)

  return 0; /* returns success */

#elif defined(HAVE_FCNTL_O_NONBLOCK)

  /* most recent unix versions */
  int flags;
  flags = fcntl(sockfd, F_GETFL, 0);
  if (FALSE != nonblock)
    return fcntl(sockfd, F_SETFL, flags | O_NONBLOCK);
  else
    return fcntl(sockfd, F_SETFL, flags & (~O_NONBLOCK));  /* LCOV_EXCL_LINE */

#elif defined(HAVE_IOCTL_FIONBIO)

  /* older unix versions */
  int flags = nonblock ? 1 : 0;
  return ioctl(sockfd, FIONBIO, &flags);

#elif defined(HAVE_IOCTLSOCKET_FIONBIO)

#ifdef WATT32
  char flags = nonblock ? 1 : 0;
#else
  /* Windows */
  unsigned long flags = nonblock ? 1UL : 0UL;
#endif
  return ioctlsocket(sockfd, FIONBIO, &flags);

#elif defined(HAVE_IOCTLSOCKET_CAMEL_FIONBIO)

  /* Amiga */
  long flags = nonblock ? 1L : 0L;
  return IoctlSocket(sockfd, FIONBIO, flags);

#elif defined(HAVE_SETSOCKOPT_SO_NONBLOCK)

  /* BeOS */
  long b = nonblock ? 1L : 0L;
  return setsockopt(sockfd, SOL_SOCKET, SO_NONBLOCK, &b, sizeof(b));

#else
#  error "no non-blocking method was found/used/set"
#endif
}
974
/* Apply channel-wide options to a freshly created socket: non-blocking
 * mode, close-on-exec, buffer sizes, device binding and a local source
 * address bind.  Returns 0 on success, -1 on a fatal setup error.
 * User-managed sockets (channel->sock_funcs) are left untouched. */
static int configure_socket(ares_socket_t s, int family, ares_channel channel)
{
  union {
    struct sockaddr sa;
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } laddr;

  /* do not set options for user-managed sockets */
  if (channel->sock_funcs)
    return 0;

  (void)setsocknonblock(s, TRUE);

#if defined(FD_CLOEXEC) && !defined(MSDOS)
  /* Configure the socket fd as close-on-exec. */
  if (fcntl(s, F_SETFD, FD_CLOEXEC) == -1)
    return -1; /* LCOV_EXCL_LINE */
#endif

  /* Honour any user-requested send/receive buffer sizes. */
  if (channel->socket_send_buffer_size > 0 &&
      setsockopt(s, SOL_SOCKET, SO_SNDBUF,
                 (void *)&channel->socket_send_buffer_size,
                 sizeof(channel->socket_send_buffer_size)) == -1)
    return -1;

  if (channel->socket_receive_buffer_size > 0 &&
      setsockopt(s, SOL_SOCKET, SO_RCVBUF,
                 (void *)&channel->socket_receive_buffer_size,
                 sizeof(channel->socket_receive_buffer_size)) == -1)
    return -1;

#ifdef SO_BINDTODEVICE
  if (channel->local_dev_name[0]) {
    if (setsockopt(s, SOL_SOCKET, SO_BINDTODEVICE,
                   channel->local_dev_name, sizeof(channel->local_dev_name))) {
      /* Only root can do this, and usually not fatal if it doesn't work, so */
      /* just continue on. */
    }
  }
#endif

  /* Bind to the user-configured local source address, if one was set. */
  switch (family) {
  case AF_INET:
    if (channel->local_ip4) {
      memset(&laddr.sa4, 0, sizeof(laddr.sa4));
      laddr.sa4.sin_family = AF_INET;
      laddr.sa4.sin_addr.s_addr = htonl(channel->local_ip4);
      if (bind(s, &laddr.sa, sizeof(laddr.sa4)) < 0)
        return -1;
    }
    break;
  case AF_INET6:
    if (memcmp(channel->local_ip6, &ares_in6addr_any,
               sizeof(channel->local_ip6)) != 0) {
      memset(&laddr.sa6, 0, sizeof(laddr.sa6));
      laddr.sa6.sin6_family = AF_INET6;
      memcpy(&laddr.sa6.sin6_addr, channel->local_ip6,
             sizeof(channel->local_ip6));
      if (bind(s, &laddr.sa, sizeof(laddr.sa6)) < 0)
        return -1;
    }
    break;
  default:
    break;
  }

  return 0;
}
1041
open_socket(ares_channel channel,int af,int type,int protocol)1042 static ares_socket_t open_socket(ares_channel channel, int af, int type, int protocol)
1043 {
1044 if (channel->sock_funcs != 0)
1045 return channel->sock_funcs->asocket(af,
1046 type,
1047 protocol,
1048 channel->sock_func_cb_data);
1049
1050 return socket(af, type, protocol);
1051 }
1052
/* Initiate a connection on sockfd to addr, routing through the
 * user-supplied socket functions when the channel has them installed. */
static int connect_socket(ares_channel channel, ares_socket_t sockfd,
                          const struct sockaddr * addr,
                          ares_socklen_t addrlen)
{
  if (!channel->sock_funcs)
    return connect(sockfd, addr, addrlen);

  return channel->sock_funcs->aconnect(sockfd, addr, addrlen,
                                       channel->sock_func_cb_data);
}
1065
/* Open, configure and (non-blockingly) connect a TCP socket to the given
 * server, storing the result in server->tcp_socket and bumping the
 * channel's TCP connection generation.  Returns 0 on success, -1 or a
 * negative callback error code on failure. */
static int open_tcp_socket(ares_channel channel, struct server_state *server)
{
  ares_socket_t s;
  int opt;
  ares_socklen_t salen;
  union {
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } saddr;
  struct sockaddr *sa;

  /* Build the destination address.  A per-server tcp_port overrides the
   * channel-wide tcp_port. */
  switch (server->addr.family)
    {
      case AF_INET:
        sa = (void *)&saddr.sa4;
        salen = sizeof(saddr.sa4);
        memset(sa, 0, salen);
        saddr.sa4.sin_family = AF_INET;
        if (server->addr.tcp_port) {
          saddr.sa4.sin_port = aresx_sitous(server->addr.tcp_port);
        } else {
          saddr.sa4.sin_port = aresx_sitous(channel->tcp_port);
        }
        memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
               sizeof(server->addr.addrV4));
        break;
      case AF_INET6:
        sa = (void *)&saddr.sa6;
        salen = sizeof(saddr.sa6);
        memset(sa, 0, salen);
        saddr.sa6.sin6_family = AF_INET6;
        if (server->addr.tcp_port) {
          saddr.sa6.sin6_port = aresx_sitous(server->addr.tcp_port);
        } else {
          saddr.sa6.sin6_port = aresx_sitous(channel->tcp_port);
        }
        memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
               sizeof(server->addr.addrV6));
        break;
      default:
        return -1;  /* LCOV_EXCL_LINE */
    }

  /* Acquire a socket. */
  s = open_socket(channel, server->addr.family, SOCK_STREAM, 0);
  if (s == ARES_SOCKET_BAD)
    return -1;

  /* Configure it. */
  if (configure_socket(s, server->addr.family, channel) < 0)
    {
       ares__socket_close(channel, s);
       return -1;
    }

#ifdef TCP_NODELAY
  /*
   * Disable the Nagle algorithm (only relevant for TCP sockets, and thus not
   * in configure_socket). In general, in DNS lookups we're pretty much
   * interested in firing off a single request and then waiting for a reply,
   * so batching isn't very interesting.
   */
  opt = 1;
  /* skipped for user-managed sockets, matching configure_socket */
  if (channel->sock_funcs == 0
     &&
     setsockopt(s, IPPROTO_TCP, TCP_NODELAY,
                (void *)&opt, sizeof(opt)) == -1)
    {
       ares__socket_close(channel, s);
       return -1;
    }
#endif

  /* Give the user's config callback a chance to reject the socket before we
   * connect. */
  if (channel->sock_config_cb)
    {
      int err = channel->sock_config_cb(s, SOCK_STREAM,
                                        channel->sock_config_cb_data);
      if (err < 0)
        {
          ares__socket_close(channel, s);
          return err;
        }
    }

  /* Connect to the server.  The socket is non-blocking, so an in-progress
   * connect is not an error. */
  if (connect_socket(channel, s, sa, salen) == -1)
    {
      int err = SOCKERRNO;

      if (err != EINPROGRESS && err != EWOULDBLOCK)
        {
          ares__socket_close(channel, s);
          return -1;
        }
    }

  /* Notify the user's create callback after the connect was initiated. */
  if (channel->sock_create_cb)
    {
      int err = channel->sock_create_cb(s, SOCK_STREAM,
                                        channel->sock_create_cb_data);
      if (err < 0)
        {
          ares__socket_close(channel, s);
          return err;
        }
    }

  SOCK_STATE_CALLBACK(channel, s, 1, 0);
  server->tcp_buffer_pos = 0;
  server->tcp_socket = s;
  server->tcp_connection_generation = ++channel->tcp_connection_generation;
  return 0;
}
1179
/* Open, configure and connect a UDP socket to the given server, storing the
 * result in server->udp_socket.  Returns 0 on success, -1 or a negative
 * callback error code on failure. */
static int open_udp_socket(ares_channel channel, struct server_state *server)
{
  ares_socket_t s;
  ares_socklen_t salen;
  union {
    struct sockaddr_in sa4;
    struct sockaddr_in6 sa6;
  } saddr;
  struct sockaddr *sa;

  /* Build the destination address.  A per-server udp_port overrides the
   * channel-wide udp_port. */
  switch (server->addr.family)
    {
      case AF_INET:
        sa = (void *)&saddr.sa4;
        salen = sizeof(saddr.sa4);
        memset(sa, 0, salen);
        saddr.sa4.sin_family = AF_INET;
        if (server->addr.udp_port) {
          saddr.sa4.sin_port = aresx_sitous(server->addr.udp_port);
        } else {
          saddr.sa4.sin_port = aresx_sitous(channel->udp_port);
        }
        memcpy(&saddr.sa4.sin_addr, &server->addr.addrV4,
               sizeof(server->addr.addrV4));
        break;
      case AF_INET6:
        sa = (void *)&saddr.sa6;
        salen = sizeof(saddr.sa6);
        memset(sa, 0, salen);
        saddr.sa6.sin6_family = AF_INET6;
        if (server->addr.udp_port) {
          saddr.sa6.sin6_port = aresx_sitous(server->addr.udp_port);
        } else {
          saddr.sa6.sin6_port = aresx_sitous(channel->udp_port);
        }
        memcpy(&saddr.sa6.sin6_addr, &server->addr.addrV6,
               sizeof(server->addr.addrV6));
        break;
      default:
        return -1;  /* LCOV_EXCL_LINE */
    }

  /* Acquire a socket. */
  s = open_socket(channel, server->addr.family, SOCK_DGRAM, 0);
  if (s == ARES_SOCKET_BAD)
    return -1;

  /* Set the socket non-blocking. */
  if (configure_socket(s, server->addr.family, channel) < 0)
    {
       ares__socket_close(channel, s);
       return -1;
    }

  /* Give the user's config callback a chance to reject the socket before we
   * connect. */
  if (channel->sock_config_cb)
    {
      int err = channel->sock_config_cb(s, SOCK_DGRAM,
                                        channel->sock_config_cb_data);
      if (err < 0)
        {
          ares__socket_close(channel, s);
          return err;
        }
    }

  /* Connect to the server.  Connecting a UDP socket fixes the peer address
   * so we can use send/recv; an in-progress result is not an error. */
  if (connect_socket(channel, s, sa, salen) == -1)
    {
      int err = SOCKERRNO;

      if (err != EINPROGRESS && err != EWOULDBLOCK)
        {
          ares__socket_close(channel, s);
          return -1;
        }
    }

  /* Notify the user's create callback after the connect. */
  if (channel->sock_create_cb)
    {
      int err = channel->sock_create_cb(s, SOCK_DGRAM,
                                        channel->sock_create_cb_data);
      if (err < 0)
        {
          ares__socket_close(channel, s);
          return err;
        }
    }

  SOCK_STATE_CALLBACK(channel, s, 1, 0);

  server->udp_socket = s;
  return 0;
}
1273
/* Compare the question sections of a DNS query buffer (qbuf/qlen) and an
 * answer buffer (abuf/alen).  Returns 1 if they contain the same number of
 * questions and every question in the query appears in the answer
 * (case-insensitive name compare, exact type and class match); returns 0
 * otherwise or on any parse failure. */
static int same_questions(const unsigned char *qbuf, int qlen,
                          const unsigned char *abuf, int alen)
{
  /* Parallel cursors/state for walking the query (q) and answer (a). */
  struct {
    const unsigned char *p;
    int qdcount;
    char *name;
    long namelen;
    int type;
    int dnsclass;
  } q, a;
  int i, j;

  /* Both buffers must at least hold a fixed-size DNS header. */
  if (qlen < HFIXEDSZ || alen < HFIXEDSZ)
    return 0;

  /* Extract qdcount from the request and reply buffers and compare them. */
  q.qdcount = DNS_HEADER_QDCOUNT(qbuf);
  a.qdcount = DNS_HEADER_QDCOUNT(abuf);
  if (q.qdcount != a.qdcount)
    return 0;

  /* For each question in qbuf, find it in abuf. */
  q.p = qbuf + HFIXEDSZ;
  for (i = 0; i < q.qdcount; i++)
    {
      /* Decode the question in the query.  ares_expand_name allocates
       * q.name; every exit path below must free it. */
      if (ares_expand_name(q.p, qbuf, qlen, &q.name, &q.namelen)
          != ARES_SUCCESS)
        return 0;
      q.p += q.namelen;
      /* Make sure the fixed-size type/class fields are in bounds. */
      if (q.p + QFIXEDSZ > qbuf + qlen)
        {
          ares_free(q.name);
          return 0;
        }
      q.type = DNS_QUESTION_TYPE(q.p);
      q.dnsclass = DNS_QUESTION_CLASS(q.p);
      q.p += QFIXEDSZ;

      /* Search for this question in the answer. */
      a.p = abuf + HFIXEDSZ;
      for (j = 0; j < a.qdcount; j++)
        {
          /* Decode the question in the answer. */
          if (ares_expand_name(a.p, abuf, alen, &a.name, &a.namelen)
              != ARES_SUCCESS)
            {
              ares_free(q.name);
              return 0;
            }
          a.p += a.namelen;
          if (a.p + QFIXEDSZ > abuf + alen)
            {
              ares_free(q.name);
              ares_free(a.name);
              return 0;
            }
          a.type = DNS_QUESTION_TYPE(a.p);
          a.dnsclass = DNS_QUESTION_CLASS(a.p);
          a.p += QFIXEDSZ;

          /* Compare the decoded questions.  Names compare
           * case-insensitively per DNS rules. */
          if (strcasecmp(q.name, a.name) == 0 && q.type == a.type
              && q.dnsclass == a.dnsclass)
            {
              ares_free(a.name);
              break;
            }
          ares_free(a.name);
        }

      ares_free(q.name);
      /* j == a.qdcount means the inner loop ran out without a match. */
      if (j == a.qdcount)
        return 0;
    }
  return 1;
}
1352
same_address(struct sockaddr * sa,struct ares_addr * aa)1353 static int same_address(struct sockaddr *sa, struct ares_addr *aa)
1354 {
1355 void *addr1;
1356 void *addr2;
1357
1358 if (sa->sa_family == aa->family)
1359 {
1360 switch (aa->family)
1361 {
1362 case AF_INET:
1363 addr1 = &aa->addrV4;
1364 addr2 = &((struct sockaddr_in *)sa)->sin_addr;
1365 if (memcmp(addr1, addr2, sizeof(aa->addrV4)) == 0)
1366 return 1; /* match */
1367 break;
1368 case AF_INET6:
1369 addr1 = &aa->addrV6;
1370 addr2 = &((struct sockaddr_in6 *)sa)->sin6_addr;
1371 if (memcmp(addr1, addr2, sizeof(aa->addrV6)) == 0)
1372 return 1; /* match */
1373 break;
1374 default:
1375 break; /* LCOV_EXCL_LINE */
1376 }
1377 }
1378 return 0; /* different */
1379 }
1380
/* Finish a query: detach it from any TCP send queues that still reference
 * its buffer, invoke the user's callback with the final status and answer,
 * free the query, and - unless ARES_FLAG_STAYOPEN is set - close all server
 * sockets once no queries remain on the channel. */
static void end_query (ares_channel channel, struct query *query, int status,
                       unsigned char *abuf, int alen)
{
  int i;

  /* First we check to see if this query ended while one of our send
   * queues still has pointers to it.
   */
  for (i = 0; i < channel->nservers; i++)
    {
      struct server_state *server = &channel->servers[i];
      struct send_request *sendreq;
      for (sendreq = server->qhead; sendreq; sendreq = sendreq->next)
        if (sendreq->owner_query == query)
          {
            sendreq->owner_query = NULL;
            assert(sendreq->data_storage == NULL);
            if (status == ARES_SUCCESS)
              {
                /* We got a reply for this query, but this queued sendreq
                 * points into this soon-to-be-gone query's tcpbuf. Probably
                 * this means we timed out and queued the query for
                 * retransmission, then received a response before actually
                 * retransmitting. This is perfectly fine, so we want to keep
                 * the connection running smoothly if we can. But in the worst
                 * case we may have sent only some prefix of the query, with
                 * some suffix of the query left to send. Also, the buffer may
                 * be queued on multiple queues. To prevent dangling pointers
                 * to the query's tcpbuf and handle these cases, we just give
                 * such sendreqs their own copy of the query packet.
                 */
               sendreq->data_storage = ares_malloc(sendreq->len);
               if (sendreq->data_storage != NULL)
                 {
                   memcpy(sendreq->data_storage, sendreq->data, sendreq->len);
                   sendreq->data = sendreq->data_storage;
                 }
              }
            /* Note: also entered on ARES_SUCCESS when the copy above failed
             * (data_storage == NULL). */
            if ((status != ARES_SUCCESS) || (sendreq->data_storage == NULL))
              {
                /* We encountered an error (probably a timeout, suggesting the
                 * DNS server we're talking to is probably unreachable,
                 * wedged, or severely overloaded) or we couldn't copy the
                 * request, so mark the connection as broken. When we get to
                 * process_broken_connections() we'll close the connection and
                 * try to re-send requests to another server.
                 */
               server->is_broken = 1;
               /* Just to be paranoid, zero out this sendreq... */
               sendreq->data = NULL;
               sendreq->len = 0;
             }
          }
    }

  /* Invoke the callback */
  query->callback(query->arg, status, query->timeouts, abuf, alen);
  ares__free_query(query);

  /* Simple cleanup policy: if no queries are remaining, close all network
   * sockets unless STAYOPEN is set.
   */
  if (!(channel->flags & ARES_FLAG_STAYOPEN) &&
      ares__is_list_empty(&(channel->all_queries)))
    {
      for (i = 0; i < channel->nservers; i++)
        ares__close_sockets(channel, &channel->servers[i]);
    }
}
1450
/* Release a query: unlink it from every tracking list it participates in,
 * clear its callback/arg (so a stale use is loud), then free its owned
 * buffers and the query struct itself. */
void ares__free_query(struct query *query)
{
  /* Remove the query from all the lists in which it is linked */
  ares__remove_from_list(&(query->queries_by_qid));
  ares__remove_from_list(&(query->queries_by_timeout));
  ares__remove_from_list(&(query->queries_to_server));
  ares__remove_from_list(&(query->all_queries));
  /* Zero out some important stuff, to help catch bugs */
  query->callback = NULL;
  query->arg = NULL;
  /* Deallocate the memory associated with the query */
  ares_free(query->tcpbuf);
  ares_free(query->server_info);
  ares_free(query);
}
1466
/* Close a socket, delegating to the user's close callback when the channel
 * has user-supplied socket functions installed. */
void ares__socket_close(ares_channel channel, ares_socket_t s)
{
  if (!channel->sock_funcs)
    sclose(s);
  else
    channel->sock_funcs->aclose(s, channel->sock_func_cb_data);
}
1474