• Home
  • Line#
  • Scopes#
  • Navigate#
  • Raw
  • Download
1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "resolv_cache.h"
30 
31 #include <resolv.h>
32 #include <stdarg.h>
33 #include <stdio.h>
34 #include <stdlib.h>
35 #include <string.h>
36 #include <time.h>
37 #include "pthread.h"
38 
39 #include <errno.h>
40 #include <arpa/nameser.h>
41 #include <net/if.h>
42 #include <netdb.h>
43 #include <linux/if.h>
44 
45 #include <arpa/inet.h>
46 #include "resolv_private.h"
47 #include "resolv_netid.h"
48 #include "res_private.h"
49 
50 #include "private/libc_logging.h"
51 
52 /* This code implements a small and *simple* DNS resolver cache.
53  *
54  * It is only used to cache DNS answers for a time defined by the smallest TTL
55  * among the answer records in order to reduce DNS traffic. It is not supposed
56  * to be a full DNS cache, since we plan to implement that in the future in a
57  * dedicated process running on the system.
58  *
59  * Note that its design is kept simple very intentionally, i.e.:
60  *
61  *  - it takes raw DNS query packet data as input, and returns raw DNS
62  *    answer packet data as output
63  *
64  *    (this means that two similar queries that encode the DNS name
65  *     differently will be treated distinctly).
66  *
67  *    the smallest TTL value among the answer records are used as the time
68  *    to keep an answer in the cache.
69  *
70  *    this is bad, but we absolutely want to avoid parsing the answer packets
71  *    (and should be solved by the later full DNS cache process).
72  *
73  *  - the implementation is just a (query-data) => (answer-data) hash table
74  *    with a trivial least-recently-used expiration policy.
75  *
76  * Doing this keeps the code simple and avoids to deal with a lot of things
77  * that a full DNS cache is expected to do.
78  *
79  * The API is also very simple:
80  *
81  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
82  *     this will initialize the cache on first usage. the result can be NULL
83  *     if the cache is disabled.
84  *
85  *   - the client calls _resolv_cache_lookup() before performing a query
86  *
87  *     if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
88  *     has been copied into the client-provided answer buffer.
89  *
90  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
91  *     a request normally, *then* call _resolv_cache_add() to add the received
92  *     answer to the cache.
93  *
94  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
95  *     perform a request normally, and *not* call _resolv_cache_add()
96  *
97  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
98  *     is too short to accommodate the cached result.
99  */
100 
101 /* the name of an environment variable that will be checked the first time
102  * this code is called if its value is "0", then the resolver cache is
103  * disabled.
104  */
105 #define  CONFIG_ENV  "BIONIC_DNSCACHE"
106 
107 /* default number of entries kept in the cache. This value has been
108  * determined by browsing through various sites and counting the number
109  * of corresponding requests. Keep in mind that our framework is currently
110  * performing two requests per name lookup (one for IPv4, the other for IPv6)
111  *
112  *    www.google.com      4
113  *    www.ysearch.com     6
114  *    www.amazon.com      8
115  *    www.nytimes.com     22
116  *    www.espn.com        28
117  *    www.msn.com         28
118  *    www.lemonde.fr      35
119  *
120  * (determined in 2009-2-17 from Paris, France, results may vary depending
121  *  on location)
122  *
123  * most high-level websites use lots of media/ad servers with different names
124  * but these are generally reused when browsing through the site.
125  *
126  * As such, a value of 64 should be relatively comfortable at the moment.
127  *
128  * ******************************************
129  * * NOTE - this has changed.
130  * * 1) we've added IPv6 support so each dns query results in 2 responses
131  * * 2) we've made this a system-wide cache, so the cost is less (it's not
132  * *    duplicated in each process) and the need is greater (more processes
133  * *    making different requests).
134  * * Upping by 2x for IPv6
135  * * Upping by another 5x for the centralized nature
136  * *****************************************
137  */
/* parenthesized so the macro expands safely inside larger expressions
 * (e.g. division or subtraction around CONFIG_MAX_ENTRIES) */
#define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
139 
140 /****************************************************************************/
141 /****************************************************************************/
142 /*****                                                                  *****/
143 /*****                                                                  *****/
144 /*****                                                                  *****/
145 /****************************************************************************/
146 /****************************************************************************/
147 
148 /* set to 1 to debug cache operations */
149 #define  DEBUG       0
150 
151 /* set to 1 to debug query data */
152 #define  DEBUG_DATA  0
153 
154 #if DEBUG
155 #define __DEBUG__
156 #else
157 #define __DEBUG__ __attribute__((unused))
158 #endif
159 
160 #undef XLOG
161 
162 #define XLOG(...) ({ \
163     if (DEBUG) { \
164         __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__); \
165     } else { \
166         ((void)0); \
167     } \
168 })
169 
170 /** BOUNDED BUFFER FORMATTING
171  **/
172 
173 /* technical note:
174  *
175  *   the following debugging routines are used to append data to a bounded
176  *   buffer they take two parameters that are:
177  *
178  *   - p : a pointer to the current cursor position in the buffer
179  *         this value is initially set to the buffer's address.
180  *
181  *   - end : the address of the buffer's limit, i.e. of the first byte
182  *           after the buffer. this address should never be touched.
183  *
184  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
185  *                      that the buffer is at least one byte.
186  *
187  *   the _bprint_() functions return the new value of 'p' after the data
188  *   has been appended, and also ensure the following:
189  *
190  *   - the returned value will never be strictly greater than 'end'
191  *
192  *   - a return value equal to 'end' means that truncation occurred
193  *     (in which case, end[-1] will be set to 0)
194  *
195  *   - after returning from a _bprint_() function, the content of the buffer
196  *     is always 0-terminated, even in the event of truncation.
197  *
198  *  these conventions allow you to call _bprint_ functions multiple times and
199  *  only check for truncation at the end of the sequence, as in:
200  *
201  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
202  *
203  *     p = _bprint_c(p, end, '"');
204  *     p = _bprint_s(p, end, my_string);
205  *     p = _bprint_c(p, end, '"');
206  *
207  *     if (p >= end) {
208  *        // buffer was too small
209  *     }
210  *
211  *     printf( "%s", buff );
212  */
213 
214 /* add a char to a bounded buffer */
/* Append a single character to a bounded buffer.
 * The buffer content is always left 0-terminated. Returns the new
 * cursor; a result equal to 'end' indicates truncation. */
char*
_bprint_c( char*  p, char*  end, int  c )
{
    if (p >= end)
        return p;

    if (p + 1 == end) {
        /* only room left for the terminator */
        *p++ = 0;
    } else {
        *p++ = (char) c;
        *p = 0;
    }
    return p;
}
228 
229 /* add a sequence of bytes to a bounded buffer */
/* Append 'len' raw bytes to a bounded buffer, truncating as needed.
 * The buffer content is always left 0-terminated. Returns the new
 * cursor; a result equal to 'end' indicates truncation. */
char*
_bprint_b( char*  p, char*  end, const char*  buf, int  len )
{
    int  room = (int)(end - p);

    if (room <= 0 || len <= 0)
        return p;

    int  ncopy = (len < room) ? len : room;

    memcpy(p, buf, ncopy);
    p += ncopy;

    /* re-terminate: either right after the data, or at the last byte */
    if (p == end)
        end[-1] = 0;
    else
        p[0] = 0;

    return p;
}
251 
252 /* add a string to a bounded buffer */
/* Append a NUL-terminated string to a bounded buffer. */
char*
_bprint_s( char*  p, char*  end, const char*  str )
{
    size_t  len = strlen(str);

    return _bprint_b(p, end, str, (int)len);
}
258 
259 /* add a formatted string to a bounded buffer */
260 char* _bprint( char*  p, char*  end, const char*  format, ... ) __DEBUG__;
/* Append a printf-style formatted string to a bounded buffer.
 * The buffer content is always left 0-terminated. Returns the new
 * cursor; a result equal to 'end' indicates truncation. */
char* _bprint( char*  p, char*  end, const char*  format, ... )
{
    int      room = (int)(end - p);
    int      written;
    va_list  args;

    if (room <= 0)
        return p;

    va_start(args, format);
    written = vsnprintf(p, room, format, args);
    va_end(args);

    /* some C libraries report truncation with a negative result */
    if (written < 0 || written > room)
        written = room;

    p += written;

    /* some C libraries do not zero-terminate on truncation */
    if (p == end)
        p[-1] = 0;

    return p;
}
286 
287 /* add a hex value to a bounded buffer, up to 8 digits */
/* Append the hexadecimal representation of 'value' to a bounded
 * buffer, using exactly 'numDigits' digits (most significant first).
 * 'numDigits' is clamped to the number of hex digits an 'unsigned'
 * can hold (8 on ILP32/LP64), so the temporary buffer can never be
 * overrun by a careless caller. Returns the advanced cursor. */
char*
_bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
{
    char   text[sizeof(unsigned)*2];
    int    nn = 0;

    /* defend against a caller asking for more digits than fit */
    if (numDigits > (int)sizeof(text))
        numDigits = (int)sizeof(text);

    while (numDigits-- > 0) {
        text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
    }
    return _bprint_b(p, end, text, nn);
}
299 
300 /* add the hexadecimal dump of some memory area to a bounded buffer */
/* Append a classic hex+ASCII dump of a memory area to a bounded
 * buffer: 16 bytes per line, the hex column first (short lines are
 * space-padded so columns align), then the printable characters,
 * with non-printable bytes rendered as '.'. Returns the cursor. */
char*
_bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
{
    const int  bytesPerLine = 16;

    while (datalen > 0) {
        int  chunk = (datalen < bytesPerLine) ? datalen : bytesPerLine;
        int  i;

        /* hex column */
        for (i = 0; i < chunk; i++) {
            if (i > 0)
                p = _bprint_c(p, end, ' ');
            p = _bprint_hex(p, end, data[i], 2);
        }
        /* pad short lines so the ASCII column stays aligned */
        for ( ; i < bytesPerLine; i++) {
            p = _bprint_s(p, end, "   ");
        }
        p = _bprint_s(p, end, "  ");

        /* ASCII column */
        for (i = 0; i < chunk; i++) {
            int  ch = data[i];

            if (ch < 32 || ch > 127)
                ch = '.';

            p = _bprint_c(p, end, ch);
        }
        p = _bprint_c(p, end, '\n');

        data    += chunk;
        datalen -= chunk;
    }
    return p;
}
338 
339 /* dump the content of a query of packet to the log */
340 void XLOG_BYTES( const void*  base, int  len ) __DEBUG__;
XLOG_BYTES(const void * base,int len)341 void XLOG_BYTES( const void*  base, int  len )
342 {
343     if (DEBUG_DATA) {
344         char  buff[1024];
345         char*  p = buff, *end = p + sizeof(buff);
346 
347         p = _bprint_hexdump(p, end, base, len);
348         XLOG("%s",buff);
349     }
350 } __DEBUG__
351 
/* current wall-clock time, in whole seconds since the epoch */
static time_t
_time_now( void )
{
    struct timeval  now;

    gettimeofday(&now, NULL);
    return now.tv_sec;
}
360 
361 /* reminder: the general format of a DNS packet is the following:
362  *
363  *    HEADER  (12 bytes)
364  *    QUESTION  (variable)
365  *    ANSWER (variable)
366  *    AUTHORITY (variable)
367  *    ADDITIONNAL (variable)
368  *
369  * the HEADER is made of:
370  *
371  *   ID     : 16 : 16-bit unique query identification field
372  *
373  *   QR     :  1 : set to 0 for queries, and 1 for responses
374  *   Opcode :  4 : set to 0 for queries
375  *   AA     :  1 : set to 0 for queries
376  *   TC     :  1 : truncation flag, will be set to 0 in queries
377  *   RD     :  1 : recursion desired
378  *
379  *   RA     :  1 : recursion available (0 in queries)
380  *   Z      :  3 : three reserved zero bits
381  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
382  *
383  *   QDCount: 16 : question count
384  *   ANCount: 16 : Answer count (0 in queries)
385  *   NSCount: 16: Authority Record count (0 in queries)
386  *   ARCount: 16: Additionnal Record count (0 in queries)
387  *
388  * the QUESTION is made of QDCount Question Record (QRs)
389  * the ANSWER is made of ANCount RRs
390  * the AUTHORITY is made of NSCount RRs
391  * the ADDITIONNAL is made of ARCount RRs
392  *
393  * Each Question Record (QR) is made of:
394  *
395  *   QNAME   : variable : Query DNS NAME
396  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
397  *   CLASS   : 16       : class of query (IN=1)
398  *
399  * Each Resource Record (RR) is made of:
400  *
401  *   NAME    : variable : DNS NAME
402  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
403  *   CLASS   : 16       : class of query (IN=1)
404  *   TTL     : 32       : seconds to cache this RR (0=none)
405  *   RDLENGTH: 16       : size of RDDATA in bytes
406  *   RDDATA  : variable : RR data (depends on TYPE)
407  *
408  * Each QNAME contains a domain name encoded as a sequence of 'labels'
409  * terminated by a zero. Each label has the following format:
410  *
411  *    LEN  : 8     : length of label (MUST be < 64)
412  *    NAME : 8*LEN : the label's characters (must exclude dots)
413  *
414  * A value of 0 in the encoding is interpreted as the 'root' domain and
415  * terminates the encoding. So 'www.android.com' will be encoded as:
416  *
417  *   <3>www<7>android<3>com<0>
418  *
419  * Where <n> represents the byte with value 'n'
420  *
421  * Each NAME reflects the QNAME of the question, but has a slightly more
422  * complex encoding in order to provide message compression. This is achieved
423  * by using a 2-byte pointer, with format:
424  *
425  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
426  *    OFFSET : 14 : offset to another part of the DNS packet
427  *
428  * The offset is relative to the start of the DNS packet and must point
429  * to a prior part of the message. A pointer terminates the encoding.
430  *
431  * The NAME can be encoded in one of the following formats:
432  *
433  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
434  *   - a single pointer
435  *   - a sequence of simple labels terminated by a pointer
436  *
437  * A pointer shall always point to either a pointer of a sequence of
438  * labels (which can themselves be terminated by either a 0 or a pointer)
439  *
440  * The expanded length of a given domain name should not exceed 255 bytes.
441  *
442  * NOTE: we don't parse the answer packets, so don't need to deal with NAME
443  *       records, only QNAMEs.
444  */
445 
#define  DNS_HEADER_SIZE  12

/* TYPE/CLASS values in DNS wire format: two bytes, big-endian.
 * NOTE: octal escapes take at most three digits, so decimal 255 must
 * be written "\377"; the previous "\0377" parsed as "\037" followed
 * by the character '7' (bytes 0x00 0x1F), which never matched the
 * wire encoding of QTYPE ANY. */
#define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
#define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
#define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
#define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
#define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */

#define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
455 
/* Lightweight read cursor over a raw DNS packet. The bytes are not
 * owned by this structure; 'cursor' advances between 'base' and 'end'
 * as the parsing helpers below consume the packet. */
typedef struct {
    const uint8_t*  base;    /* first byte of the packet */
    const uint8_t*  end;     /* one past the last byte */
    const uint8_t*  cursor;  /* current read position */
} DnsPacket;
461 
462 static void
_dnsPacket_init(DnsPacket * packet,const uint8_t * buff,int bufflen)463 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
464 {
465     packet->base   = buff;
466     packet->end    = buff + bufflen;
467     packet->cursor = buff;
468 }
469 
470 static void
_dnsPacket_rewind(DnsPacket * packet)471 _dnsPacket_rewind( DnsPacket*  packet )
472 {
473     packet->cursor = packet->base;
474 }
475 
476 static void
_dnsPacket_skip(DnsPacket * packet,int count)477 _dnsPacket_skip( DnsPacket*  packet, int  count )
478 {
479     const uint8_t*  p = packet->cursor + count;
480 
481     if (p > packet->end)
482         p = packet->end;
483 
484     packet->cursor = p;
485 }
486 
487 static int
_dnsPacket_readInt16(DnsPacket * packet)488 _dnsPacket_readInt16( DnsPacket*  packet )
489 {
490     const uint8_t*  p = packet->cursor;
491 
492     if (p+2 > packet->end)
493         return -1;
494 
495     packet->cursor = p+2;
496     return (p[0]<< 8) | p[1];
497 }
498 
499 /** QUERY CHECKING
500  **/
501 
502 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
503  * the cursor is only advanced in the case of success
504  */
505 static int
_dnsPacket_checkBytes(DnsPacket * packet,int numBytes,const void * bytes)506 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
507 {
508     const uint8_t*  p = packet->cursor;
509 
510     if (p + numBytes > packet->end)
511         return 0;
512 
513     if (memcmp(p, bytes, numBytes) != 0)
514         return 0;
515 
516     packet->cursor = p + numBytes;
517     return 1;
518 }
519 
/* parse and skip a given QNAME stored in a query packet,
 * from the current cursor position. returns 1 on success,
 * or 0 for malformed data.
 *
 * a QNAME is a sequence of length-prefixed labels (each length
 * strictly below 64) terminated by a zero byte; compression
 * pointers are rejected here.
 */
static int
_dnsPacket_checkQName( DnsPacket*  packet )
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;

    for (;;) {
        int  c;

        if (p >= end)
            break;

        c = *p++;

        /* a zero length byte is the root label and ends the QNAME */
        if (c == 0) {
            packet->cursor = p;
            return 1;
        }

        /* we don't expect label compression in QNAMEs */
        if (c >= 64)
            break;

        p += c;
        /* we rely on the bound check at the start
         * of the loop here */
    }
    /* malformed data */
    XLOG("malformed QNAME");
    return 0;
}
555 
556 /* parse and skip a given QR stored in a packet.
557  * returns 1 on success, and 0 on failure
558  */
559 static int
_dnsPacket_checkQR(DnsPacket * packet)560 _dnsPacket_checkQR( DnsPacket*  packet )
561 {
562     if (!_dnsPacket_checkQName(packet))
563         return 0;
564 
565     /* TYPE must be one of the things we support */
566     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
567         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
568         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
569         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
570         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
571     {
572         XLOG("unsupported TYPE");
573         return 0;
574     }
575     /* CLASS must be IN */
576     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
577         XLOG("unsupported CLASS");
578         return 0;
579     }
580 
581     return 1;
582 }
583 
/* check the header of a DNS Query packet, return 1 if it is one
 * type of query we can cache, or 0 otherwise
 */
static int
_dnsPacket_checkQuery( DnsPacket*  packet )
{
    const uint8_t*  p = packet->base;
    int             qdCount, anCount, dnCount, arCount;

    /* a packet shorter than the fixed 12-byte header cannot be valid */
    if (p + DNS_HEADER_SIZE > packet->end) {
        XLOG("query packet too small");
        return 0;
    }

    /* QR must be set to 0, opcode must be 0 and AA must be 0 */
    /* RA, Z, and RCODE must be 0 */
    if ((p[2] & 0xFC) != 0 || p[3] != 0) {
        XLOG("query packet flags unsupported");
        return 0;
    }

    /* Note that we ignore the TC and RD bits here for the
     * following reasons:
     *
     * - there is no point for a query packet sent to a server
     *   to have the TC bit set, but the implementation might
     *   set the bit in the query buffer for its own needs
     *   between a _resolv_cache_lookup and a
     *   _resolv_cache_add. We should not freak out if this
     *   is the case.
     *
     * - we consider that the result from a RD=0 or a RD=1
     *   query might be different, hence that the RD bit
     *   should be used to differentiate cached result.
     *
     *   this implies that RD is checked when hashing or
     *   comparing query packets, but not TC
     */

    /* ANCOUNT, DNCOUNT and ARCOUNT must be 0 */
    qdCount = (p[4] << 8) | p[5];
    anCount = (p[6] << 8) | p[7];
    dnCount = (p[8] << 8) | p[9];   /* this is NSCOUNT in the RFC's terms */
    arCount = (p[10]<< 8) | p[11];

    if (anCount != 0 || dnCount != 0 || arCount != 0) {
        XLOG("query packet contains non-query records");
        return 0;
    }

    if (qdCount == 0) {
        XLOG("query packet doesn't contain query record");
        return 0;
    }

    /* Check QDCOUNT QRs, starting right after the fixed header */
    packet->cursor = p + DNS_HEADER_SIZE;

    for (;qdCount > 0; qdCount--)
        if (!_dnsPacket_checkQR(packet))
            return 0;

    return 1;
}
648 
649 /** QUERY DEBUGGING
650  **/
651 #if DEBUG
/* print the QNAME at the cursor into a bounded buffer as a dotted
 * domain name, advancing the cursor past it. on malformed input the
 * string "<MALFORMED>" is appended and the cursor is left unchanged.
 * (compiled in DEBUG builds only) */
static char*
_dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;
    int             first = 1;   /* suppress the dot before the first label */

    for (;;) {
        int  c;

        if (p >= end)
            break;

        c = *p++;

        /* a zero length byte is the root label: end of the QNAME */
        if (c == 0) {
            packet->cursor = p;
            return bp;
        }

        /* we don't expect label compression in QNAMEs */
        if (c >= 64)
            break;

        if (first)
            first = 0;
        else
            bp = _bprint_c(bp, bend, '.');

        bp = _bprint_b(bp, bend, (const char*)p, c);

        p += c;
        /* we rely on the bound check at the start
         * of the loop here */
    }
    /* malformed data */
    bp = _bprint_s(bp, bend, "<MALFORMED>");
    return bp;
}
691 
/* print one Question Record -- name, then "(TYPE)" -- into a bounded
 * buffer, advancing the packet cursor past the whole record (QNAME,
 * TYPE and CLASS). (compiled in DEBUG builds only) */
static char*
_dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
{
#define  QQ(x)   { DNS_TYPE_##x, #x }
    static const struct {
        const char*  typeBytes;   /* 2-byte wire encoding of the TYPE */
        const char*  typeString;  /* human-readable TYPE name */
    } qTypes[] =
    {
        QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
        { NULL, NULL }
    };
    int          nn;
    const char*  typeString = NULL;

    /* dump QNAME */
    p = _dnsPacket_bprintQName(packet, p, end);

    /* dump TYPE */
    p = _bprint_s(p, end, " (");

    /* _dnsPacket_checkBytes advances the cursor on the matching entry */
    for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
        if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
            typeString = qTypes[nn].typeString;
            break;
        }
    }

    if (typeString != NULL)
        p = _bprint_s(p, end, typeString);
    else {
        /* unrecognized type: print its numeric value instead */
        int  typeCode = _dnsPacket_readInt16(packet);
        p = _bprint(p, end, "UNKNOWN-%d", typeCode);
    }

    p = _bprint_c(p, end, ')');

    /* skip CLASS */
    _dnsPacket_skip(packet, 2);
    return p;
}
733 
/* print a whole query packet into a bounded buffer: an optional
 * "RECURSIVE " prefix followed by each question record.
 * this function assumes the packet has already been checked. */
static char*
_dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
{
    int   qdCount;

    /* bit 0 of byte 2 is the RD (recursion desired) flag */
    if (packet->base[2] & 0x1) {
        p = _bprint_s(p, end, "RECURSIVE ");
    }

    /* skip ID and flags, read QDCOUNT, then skip AN/NS/AR counts */
    _dnsPacket_skip(packet, 4);
    qdCount = _dnsPacket_readInt16(packet);
    _dnsPacket_skip(packet, 6);

    for ( ; qdCount > 0; qdCount-- ) {
        p = _dnsPacket_bprintQR(packet, p, end);
    }
    return p;
}
753 #endif
754 
755 
756 /** QUERY HASHING SUPPORT
757  **
758  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
759  ** BEEN SUCCESFULLY CHECKED.
760  **/
761 
762 /* use 32-bit FNV hash function */
763 #define  FNV_MULT   16777619U
764 #define  FNV_BASIS  2166136261U
765 
766 static unsigned
_dnsPacket_hashBytes(DnsPacket * packet,int numBytes,unsigned hash)767 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
768 {
769     const uint8_t*  p   = packet->cursor;
770     const uint8_t*  end = packet->end;
771 
772     while (numBytes > 0 && p < end) {
773         hash = hash*FNV_MULT ^ *p++;
774     }
775     packet->cursor = p;
776     return hash;
777 }
778 
779 
/* fold the QNAME at the cursor into 'hash', advancing the cursor past
 * the terminating zero. only label characters are hashed, not the
 * length prefixes. the packet is assumed to have been validated
 * already, so any overflow here is logged as an internal error. */
static unsigned
_dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
{
    const uint8_t*  p   = packet->cursor;
    const uint8_t*  end = packet->end;

    for (;;) {
        int  c;

        if (p >= end) {  /* should not happen */
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }

        c = *p++;

        /* zero length byte: root label, end of QNAME */
        if (c == 0)
            break;

        /* compression pointers should have been rejected earlier */
        if (c >= 64) {
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if (p + c >= end) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        /* hash the label's characters */
        while (c > 0) {
            hash = hash*FNV_MULT ^ *p++;
            c   -= 1;
        }
    }
    packet->cursor = p;
    return hash;
}
816 
817 static unsigned
_dnsPacket_hashQR(DnsPacket * packet,unsigned hash)818 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
819 {
820     hash = _dnsPacket_hashQName(packet, hash);
821     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
822     return hash;
823 }
824 
/* compute the FNV-style hash of a whole query packet, rewinding its
 * cursor first. only the RD bit, QDCOUNT and the question records
 * participate; all other header fields are assumed to be zero. */
static unsigned
_dnsPacket_hashQuery( DnsPacket*  packet )
{
    unsigned  hash = FNV_BASIS;
    int       count;
    _dnsPacket_rewind(packet);

    /* we ignore the TC bit for reasons explained in
     * _dnsPacket_checkQuery().
     *
     * however we hash the RD bit to differentiate
     * between answers for recursive and non-recursive
     * queries.
     */
    hash = hash*FNV_MULT ^ (packet->base[2] & 1);

    /* assume: other flags are 0 */
    _dnsPacket_skip(packet, 4);

    /* read QDCOUNT */
    count = _dnsPacket_readInt16(packet);

    /* assume: ANcount, NScount, ARcount are 0 */
    _dnsPacket_skip(packet, 6);

    /* hash QDCOUNT QRs; on a truncated packet count is -1 and the
     * loop body never runs */
    for ( ; count > 0; count-- )
        hash = _dnsPacket_hashQR(packet, hash);

    return hash;
}
856 
857 
858 /** QUERY COMPARISON
859  **
860  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
861  ** BEEN SUCCESFULLY CHECKED.
862  **/
863 
/* compare the domain-name encodings at both packets' cursors.
 * on a match, both cursors are advanced past the name and 1 is
 * returned; on mismatch or malformed data, 0 is returned and the
 * cursors are left unchanged. assumes both packets were validated. */
static int
_dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
{
    const uint8_t*  p1   = pack1->cursor;
    const uint8_t*  end1 = pack1->end;
    const uint8_t*  p2   = pack2->cursor;
    const uint8_t*  end2 = pack2->end;

    for (;;) {
        int  c1, c2;

        if (p1 >= end1 || p2 >= end2) {
            XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
            break;
        }
        /* compare the label length bytes */
        c1 = *p1++;
        c2 = *p2++;
        if (c1 != c2)
            break;

        /* both hit the root label: names are equal */
        if (c1 == 0) {
            pack1->cursor = p1;
            pack2->cursor = p2;
            return 1;
        }
        /* compression pointers should have been rejected earlier */
        if (c1 >= 64) {
            XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
            break;
        }
        if ((p1+c1 > end1) || (p2+c1 > end2)) {
            XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
                    __FUNCTION__);
            break;
        }
        /* compare the label characters themselves */
        if (memcmp(p1, p2, c1) != 0)
            break;
        p1 += c1;
        p2 += c1;
        /* we rely on the bound checks at the start of the loop */
    }
    /* not the same, or one is malformed */
    XLOG("different DN");
    return 0;
}
908 
909 static int
_dnsPacket_isEqualBytes(DnsPacket * pack1,DnsPacket * pack2,int numBytes)910 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
911 {
912     const uint8_t*  p1 = pack1->cursor;
913     const uint8_t*  p2 = pack2->cursor;
914 
915     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
916         return 0;
917 
918     if ( memcmp(p1, p2, numBytes) != 0 )
919         return 0;
920 
921     pack1->cursor += numBytes;
922     pack2->cursor += numBytes;
923     return 1;
924 }
925 
926 static int
_dnsPacket_isEqualQR(DnsPacket * pack1,DnsPacket * pack2)927 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
928 {
929     /* compare domain name encoding + TYPE + CLASS */
930     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
931          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
932         return 0;
933 
934     return 1;
935 }
936 
/* compare two DNS query packets for cache equality: same RD bit,
 * same QDCOUNT, and pairwise-equal question records. all other
 * header fields are assumed zero/ignored (see _dnsPacket_checkQuery).
 * returns 1 if equal, 0 otherwise. rewinds both cursors first. */
static int
_dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
{
    int  count1, count2;

    /* compare the headers, ignore most fields */
    _dnsPacket_rewind(pack1);
    _dnsPacket_rewind(pack2);

    /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
    if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
        XLOG("different RD");
        return 0;
    }

    /* assume: other flags are all 0 */
    _dnsPacket_skip(pack1, 4);
    _dnsPacket_skip(pack2, 4);

    /* compare QDCOUNT (count1 < 0 means a truncated packet) */
    count1 = _dnsPacket_readInt16(pack1);
    count2 = _dnsPacket_readInt16(pack2);
    if (count1 != count2 || count1 < 0) {
        XLOG("different QDCOUNT");
        return 0;
    }

    /* assume: ANcount, NScount and ARcount are all 0 */
    _dnsPacket_skip(pack1, 6);
    _dnsPacket_skip(pack2, 6);

    /* compare the QDCOUNT QRs */
    for ( ; count1 > 0; count1-- ) {
        if (!_dnsPacket_isEqualQR(pack1, pack2)) {
            XLOG("different QR");
            return 0;
        }
    }
    return 1;
}
977 
978 /****************************************************************************/
979 /****************************************************************************/
980 /*****                                                                  *****/
981 /*****                                                                  *****/
982 /*****                                                                  *****/
983 /****************************************************************************/
984 /****************************************************************************/
985 
986 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
987  * structure though they are conceptually part of the hash table.
988  *
989  * similarly, mru_next and mru_prev are part of the global MRU list
990  */
typedef struct Entry {
    unsigned int     hash;     /* hash of the query (see entry_hash) */
    struct Entry*    hlink;    /* next in collision chain */
    struct Entry*    mru_prev; /* neighbors in the global MRU list */
    struct Entry*    mru_next;

    const uint8_t*   query;    /* points just past the Entry header (see entry_alloc) */
    int              querylen;
    const uint8_t*   answer;   /* follows the query bytes in the same allocation */
    int              answerlen;
    time_t           expires;   /* time_t when the entry isn't valid any more */
    int              id;        /* for debugging purpose */
} Entry;
1004 
1005 /**
1006  * Find the TTL for a negative DNS result.  This is defined as the minimum
1007  * of the SOA records TTL and the MINIMUM-TTL field (RFC-2308).
1008  *
1009  * Return 0 if not found.
1010  */
1011 static u_long
answer_getNegativeTTL(ns_msg handle)1012 answer_getNegativeTTL(ns_msg handle) {
1013     int n, nscount;
1014     u_long result = 0;
1015     ns_rr rr;
1016 
1017     nscount = ns_msg_count(handle, ns_s_ns);
1018     for (n = 0; n < nscount; n++) {
1019         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1020             const u_char *rdata = ns_rr_rdata(rr); // find the data
1021             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1022             int len;
1023             u_long ttl, rec_result = ns_rr_ttl(rr);
1024 
1025             // find the MINIMUM-TTL field from the blob of binary data for this record
1026             // skip the server name
1027             len = dn_skipname(rdata, edata);
1028             if (len == -1) continue; // error skipping
1029             rdata += len;
1030 
1031             // skip the admin name
1032             len = dn_skipname(rdata, edata);
1033             if (len == -1) continue; // error skipping
1034             rdata += len;
1035 
1036             if (edata - rdata != 5*NS_INT32SZ) continue;
1037             // skip: serial number + refresh interval + retry interval + expiry
1038             rdata += NS_INT32SZ * 4;
1039             // finally read the MINIMUM TTL
1040             ttl = ns_get32(rdata);
1041             if (ttl < rec_result) {
1042                 rec_result = ttl;
1043             }
1044             // Now that the record is read successfully, apply the new min TTL
1045             if (n == 0 || rec_result < result) {
1046                 result = rec_result;
1047             }
1048         }
1049     }
1050     return result;
1051 }
1052 
1053 /**
1054  * Parse the answer records and find the appropriate
1055  * smallest TTL among the records.  This might be from
1056  * the answer records if found or from the SOA record
1057  * if it's a negative result.
1058  *
1059  * The returned TTL is the number of seconds to
1060  * keep the answer in the cache.
1061  *
1062  * In case of parse error zero (0) is returned which
1063  * indicates that the answer shall not be cached.
1064  */
1065 static u_long
answer_getTTL(const void * answer,int answerlen)1066 answer_getTTL(const void* answer, int answerlen)
1067 {
1068     ns_msg handle;
1069     int ancount, n;
1070     u_long result, ttl;
1071     ns_rr rr;
1072 
1073     result = 0;
1074     if (ns_initparse(answer, answerlen, &handle) >= 0) {
1075         // get number of answer records
1076         ancount = ns_msg_count(handle, ns_s_an);
1077 
1078         if (ancount == 0) {
1079             // a response with no answers?  Cache this negative result.
1080             result = answer_getNegativeTTL(handle);
1081         } else {
1082             for (n = 0; n < ancount; n++) {
1083                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1084                     ttl = ns_rr_ttl(rr);
1085                     if (n == 0 || ttl < result) {
1086                         result = ttl;
1087                     }
1088                 } else {
1089                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1090                 }
1091             }
1092         }
1093     } else {
1094         XLOG("ns_parserr failed. %s\n", strerror(errno));
1095     }
1096 
1097     XLOG("TTL = %lu\n", result);
1098 
1099     return result;
1100 }
1101 
1102 static void
entry_free(Entry * e)1103 entry_free( Entry*  e )
1104 {
1105     /* everything is allocated in a single memory block */
1106     if (e) {
1107         free(e);
1108     }
1109 }
1110 
1111 static __inline__ void
entry_mru_remove(Entry * e)1112 entry_mru_remove( Entry*  e )
1113 {
1114     e->mru_prev->mru_next = e->mru_next;
1115     e->mru_next->mru_prev = e->mru_prev;
1116 }
1117 
1118 static __inline__ void
entry_mru_add(Entry * e,Entry * list)1119 entry_mru_add( Entry*  e, Entry*  list )
1120 {
1121     Entry*  first = list->mru_next;
1122 
1123     e->mru_next = first;
1124     e->mru_prev = list;
1125 
1126     list->mru_next  = e;
1127     first->mru_prev = e;
1128 }
1129 
1130 /* compute the hash of a given entry, this is a hash of most
1131  * data in the query (key) */
1132 static unsigned
entry_hash(const Entry * e)1133 entry_hash( const Entry*  e )
1134 {
1135     DnsPacket  pack[1];
1136 
1137     _dnsPacket_init(pack, e->query, e->querylen);
1138     return _dnsPacket_hashQuery(pack);
1139 }
1140 
1141 /* initialize an Entry as a search key, this also checks the input query packet
1142  * returns 1 on success, or 0 in case of unsupported/malformed data */
1143 static int
entry_init_key(Entry * e,const void * query,int querylen)1144 entry_init_key( Entry*  e, const void*  query, int  querylen )
1145 {
1146     DnsPacket  pack[1];
1147 
1148     memset(e, 0, sizeof(*e));
1149 
1150     e->query    = query;
1151     e->querylen = querylen;
1152     e->hash     = entry_hash(e);
1153 
1154     _dnsPacket_init(pack, query, querylen);
1155 
1156     return _dnsPacket_checkQuery(pack);
1157 }
1158 
1159 /* allocate a new entry as a cache node */
1160 static Entry*
entry_alloc(const Entry * init,const void * answer,int answerlen)1161 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
1162 {
1163     Entry*  e;
1164     int     size;
1165 
1166     size = sizeof(*e) + init->querylen + answerlen;
1167     e    = calloc(size, 1);
1168     if (e == NULL)
1169         return e;
1170 
1171     e->hash     = init->hash;
1172     e->query    = (const uint8_t*)(e+1);
1173     e->querylen = init->querylen;
1174 
1175     memcpy( (char*)e->query, init->query, e->querylen );
1176 
1177     e->answer    = e->query + e->querylen;
1178     e->answerlen = answerlen;
1179 
1180     memcpy( (char*)e->answer, answer, e->answerlen );
1181 
1182     return e;
1183 }
1184 
1185 static int
entry_equals(const Entry * e1,const Entry * e2)1186 entry_equals( const Entry*  e1, const Entry*  e2 )
1187 {
1188     DnsPacket  pack1[1], pack2[1];
1189 
1190     if (e1->querylen != e2->querylen) {
1191         return 0;
1192     }
1193     _dnsPacket_init(pack1, e1->query, e1->querylen);
1194     _dnsPacket_init(pack2, e2->query, e2->querylen);
1195 
1196     return _dnsPacket_isEqualQuery(pack1, pack2);
1197 }
1198 
1199 /****************************************************************************/
1200 /****************************************************************************/
1201 /*****                                                                  *****/
1202 /*****                                                                  *****/
1203 /*****                                                                  *****/
1204 /****************************************************************************/
1205 /****************************************************************************/
1206 
1207 /* We use a simple hash table with external collision lists
1208  * for simplicity, the hash-table fields 'hash' and 'hlink' are
1209  * inlined in the Entry structure.
1210  */
1211 
/* Maximum time (seconds) for a thread to wait for a pending request.
 * NOTE: no trailing semicolon -- a ';' inside an object-like macro body
 * silently expands into an extra empty statement at every use site. */
#define PENDING_REQUEST_TIMEOUT 20
1214 
/* One record per in-flight query: threads that issue the same query
 * wait on 'cond' until the owning request completes or fails. */
typedef struct pending_req_info {
    unsigned int                hash;   /* query hash, same scheme as Entry.hash */
    pthread_cond_t              cond;   /* broadcast when the request finishes */
    struct pending_req_info*    next;   /* singly-linked list */
} PendingReqInfo;
1220 
typedef struct resolv_cache {
    int              max_entries;      /* capacity; 0 when caching is disabled */
    int              num_entries;      /* current number of cached entries */
    Entry            mru_list;         /* sentinel of the circular MRU list */
    int              last_id;          /* id generator (debugging aid) */
    Entry*           entries;          /* hash table: heads of collision chains */
    PendingReqInfo   pending_requests; /* sentinel head of in-flight queries */
} Cache;
1229 
/* Per-network resolver state: the DNS cache plus the configured
 * nameservers and their statistics.  Nodes form a singly-linked list
 * headed by _res_cache_list, protected by _res_cache_list_lock. */
struct resolv_cache_info {
    unsigned                    netid;  /* network this state belongs to */
    Cache*                      cache;
    struct resolv_cache_info*   next;
    int                         nscount; /* number of valid nameservers[] slots */
    char*                       nameservers[MAXNS];
    struct addrinfo*            nsaddrinfo[MAXNS];
    int                         revision_id; // # times the nameservers have been replaced
    struct __res_params         params;
    struct __res_stats          nsstats[MAXNS];
    char                        defdname[MAXDNSRCHPATH];
    int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
};
1243 
/* NOTE(review): HTABLE_DELETED is defined elsewhere in this file; this
 * predicate appears unused in the code visible here. */
#define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)

static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
static void _res_cache_init(void);

// lock protecting everything in the _resolve_cache_info structs (next ptr, etc)
static pthread_mutex_t _res_cache_list_lock;

/* gets cache associated with a network, or NULL if none exists */
static struct resolv_cache* _find_named_cache_locked(unsigned netid);
1254 
1255 static void
_cache_flush_pending_requests_locked(struct resolv_cache * cache)1256 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1257 {
1258     struct pending_req_info *ri, *tmp;
1259     if (cache) {
1260         ri = cache->pending_requests.next;
1261 
1262         while (ri) {
1263             tmp = ri;
1264             ri = ri->next;
1265             pthread_cond_broadcast(&tmp->cond);
1266 
1267             pthread_cond_destroy(&tmp->cond);
1268             free(tmp);
1269         }
1270 
1271         cache->pending_requests.next = NULL;
1272     }
1273 }
1274 
/* Return 0 if no pending request is found matching the key; in that
 * case a new pending-request record is registered so that later callers
 * with the same query will wait on it.
 * If a matching request is found the calling thread will wait until
 * the matching request completes (or PENDING_REQUEST_TIMEOUT seconds
 * elapse), then update *cache and return 1.
 * Caller must hold _res_cache_list_lock. */
static int
_cache_check_pending_request_locked( struct resolv_cache** cache, Entry* key, unsigned netid )
{
    struct pending_req_info *ri, *prev;
    int exist = 0;

    if (*cache && key) {
        /* scan the pending list for a request with the same query hash */
        ri = (*cache)->pending_requests.next;
        prev = &(*cache)->pending_requests;
        while (ri) {
            if (ri->hash == key->hash) {
                exist = 1;
                break;
            }
            prev = ri;
            ri = ri->next;
        }

        if (!exist) {
            /* become the owner: append a record others can wait on.
             * NOTE: if calloc fails we silently proceed without one. */
            ri = calloc(1, sizeof(struct pending_req_info));
            if (ri) {
                ri->hash = key->hash;
                pthread_cond_init(&ri->cond, NULL);
                prev->next = ri;
            }
        } else {
            struct timespec ts = {0,0};
            XLOG("Waiting for previous request");
            /* pthread_cond_timedwait atomically releases the list lock
             * while sleeping and re-acquires it before returning */
            ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
            pthread_cond_timedwait(&ri->cond, &_res_cache_list_lock, &ts);
            /* Must update *cache as it could have been deleted. */
            *cache = _find_named_cache_locked(netid);
        }
    }

    return exist;
}
1315 
1316 /* notify any waiting thread that waiting on a request
1317  * matching the key has been added to the cache */
1318 static void
_cache_notify_waiting_tid_locked(struct resolv_cache * cache,Entry * key)1319 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1320 {
1321     struct pending_req_info *ri, *prev;
1322 
1323     if (cache && key) {
1324         ri = cache->pending_requests.next;
1325         prev = &cache->pending_requests;
1326         while (ri) {
1327             if (ri->hash == key->hash) {
1328                 pthread_cond_broadcast(&ri->cond);
1329                 break;
1330             }
1331             prev = ri;
1332             ri = ri->next;
1333         }
1334 
1335         // remove item from list and destroy
1336         if (ri) {
1337             prev->next = ri->next;
1338             pthread_cond_destroy(&ri->cond);
1339             free(ri);
1340         }
1341     }
1342 }
1343 
1344 /* notify the cache that the query failed */
1345 void
_resolv_cache_query_failed(unsigned netid,const void * query,int querylen)1346 _resolv_cache_query_failed( unsigned    netid,
1347                    const void* query,
1348                    int         querylen)
1349 {
1350     Entry    key[1];
1351     Cache*   cache;
1352 
1353     if (!entry_init_key(key, query, querylen))
1354         return;
1355 
1356     pthread_mutex_lock(&_res_cache_list_lock);
1357 
1358     cache = _find_named_cache_locked(netid);
1359 
1360     if (cache) {
1361         _cache_notify_waiting_tid_locked(cache, key);
1362     }
1363 
1364     pthread_mutex_unlock(&_res_cache_list_lock);
1365 }
1366 
1367 static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
1368 
1369 static void
_cache_flush_locked(Cache * cache)1370 _cache_flush_locked( Cache*  cache )
1371 {
1372     int     nn;
1373 
1374     for (nn = 0; nn < cache->max_entries; nn++)
1375     {
1376         Entry**  pnode = (Entry**) &cache->entries[nn];
1377 
1378         while (*pnode != NULL) {
1379             Entry*  node = *pnode;
1380             *pnode = node->hlink;
1381             entry_free(node);
1382         }
1383     }
1384 
1385     // flush pending request
1386     _cache_flush_pending_requests_locked(cache);
1387 
1388     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1389     cache->num_entries       = 0;
1390     cache->last_id           = 0;
1391 
1392     XLOG("*************************\n"
1393          "*** DNS CACHE FLUSHED ***\n"
1394          "*************************");
1395 }
1396 
1397 static int
_res_cache_get_max_entries(void)1398 _res_cache_get_max_entries( void )
1399 {
1400     int cache_size = CONFIG_MAX_ENTRIES;
1401 
1402     const char* cache_mode = getenv("ANDROID_DNS_MODE");
1403     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1404         // Don't use the cache in local mode. This is used by the proxy itself.
1405         cache_size = 0;
1406     }
1407 
1408     XLOG("cache size: %d", cache_size);
1409     return cache_size;
1410 }
1411 
1412 static struct resolv_cache*
_resolv_cache_create(void)1413 _resolv_cache_create( void )
1414 {
1415     struct resolv_cache*  cache;
1416 
1417     cache = calloc(sizeof(*cache), 1);
1418     if (cache) {
1419         cache->max_entries = _res_cache_get_max_entries();
1420         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1421         if (cache->entries) {
1422             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1423             XLOG("%s: cache created\n", __FUNCTION__);
1424         } else {
1425             free(cache);
1426             cache = NULL;
1427         }
1428     }
1429     return cache;
1430 }
1431 
1432 
1433 #if DEBUG
1434 static void
_dump_query(const uint8_t * query,int querylen)1435 _dump_query( const uint8_t*  query, int  querylen )
1436 {
1437     char       temp[256], *p=temp, *end=p+sizeof(temp);
1438     DnsPacket  pack[1];
1439 
1440     _dnsPacket_init(pack, query, querylen);
1441     p = _dnsPacket_bprintQuery(pack, p, end);
1442     XLOG("QUERY: %s", temp);
1443 }
1444 
1445 static void
_cache_dump_mru(Cache * cache)1446 _cache_dump_mru( Cache*  cache )
1447 {
1448     char    temp[512], *p=temp, *end=p+sizeof(temp);
1449     Entry*  e;
1450 
1451     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1452     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1453         p = _bprint(p, end, " %d", e->id);
1454 
1455     XLOG("%s", temp);
1456 }
1457 
/* DEBUG helper: pretty-print an answer packet by running res_pquery()
 * into a temp file, reading the file back and logging it. */
static void
_dump_answer(const void* answer, int answerlen)
{
    res_state statep;
    FILE* fp;
    char* buf;
    long fileLen;

    fp = fopen("/data/reslog.txt", "w+e");
    if (fp != NULL) {
        statep = __res_get_state();

        res_pquery(statep, answer, answerlen, fp);

        //Get file length (ftell returns -1L on error)
        fseek(fp, 0, SEEK_END);
        fileLen = ftell(fp);
        fseek(fp, 0, SEEK_SET);
        if (fileLen > 0) {
            buf = (char *)malloc(fileLen + 1);
            if (buf != NULL) {
                //Read file contents into buffer; fread() does not
                //NUL-terminate, so terminate at the actual read length
                //before logging with %s
                size_t nread = fread(buf, 1, (size_t)fileLen, fp);
                buf[nread] = '\0';
                XLOG("%s\n", buf);
                free(buf);
            }
        }
        fclose(fp);
        remove("/data/reslog.txt");
    }
    else {
        errno = 0; // else debug is introducing error signals
        XLOG("%s: can't open file\n", __FUNCTION__);
    }
}
1491 #endif
1492 
/* In DEBUG builds dump the raw query/answer packets; otherwise these
 * compile to no-ops so call sites need no #ifdef guards. */
#if DEBUG
#  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
#  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
#else
#  define  XLOG_QUERY(q,len)   ((void)0)
#  define  XLOG_ANSWER(a,len)  ((void)0)
#endif
1500 
1501 /* This function tries to find a key within the hash table
1502  * In case of success, it will return a *pointer* to the hashed key.
1503  * In case of failure, it will return a *pointer* to NULL
1504  *
1505  * So, the caller must check '*result' to check for success/failure.
1506  *
1507  * The main idea is that the result can later be used directly in
1508  * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1509  * parameter. This makes the code simpler and avoids re-searching
1510  * for the key position in the htable.
1511  *
1512  * The result of a lookup_p is only valid until you alter the hash
1513  * table.
1514  */
1515 static Entry**
_cache_lookup_p(Cache * cache,Entry * key)1516 _cache_lookup_p( Cache*   cache,
1517                  Entry*   key )
1518 {
1519     int      index = key->hash % cache->max_entries;
1520     Entry**  pnode = (Entry**) &cache->entries[ index ];
1521 
1522     while (*pnode != NULL) {
1523         Entry*  node = *pnode;
1524 
1525         if (node == NULL)
1526             break;
1527 
1528         if (node->hash == key->hash && entry_equals(node, key))
1529             break;
1530 
1531         pnode = &node->hlink;
1532     }
1533     return pnode;
1534 }
1535 
1536 /* Add a new entry to the hash table. 'lookup' must be the
1537  * result of an immediate previous failed _lookup_p() call
1538  * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1539  * newly created entry
1540  */
1541 static void
_cache_add_p(Cache * cache,Entry ** lookup,Entry * e)1542 _cache_add_p( Cache*   cache,
1543               Entry**  lookup,
1544               Entry*   e )
1545 {
1546     *lookup = e;
1547     e->id = ++cache->last_id;
1548     entry_mru_add(e, &cache->mru_list);
1549     cache->num_entries += 1;
1550 
1551     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1552          e->id, cache->num_entries);
1553 }
1554 
1555 /* Remove an existing entry from the hash table,
1556  * 'lookup' must be the result of an immediate previous
1557  * and succesful _lookup_p() call.
1558  */
1559 static void
_cache_remove_p(Cache * cache,Entry ** lookup)1560 _cache_remove_p( Cache*   cache,
1561                  Entry**  lookup )
1562 {
1563     Entry*  e  = *lookup;
1564 
1565     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1566          e->id, cache->num_entries-1);
1567 
1568     entry_mru_remove(e);
1569     *lookup = e->hlink;
1570     entry_free(e);
1571     cache->num_entries -= 1;
1572 }
1573 
1574 /* Remove the oldest entry from the hash table.
1575  */
1576 static void
_cache_remove_oldest(Cache * cache)1577 _cache_remove_oldest( Cache*  cache )
1578 {
1579     Entry*   oldest = cache->mru_list.mru_prev;
1580     Entry**  lookup = _cache_lookup_p(cache, oldest);
1581 
1582     if (*lookup == NULL) { /* should not happen */
1583         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1584         return;
1585     }
1586     if (DEBUG) {
1587         XLOG("Cache full - removing oldest");
1588         XLOG_QUERY(oldest->query, oldest->querylen);
1589     }
1590     _cache_remove_p(cache, lookup);
1591 }
1592 
1593 /* Remove all expired entries from the hash table.
1594  */
static void _cache_remove_expired(Cache* cache) {
    /* Caller holds _res_cache_list_lock (see _resolv_cache_add). */
    Entry* e;
    time_t now = _time_now();

    for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
        // Entry is old, remove
        if (now >= e->expires) {
            Entry** lookup = _cache_lookup_p(cache, e);
            if (*lookup == NULL) { /* should not happen */
                XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
                return;
            }
            // Advance BEFORE _cache_remove_p frees 'e'; the saved
            // mru_next stays valid because removal only unlinks 'e'.
            e = e->mru_next;
            _cache_remove_p(cache, lookup);
        } else {
            e = e->mru_next;
        }
    }
}
1614 
/* Look up 'query' in the cache for network 'netid'.
 * On RESOLV_CACHE_FOUND, the cached answer is copied into 'answer' and
 * *answerlen is set.  Note that *answerlen is also set when the cached
 * answer does not fit in 'answersize' (which yields UNSUPPORTED), so a
 * caller could retry with a larger buffer. */
ResolvCacheStatus
_resolv_cache_lookup( unsigned              netid,
                      const void*           query,
                      int                   querylen,
                      void*                 answer,
                      int                   answersize,
                      int                  *answerlen )
{
    Entry      key[1];
    Entry**    lookup;
    Entry*     e;
    time_t     now;
    Cache*     cache;

    ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;

    XLOG("%s: lookup", __FUNCTION__);
    XLOG_QUERY(query, querylen);

    /* we don't cache malformed queries */
    if (!entry_init_key(key, query, querylen)) {
        XLOG("%s: unsupported query", __FUNCTION__);
        return RESOLV_CACHE_UNSUPPORTED;
    }
    /* lookup cache */
    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    cache = _find_named_cache_locked(netid);
    if (cache == NULL) {
        result = RESOLV_CACHE_UNSUPPORTED;
        goto Exit;
    }

    /* see the description of _lookup_p to understand this.
     * the function always return a non-NULL pointer.
     */
    lookup = _cache_lookup_p(cache, key);
    e      = *lookup;

    if (e == NULL) {
        XLOG( "NOT IN CACHE");
        // A request for the same key may already be in flight; if so,
        // wait for it to complete, then retry the lookup once (the wait
        // may have populated the cache).  The wait can time out and the
        // cache may have been deleted meanwhile, hence the re-checks.
        if (!_cache_check_pending_request_locked(&cache, key, netid) || cache == NULL) {
            goto Exit;
        } else {
            lookup = _cache_lookup_p(cache, key);
            e = *lookup;
            if (e == NULL) {
                goto Exit;
            }
        }
    }

    now = _time_now();

    /* remove stale entries here */
    if (now >= e->expires) {
        XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
        XLOG_QUERY(e->query, e->querylen);
        _cache_remove_p(cache, lookup);
        goto Exit;
    }

    /* note: *answerlen is set even on the too-small-buffer path below */
    *answerlen = e->answerlen;
    if (e->answerlen > answersize) {
        /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
        result = RESOLV_CACHE_UNSUPPORTED;
        XLOG(" ANSWER TOO LONG");
        goto Exit;
    }

    memcpy( answer, e->answer, e->answerlen );

    /* bump up this entry to the top of the MRU list */
    if (e != cache->mru_list.mru_next) {
        entry_mru_remove( e );
        entry_mru_add( e, &cache->mru_list );
    }

    XLOG( "FOUND IN CACHE entry=%p", e );
    result = RESOLV_CACHE_FOUND;

Exit:
    pthread_mutex_unlock(&_res_cache_list_lock);
    return result;
}
1703 
1704 
/* Add a (query, answer) pair to the cache of network 'netid'.
 * The entry's lifetime comes from the answer's TTL (see answer_getTTL);
 * a TTL of 0 means "do not cache".  Whatever happens, any thread
 * waiting on this query (see _cache_check_pending_request_locked) is
 * woken before returning. */
void
_resolv_cache_add( unsigned              netid,
                   const void*           query,
                   int                   querylen,
                   const void*           answer,
                   int                   answerlen )
{
    Entry    key[1];
    Entry*   e;
    Entry**  lookup;
    u_long   ttl;
    Cache*   cache = NULL;

    /* don't assume that the query has already been cached
     */
    if (!entry_init_key( key, query, querylen )) {
        XLOG( "%s: passed invalid query ?", __FUNCTION__);
        return;
    }

    pthread_mutex_lock(&_res_cache_list_lock);

    cache = _find_named_cache_locked(netid);
    if (cache == NULL) {
        goto Exit;
    }

    XLOG( "%s: query:", __FUNCTION__ );
    XLOG_QUERY(query,querylen);
    XLOG_ANSWER(answer, answerlen);
#if DEBUG_DATA
    XLOG( "answer:");
    XLOG_BYTES(answer,answerlen);
#endif

    lookup = _cache_lookup_p(cache, key);
    e      = *lookup;

    if (e != NULL) { /* should not happen */
        XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
             __FUNCTION__, e);
        goto Exit;
    }

    /* cache full: evict expired entries first, and only drop the
     * least-recently-used one if that freed nothing */
    if (cache->num_entries >= cache->max_entries) {
        _cache_remove_expired(cache);
        if (cache->num_entries >= cache->max_entries) {
            _cache_remove_oldest(cache);
        }
        /* removals may have invalidated 'lookup', so need to lookup again */
        lookup = _cache_lookup_p(cache, key);
        e      = *lookup;
        if (e != NULL) {
            XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
                __FUNCTION__, e);
            goto Exit;
        }
    }

    ttl = answer_getTTL(answer, answerlen);
    if (ttl > 0) {
        e = entry_alloc(key, answer, answerlen);
        if (e != NULL) {
            e->expires = ttl + _time_now();
            _cache_add_p(cache, lookup, e);
        }
    }
#if DEBUG
    _cache_dump_mru(cache);
#endif
Exit:
    /* always wake waiters, even when nothing was cached */
    if (cache != NULL) {
      _cache_notify_waiting_tid_locked(cache, key);
    }
    pthread_mutex_unlock(&_res_cache_list_lock);
}
1781 
1782 /****************************************************************************/
1783 /****************************************************************************/
1784 /*****                                                                  *****/
1785 /*****                                                                  *****/
1786 /*****                                                                  *****/
1787 /****************************************************************************/
1788 /****************************************************************************/
1789 
// Head of the list of caches.  Protected by _res_cache_list_lock.
static struct resolv_cache_info _res_cache_list;

/* insert resolv_cache_info into the list of resolv_cache_infos */
static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
/* creates a resolv_cache_info */
static struct resolv_cache_info* _create_cache_info( void );
/* gets a resolv_cache_info associated with a network, or NULL if not found */
static struct resolv_cache_info* _find_cache_info_locked(unsigned netid);
/* look up the named cache, and creates one if needed */
static struct resolv_cache* _get_res_cache_for_net_locked(unsigned netid);
/* empty the named cache */
static void _flush_cache_for_net_locked(unsigned netid);
/* empty the nameservers set for the named cache */
static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
/* return 1 if the provided list of name servers differs from the list of name servers
 * currently attached to the provided cache_info */
static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
        const char** servers, int numservers);
/* clears the stats samples contained within the given cache_info */
static void _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info);
1811 
1812 static void
_res_cache_init(void)1813 _res_cache_init(void)
1814 {
1815     const char*  env = getenv(CONFIG_ENV);
1816 
1817     if (env && atoi(env) == 0) {
1818         /* the cache is disabled */
1819         return;
1820     }
1821 
1822     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1823     pthread_mutex_init(&_res_cache_list_lock, NULL);
1824 }
1825 
1826 static struct resolv_cache*
_get_res_cache_for_net_locked(unsigned netid)1827 _get_res_cache_for_net_locked(unsigned netid)
1828 {
1829     struct resolv_cache* cache = _find_named_cache_locked(netid);
1830     if (!cache) {
1831         struct resolv_cache_info* cache_info = _create_cache_info();
1832         if (cache_info) {
1833             cache = _resolv_cache_create();
1834             if (cache) {
1835                 cache_info->cache = cache;
1836                 cache_info->netid = netid;
1837                 _insert_cache_info_locked(cache_info);
1838             } else {
1839                 free(cache_info);
1840             }
1841         }
1842     }
1843     return cache;
1844 }
1845 
1846 void
_resolv_flush_cache_for_net(unsigned netid)1847 _resolv_flush_cache_for_net(unsigned netid)
1848 {
1849     pthread_once(&_res_cache_once, _res_cache_init);
1850     pthread_mutex_lock(&_res_cache_list_lock);
1851 
1852     _flush_cache_for_net_locked(netid);
1853 
1854     pthread_mutex_unlock(&_res_cache_list_lock);
1855 }
1856 
1857 static void
_flush_cache_for_net_locked(unsigned netid)1858 _flush_cache_for_net_locked(unsigned netid)
1859 {
1860     struct resolv_cache* cache = _find_named_cache_locked(netid);
1861     if (cache) {
1862         _cache_flush_locked(cache);
1863     }
1864 
1865     // Also clear the NS statistics.
1866     struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);
1867     _res_cache_clear_stats_locked(cache_info);
1868 }
1869 
_resolv_delete_cache_for_net(unsigned netid)1870 void _resolv_delete_cache_for_net(unsigned netid)
1871 {
1872     pthread_once(&_res_cache_once, _res_cache_init);
1873     pthread_mutex_lock(&_res_cache_list_lock);
1874 
1875     struct resolv_cache_info* prev_cache_info = &_res_cache_list;
1876 
1877     while (prev_cache_info->next) {
1878         struct resolv_cache_info* cache_info = prev_cache_info->next;
1879 
1880         if (cache_info->netid == netid) {
1881             prev_cache_info->next = cache_info->next;
1882             _cache_flush_locked(cache_info->cache);
1883             free(cache_info->cache->entries);
1884             free(cache_info->cache);
1885             _free_nameservers_locked(cache_info);
1886             free(cache_info);
1887             break;
1888         }
1889 
1890         prev_cache_info = prev_cache_info->next;
1891     }
1892 
1893     pthread_mutex_unlock(&_res_cache_list_lock);
1894 }
1895 
1896 static struct resolv_cache_info*
_create_cache_info(void)1897 _create_cache_info(void)
1898 {
1899     struct resolv_cache_info* cache_info;
1900 
1901     cache_info = calloc(sizeof(*cache_info), 1);
1902     return cache_info;
1903 }
1904 
1905 static void
_insert_cache_info_locked(struct resolv_cache_info * cache_info)1906 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
1907 {
1908     struct resolv_cache_info* last;
1909 
1910     for (last = &_res_cache_list; last->next; last = last->next);
1911 
1912     last->next = cache_info;
1913 
1914 }
1915 
1916 static struct resolv_cache*
_find_named_cache_locked(unsigned netid)1917 _find_named_cache_locked(unsigned netid) {
1918 
1919     struct resolv_cache_info* info = _find_cache_info_locked(netid);
1920 
1921     if (info != NULL) return info->cache;
1922 
1923     return NULL;
1924 }
1925 
1926 static struct resolv_cache_info*
_find_cache_info_locked(unsigned netid)1927 _find_cache_info_locked(unsigned netid)
1928 {
1929     struct resolv_cache_info* cache_info = _res_cache_list.next;
1930 
1931     while (cache_info) {
1932         if (cache_info->netid == netid) {
1933             break;
1934         }
1935 
1936         cache_info = cache_info->next;
1937     }
1938     return cache_info;
1939 }
1940 
1941 void
_resolv_set_default_params(struct __res_params * params)1942 _resolv_set_default_params(struct __res_params* params) {
1943     params->sample_validity = NSSAMPLE_VALIDITY;
1944     params->success_threshold = SUCCESS_THRESHOLD;
1945     params->min_samples = 0;
1946     params->max_samples = 0;
1947 }
1948 
/*
 * Configures the nameservers, search domains and resolver parameters for
 * |netid|, creating the per-network cache on first use.
 *
 * servers/numservers: numeric-host nameserver addresses (at most MAXNS).
 * domains:            whitespace-separated search-domain string.
 * params:             resolver tuning knobs, or NULL for defaults.
 *
 * Returns 0 on success, E2BIG if too many servers are given, EINVAL if an
 * address fails numeric parsing. NOTE(review): |domains| appears to be
 * assumed non-NULL (strlcpy below) — confirm against callers.
 *
 * Fix: previously, when the server list was unchanged — or when cache_info
 * could not be obtained — the addrinfo structures freshly allocated by
 * getaddrinfo() below were neither adopted nor freed, leaking memory on
 * every redundant configuration call. They are now released on those paths.
 */
int
_resolv_set_nameservers_for_net(unsigned netid, const char** servers, unsigned numservers,
        const char *domains, const struct __res_params* params)
{
    char sbuf[NI_MAXSERV];
    register char *cp;
    int *offset;
    struct addrinfo* nsaddrinfo[MAXNS];

    if (numservers > MAXNS) {
        XLOG("%s: numservers=%u, MAXNS=%u", __FUNCTION__, numservers, MAXNS);
        return E2BIG;
    }

    // Parse the addresses before actually locking or changing any state, in case there is an error.
    // As a side effect this also reduces the time the lock is kept.
    struct addrinfo hints = {
        .ai_family = AF_UNSPEC,
        .ai_socktype = SOCK_DGRAM,
        .ai_flags = AI_NUMERICHOST
    };
    snprintf(sbuf, sizeof(sbuf), "%u", NAMESERVER_PORT);
    for (unsigned i = 0; i < numservers; i++) {
        // On success, ownership of the addrinfo eventually transfers to the
        // cache_info (freed in _free_nameservers_locked()) or is released below.
        int rt = getaddrinfo(servers[i], sbuf, &hints, &nsaddrinfo[i]);
        if (rt != 0) {
            // Unwind: free everything parsed so far before bailing out.
            for (unsigned j = 0 ; j < i ; j++) {
                freeaddrinfo(nsaddrinfo[j]);
                nsaddrinfo[j] = NULL;
            }
            XLOG("%s: getaddrinfo(%s)=%s", __FUNCTION__, servers[i], gai_strerror(rt));
            return EINVAL;
        }
    }

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    // creates the cache if not created
    _get_res_cache_for_net_locked(netid);

    struct resolv_cache_info* cache_info = _find_cache_info_locked(netid);

    if (cache_info != NULL) {
        uint8_t old_max_samples = cache_info->params.max_samples;
        if (params != NULL) {
            cache_info->params = *params;
        } else {
            _resolv_set_default_params(&cache_info->params);
        }

        if (!_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
            // free current before adding new
            _free_nameservers_locked(cache_info);
            unsigned i;
            for (i = 0; i < numservers; i++) {
                // Ownership of nsaddrinfo[i] transfers to cache_info here.
                cache_info->nsaddrinfo[i] = nsaddrinfo[i];
                cache_info->nameservers[i] = strdup(servers[i]);
                XLOG("%s: netid = %u, addr = %s\n", __FUNCTION__, netid, servers[i]);
            }
            cache_info->nscount = numservers;

            // Clear the NS statistics because the mapping to nameservers might have changed.
            _res_cache_clear_stats_locked(cache_info);

            // increment the revision id to ensure that sample state is not written back if the
            // servers change; in theory it would suffice to do so only if the servers or
            // max_samples actually change, in practice the overhead of checking is higher than the
            // cost, and overflows are unlikely
            ++cache_info->revision_id;
        } else {
            // Server list unchanged: the parsed addrinfo structures were not
            // adopted above, so release them (leak fix).
            for (unsigned i = 0; i < numservers; i++) {
                freeaddrinfo(nsaddrinfo[i]);
                nsaddrinfo[i] = NULL;
            }
            if (cache_info->params.max_samples != old_max_samples) {
                // If the maximum number of samples changes, the overhead of keeping the most
                // recent samples around is not considered worth the effort, so they are cleared
                // instead. All other parameters do not affect shared state: Changing these
                // parameters does not invalidate the samples, as they only affect aggregation and
                // the conditions under which servers are considered usable.
                _res_cache_clear_stats_locked(cache_info);
                ++cache_info->revision_id;
            }
        }

        // Always update the search paths, since determining whether they actually changed is
        // complex due to the zero-padding, and probably not worth the effort. Cache-flushing
        // however is not necessary, since the stored cache entries do contain the domain, not
        // just the host name.
        // code moved from res_init.c, load_domain_search_list
        strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
        if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
            *cp = '\0';

        // Tokenize defdname in place on spaces/tabs, recording the offset of
        // each search domain into dnsrch_offset (at most MAXDNSRCH entries).
        cp = cache_info->defdname;
        offset = cache_info->dnsrch_offset;
        while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
            while (*cp == ' ' || *cp == '\t') /* skip leading white space */
                cp++;
            if (*cp == '\0') /* stop if nothing more to do */
                break;
            *offset++ = cp - cache_info->defdname; /* record this search domain */
            while (*cp) { /* zero-terminate it */
                if (*cp == ' '|| *cp == '\t') {
                    *cp++ = '\0';
                    break;
                }
                cp++;
            }
        }
        *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
    } else {
        // No cache_info exists and one could not be created: the parsed
        // addrinfo structures have no owner, so release them (leak fix).
        for (unsigned i = 0; i < numservers; i++) {
            freeaddrinfo(nsaddrinfo[i]);
            nsaddrinfo[i] = NULL;
        }
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
    return 0;
}
2060 
2061 static int
_resolv_is_nameservers_equal_locked(struct resolv_cache_info * cache_info,const char ** servers,int numservers)2062 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
2063         const char** servers, int numservers)
2064 {
2065     if (cache_info->nscount != numservers) {
2066         return 0;
2067     }
2068 
2069     // Compare each name server against current name servers.
2070     // TODO: this is incorrect if the list of current or previous nameservers
2071     // contains duplicates. This does not really matter because the framework
2072     // filters out duplicates, but we should probably fix it. It's also
2073     // insensitive to the order of the nameservers; we should probably fix that
2074     // too.
2075     for (int i = 0; i < numservers; i++) {
2076         for (int j = 0 ; ; j++) {
2077             if (j >= numservers) {
2078                 return 0;
2079             }
2080             if (strcmp(cache_info->nameservers[i], servers[j]) == 0) {
2081                 break;
2082             }
2083         }
2084     }
2085 
2086     return 1;
2087 }
2088 
2089 static void
_free_nameservers_locked(struct resolv_cache_info * cache_info)2090 _free_nameservers_locked(struct resolv_cache_info* cache_info)
2091 {
2092     int i;
2093     for (i = 0; i < cache_info->nscount; i++) {
2094         free(cache_info->nameservers[i]);
2095         cache_info->nameservers[i] = NULL;
2096         if (cache_info->nsaddrinfo[i] != NULL) {
2097             freeaddrinfo(cache_info->nsaddrinfo[i]);
2098             cache_info->nsaddrinfo[i] = NULL;
2099         }
2100         cache_info->nsstats[i].sample_count =
2101             cache_info->nsstats[i].sample_next = 0;
2102     }
2103     cache_info->nscount = 0;
2104     _res_cache_clear_stats_locked(cache_info);
2105     ++cache_info->revision_id;
2106 }
2107 
/*
 * Copies the nameserver addresses and search-domain list configured for
 * statp->netid into |statp|, so the legacy res_* machinery can use them.
 * No-op if statp is NULL or no cache info exists for that netid.
 */
void
_resolv_populate_res_for_net(res_state statp)
{
    if (statp == NULL) {
        return;
    }

    pthread_once(&_res_cache_once, _res_cache_init);
    pthread_mutex_lock(&_res_cache_list_lock);

    struct resolv_cache_info* info = _find_cache_info_locked(statp->netid);
    if (info != NULL) {
        int nserv;
        struct addrinfo* ai;
        XLOG("%s: %u\n", __FUNCTION__, statp->netid);
        for (nserv = 0; nserv < MAXNS; nserv++) {
            ai = info->nsaddrinfo[nserv];
            if (ai == NULL) {
                break;  /* nsaddrinfo is NULL-terminated up to MAXNS */
            }

            /* sizeof on the ext member is an unevaluated context, so this is
             * safe even when statp->_u._ext.ext is NULL (checked below). */
            if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
                if (statp->_u._ext.ext != NULL) {
                    memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
                    /* AF_UNSPEC marks the legacy slot as "address lives in ext". */
                    statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
                } else {
                    /* No extension block: fall back to the legacy sockaddr_in
                     * slots, skipping any address that does not fit. */
                    if ((size_t) ai->ai_addrlen
                            <= sizeof(statp->nsaddr_list[0])) {
                        memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
                                ai->ai_addrlen);
                    } else {
                        statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
                    }
                }
            } else {
                XLOG("%s: found too long addrlen", __FUNCTION__);
            }
        }
        statp->nscount = nserv;
        // now do search domains.  Note that we cache the offsets as this code runs a lot
        // but the setting/offset-computer only runs when set/changed
        // WARNING: Don't use str*cpy() here, this string contains zeroes.
        memcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
        register char **pp = statp->dnsrch;
        register int *p = info->dnsrch_offset;
        /* dnsrch_offset is -1 terminated; translate each offset into a
         * pointer into statp's own copy of defdname. */
        while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
            *pp++ = &statp->defdname[0] + *p++;
        }
    }
    pthread_mutex_unlock(&_res_cache_list_lock);
}
2159 
2160 /* Resolver reachability statistics. */
2161 
2162 static void
_res_cache_add_stats_sample_locked(struct __res_stats * stats,const struct __res_sample * sample,int max_samples)2163 _res_cache_add_stats_sample_locked(struct __res_stats* stats, const struct __res_sample* sample,
2164         int max_samples) {
2165     // Note: This function expects max_samples > 0, otherwise a (harmless) modification of the
2166     // allocated but supposedly unused memory for samples[0] will happen
2167     XLOG("%s: adding sample to stats, next = %d, count = %d", __FUNCTION__,
2168             stats->sample_next, stats->sample_count);
2169     stats->samples[stats->sample_next] = *sample;
2170     if (stats->sample_count < max_samples) {
2171         ++stats->sample_count;
2172     }
2173     if (++stats->sample_next >= max_samples) {
2174         stats->sample_next = 0;
2175     }
2176 }
2177 
2178 static void
_res_cache_clear_stats_locked(struct resolv_cache_info * cache_info)2179 _res_cache_clear_stats_locked(struct resolv_cache_info* cache_info) {
2180     if (cache_info) {
2181         for (int i = 0 ; i < MAXNS ; ++i) {
2182             cache_info->nsstats->sample_count = cache_info->nsstats->sample_next = 0;
2183         }
2184     }
2185 }
2186 
/*
 * Snapshots the resolver configuration and statistics for |netid|.
 * On success fills *nscount, servers[], *dcount, domains[], *params and
 * stats[], returning the current revision_id. Returns -1 when the netid
 * is unknown, or on internal consistency failures with errno set to
 * EFAULT (corrupt nscount), EMSGSIZE (bad address length), ENOENT
 * (missing address) or ENOTUNIQ (more than one address per addrinfo).
 */
int
android_net_res_stats_get_info_for_net(unsigned netid, int* nscount,
        struct sockaddr_storage servers[MAXNS], int* dcount, char domains[MAXDNSRCH][MAXDNSRCHPATH],
        struct __res_params* params, struct __res_stats stats[MAXNS]) {
    int revision_id = -1;
    pthread_mutex_lock(&_res_cache_list_lock);

    struct resolv_cache_info* info = _find_cache_info_locked(netid);
    if (info) {
        if (info->nscount > MAXNS) {
            pthread_mutex_unlock(&_res_cache_list_lock);
            XLOG("%s: nscount %d > MAXNS %d", __FUNCTION__, info->nscount, MAXNS);
            errno = EFAULT;
            return -1;
        }
        int i;
        // Validation pass: nothing is copied to the output buffers until the
        // whole stored configuration has been proven self-consistent.
        for (i = 0; i < info->nscount; i++) {
            // Verify that the following assumptions are held, failure indicates corruption:
            //  - getaddrinfo() may never return a sockaddr > sockaddr_storage
            //  - all addresses are valid
            //  - there is only one address per addrinfo thanks to numeric resolution
            int addrlen = info->nsaddrinfo[i]->ai_addrlen;
            if (addrlen < (int) sizeof(struct sockaddr) ||
                    addrlen > (int) sizeof(servers[0])) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_addrlen == %d", __FUNCTION__, i, addrlen);
                errno = EMSGSIZE;
                return -1;
            }
            if (info->nsaddrinfo[i]->ai_addr == NULL) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_addr == NULL", __FUNCTION__, i);
                errno = ENOENT;
                return -1;
            }
            if (info->nsaddrinfo[i]->ai_next != NULL) {
                pthread_mutex_unlock(&_res_cache_list_lock);
                XLOG("%s: nsaddrinfo[%d].ai_next != NULL", __FUNCTION__, i);
                errno = ENOTUNIQ;
                return -1;
            }
        }
        // Copy pass: addresses plus their per-server statistics.
        *nscount = info->nscount;
        for (i = 0; i < info->nscount; i++) {
            memcpy(&servers[i], info->nsaddrinfo[i]->ai_addr, info->nsaddrinfo[i]->ai_addrlen);
            stats[i] = info->nsstats[i];
        }
        for (i = 0; i < MAXDNSRCH; i++) {
            const char* cur_domain = info->defdname + info->dnsrch_offset[i];
            // dnsrch_offset[i] can either be -1 or point to an empty string to indicate the end
            // of the search offsets. Checking for < 0 is not strictly necessary, but safer.
            // TODO: Pass in a search domain array instead of a string to
            // _resolv_set_nameservers_for_net() and make this double check unnecessary.
            if (info->dnsrch_offset[i] < 0 ||
                    ((size_t)info->dnsrch_offset[i]) >= sizeof(info->defdname) || !cur_domain[0]) {
                break;
            }
            strlcpy(domains[i], cur_domain, MAXDNSRCHPATH);
        }
        *dcount = i;
        *params = info->params;
        revision_id = info->revision_id;
    }

    pthread_mutex_unlock(&_res_cache_list_lock);
    return revision_id;
}
2254 
2255 int
_resolv_cache_get_resolver_stats(unsigned netid,struct __res_params * params,struct __res_stats stats[MAXNS])2256 _resolv_cache_get_resolver_stats( unsigned netid, struct __res_params* params,
2257         struct __res_stats stats[MAXNS]) {
2258     int revision_id = -1;
2259     pthread_mutex_lock(&_res_cache_list_lock);
2260 
2261     struct resolv_cache_info* info = _find_cache_info_locked(netid);
2262     if (info) {
2263         memcpy(stats, info->nsstats, sizeof(info->nsstats));
2264         *params = info->params;
2265         revision_id = info->revision_id;
2266     }
2267 
2268     pthread_mutex_unlock(&_res_cache_list_lock);
2269     return revision_id;
2270 }
2271 
2272 void
_resolv_cache_add_resolver_stats_sample(unsigned netid,int revision_id,int ns,const struct __res_sample * sample,int max_samples)2273 _resolv_cache_add_resolver_stats_sample( unsigned netid, int revision_id, int ns,
2274        const struct __res_sample* sample, int max_samples) {
2275     if (max_samples <= 0) return;
2276 
2277     pthread_mutex_lock(&_res_cache_list_lock);
2278 
2279     struct resolv_cache_info* info = _find_cache_info_locked(netid);
2280 
2281     if (info && info->revision_id == revision_id) {
2282         _res_cache_add_stats_sample_locked(&info->nsstats[ns], sample, max_samples);
2283     }
2284 
2285     pthread_mutex_unlock(&_res_cache_list_lock);
2286 }
2287