1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "resolv_cache.h"
30 #include <resolv.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <time.h>
34 #include "pthread.h"
35 
36 #include <errno.h>
37 #include "arpa_nameser.h"
38 #include <sys/system_properties.h>
39 #include <net/if.h>
40 #include <netdb.h>
41 #include <linux/if.h>
42 
43 #include <arpa/inet.h>
44 #include "resolv_private.h"
45 #include "resolv_iface.h"
46 #include "res_private.h"
47 
48 /* This code implements a small and *simple* DNS resolver cache.
49  *
50  * It is only used to cache DNS answers for a time defined by the smallest TTL
51  * among the answer records in order to reduce DNS traffic. It is not supposed
52  * to be a full DNS cache, since we plan to implement that in the future in a
53  * dedicated process running on the system.
54  *
55  * Note that its design is very intentionally kept simple, i.e.:
56  *
57  *  - it takes raw DNS query packet data as input, and returns raw DNS
58  *    answer packet data as output
59  *
60  *    (this means that two similar queries that encode the DNS name
61  *     differently will be treated distinctly).
62  *
63  *    the smallest TTL value among the answer records is used as the time
64  *    to keep an answer in the cache.
65  *
66  *    this is bad, but we absolutely want to avoid parsing the answer packets
67  *    (and should be solved by the later full DNS cache process).
68  *
69  *  - the implementation is just a (query-data) => (answer-data) hash table
70  *    with a trivial least-recently-used expiration policy.
71  *
72  * Doing this keeps the code simple and avoids dealing with a lot of things
73  * that a full DNS cache is expected to do.
74  *
75  * The API is also very simple:
76  *
77  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
78  *     this will initialize the cache on first usage. the result can be NULL
79  *     if the cache is disabled.
80  *
81  *   - the client calls _resolv_cache_lookup() before performing a query
82  *
83  *     if the function returns RESOLV_CACHE_FOUND, a copy of the answer data
84  *     has been copied into the client-provided answer buffer.
85  *
86  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
87  *     a request normally, *then* call _resolv_cache_add() to add the received
88  *     answer to the cache.
89  *
90  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
91  *     perform a request normally, and *not* call _resolv_cache_add()
92  *
93  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
94  *     is too short to accommodate the cached result.
95  *
96  *  - when network settings change, the cache must be flushed since the list
97  *    of DNS servers probably changed. this is done by calling
98  *    _resolv_cache_reset()
99  *
100  *    the parameter to this function must be an ever-increasing generation
101  *    number corresponding to the current network settings state.
102  *
103  *    This is done because several threads could detect the same network
104  *    settings change (but at different times) and will all end up calling the
105  *    same function. Comparing with the last used generation number ensures
106  *    that the cache is only flushed once per network change.
107  */
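
/* Illustrative usage sketch (not part of the original source): a client of
 * this cache, holding a handle 'cache' (e.g. as returned by
 * _resolv_cache_get(), which is declared elsewhere), would typically follow
 * the lookup/add protocol described above like this. 'send_query_to_server'
 * is a hypothetical helper standing in for the actual network transaction:
 *
 *     int                anslen = 0;
 *     ResolvCacheStatus  status;
 *
 *     status = _resolv_cache_lookup(cache, query, querylen,
 *                                   answer, sizeof(answer), &anslen);
 *
 *     if (status == RESOLV_CACHE_FOUND) {
 *         // 'answer' already holds 'anslen' bytes of cached answer data
 *     } else {
 *         anslen = send_query_to_server(query, querylen,        // hypothetical
 *                                       answer, sizeof(answer));
 *         if (status == RESOLV_CACHE_NOTFOUND) {
 *             if (anslen > 0)
 *                 _resolv_cache_add(cache, query, querylen, answer, anslen);
 *             else
 *                 _resolv_cache_query_failed(cache, query, querylen);
 *         }
 *         // RESOLV_CACHE_UNSUPPORTED: do not call _resolv_cache_add()
 *     }
 */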
108 
109 /* the name of an environment variable that will be checked the first time
110  * this code is called. if its value is "0", then the resolver cache is
111  * disabled.
112  */
113 #define  CONFIG_ENV  "BIONIC_DNSCACHE"
114 
115 /* entries older than CONFIG_SECONDS seconds are always discarded.
116  */
117 #define  CONFIG_SECONDS    (60*10)    /* 10 minutes */
118 
119 /* default number of entries kept in the cache. This value has been
120  * determined by browsing through various sites and counting the number
121  * of corresponding requests. Keep in mind that our framework is currently
122  * performing two requests per name lookup (one for IPv4, the other for IPv6)
123  *
124  *    www.google.com      4
125  *    www.ysearch.com     6
126  *    www.amazon.com      8
127  *    www.nytimes.com     22
128  *    www.espn.com        28
129  *    www.msn.com         28
130  *    www.lemonde.fr      35
131  *
132  * (determined on 2009-2-17 from Paris, France; results may vary depending
133  *  on location)
134  *
135  * most high-level websites use lots of media/ad servers with different names
136  * but these are generally reused when browsing through the site.
137  *
138  * As such, a value of 64 should be relatively comfortable at the moment.
139  *
140  * The system property ro.net.dns_cache_size can be used to override the default
141  * value with a custom value
142  *
143  *
144  * ******************************************
145  * * NOTE - this has changed.
146  * * 1) we've added IPv6 support so each dns query results in 2 responses
147  * * 2) we've made this a system-wide cache, so the cost is less (it's not
148  * *    duplicated in each process) and the need is greater (more processes
149  * *    making different requests).
150  * * Upping by 2x for IPv6
151  * * Upping by another 5x for the centralized nature
152  * *****************************************
153  */
154 #define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)
155 /* name of the system property that can be used to set the cache size */
156 #define  DNS_CACHE_SIZE_PROP_NAME   "ro.net.dns_cache_size"
157 
158 /****************************************************************************/
159 /****************************************************************************/
160 /*****                                                                  *****/
161 /*****                                                                  *****/
162 /*****                                                                  *****/
163 /****************************************************************************/
164 /****************************************************************************/
165 
166 /* set to 1 to debug cache operations */
167 #define  DEBUG       0
168 
169 /* set to 1 to debug query data */
170 #define  DEBUG_DATA  0
171 
172 #undef XLOG
173 #if DEBUG
174 #  include "libc_logging.h"
175 #  define XLOG(...)  __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
176 
177 #include <stdio.h>
178 #include <stdarg.h>
179 
180 /** BOUNDED BUFFER FORMATTING
181  **/
182 
183 /* technical note:
184  *
185  *   the following debugging routines are used to append data to a bounded
186  *   buffer. they take two parameters that are:
187  *
188  *   - p : a pointer to the current cursor position in the buffer
189  *         this value is initially set to the buffer's address.
190  *
191  *   - end : the address of the buffer's limit, i.e. of the first byte
192  *           after the buffer. this address should never be touched.
193  *
194  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
195  *                      that the buffer is at least one byte.
196  *
197  *   the _bprint_() functions return the new value of 'p' after the data
198  *   has been appended, and also ensure the following:
199  *
200  *   - the returned value will never be strictly greater than 'end'
201  *
202  *   - a return value equal to 'end' means that truncation occurred
203  *     (in which case, end[-1] will be set to 0)
204  *
205  *   - after returning from a _bprint_() function, the content of the buffer
206  *     is always 0-terminated, even in the event of truncation.
207  *
208  *  these conventions allow you to call _bprint_ functions multiple times and
209  *  only check for truncation at the end of the sequence, as in:
210  *
211  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
212  *
213  *     p = _bprint_c(p, end, '"');
214  *     p = _bprint_s(p, end, my_string);
215  *     p = _bprint_c(p, end, '"');
216  *
217  *     if (p >= end) {
218  *        // buffer was too small
219  *     }
220  *
221  *     printf( "%s", buff );
222  */
223 
224 /* add a char to a bounded buffer */
225 static char*
226 _bprint_c( char*  p, char*  end, int  c )
227 {
228     if (p < end) {
229         if (p+1 == end)
230             *p++ = 0;
231         else {
232             *p++ = (char) c;
233             *p   = 0;
234         }
235     }
236     return p;
237 }
238 
239 /* add a sequence of bytes to a bounded buffer */
240 static char*
241 _bprint_b( char*  p, char*  end, const char*  buf, int  len )
242 {
243     int  avail = end - p;
244 
245     if (avail <= 0 || len <= 0)
246         return p;
247 
248     if (avail > len)
249         avail = len;
250 
251     memcpy( p, buf, avail );
252     p += avail;
253 
254     if (p < end)
255         p[0] = 0;
256     else
257         end[-1] = 0;
258 
259     return p;
260 }
261 
262 /* add a string to a bounded buffer */
263 static char*
264 _bprint_s( char*  p, char*  end, const char*  str )
265 {
266     return _bprint_b(p, end, str, strlen(str));
267 }
268 
269 /* add a formatted string to a bounded buffer */
270 static char*
271 _bprint( char*  p, char*  end, const char*  format, ... )
272 {
273     int      avail, n;
274     va_list  args;
275 
276     avail = end - p;
277 
278     if (avail <= 0)
279         return p;
280 
281     va_start(args, format);
282     n = vsnprintf( p, avail, format, args);
283     va_end(args);
284 
285     /* certain C libraries return -1 in case of truncation */
286     if (n < 0 || n > avail)
287         n = avail;
288 
289     p += n;
290     /* certain C libraries do not zero-terminate in case of truncation */
291     if (p == end)
292         p[-1] = 0;
293 
294     return p;
295 }
296 
297 /* add a hex value to a bounded buffer, up to 8 digits */
298 static char*
299 _bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
300 {
301     char   text[sizeof(unsigned)*2];
302     int    nn = 0;
303 
304     while (numDigits-- > 0) {
305         text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
306     }
307     return _bprint_b(p, end, text, nn);
308 }
309 
310 /* add the hexadecimal dump of some memory area to a bounded buffer */
311 static char*
312 _bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
313 {
314     int   lineSize = 16;
315 
316     while (datalen > 0) {
317         int  avail = datalen;
318         int  nn;
319 
320         if (avail > lineSize)
321             avail = lineSize;
322 
323         for (nn = 0; nn < avail; nn++) {
324             if (nn > 0)
325                 p = _bprint_c(p, end, ' ');
326             p = _bprint_hex(p, end, data[nn], 2);
327         }
328         for ( ; nn < lineSize; nn++ ) {
329             p = _bprint_s(p, end, "   ");
330         }
331         p = _bprint_s(p, end, "  ");
332 
333         for (nn = 0; nn < avail; nn++) {
334             int  c = data[nn];
335 
336             if (c < 32 || c > 127)
337                 c = '.';
338 
339             p = _bprint_c(p, end, c);
340         }
341         p = _bprint_c(p, end, '\n');
342 
343         data    += avail;
344         datalen -= avail;
345     }
346     return p;
347 }
348 
349 /* dump the content of a query of packet to the log */
350 static void
351 XLOG_BYTES( const void*  base, int  len )
352 {
353     char  buff[1024];
354     char*  p = buff, *end = p + sizeof(buff);
355 
356     p = _bprint_hexdump(p, end, base, len);
357     XLOG("%s",buff);
358 }
359 
360 #else /* !DEBUG */
361 #  define  XLOG(...)        ((void)0)
362 #  define  XLOG_BYTES(a,b)  ((void)0)
363 #endif
364 
365 static time_t
366 _time_now( void )
367 {
368     struct timeval  tv;
369 
370     gettimeofday( &tv, NULL );
371     return tv.tv_sec;
372 }
373 
374 /* reminder: the general format of a DNS packet is the following:
375  *
376  *    HEADER  (12 bytes)
377  *    QUESTION  (variable)
378  *    ANSWER (variable)
379  *    AUTHORITY (variable)
380  *    ADDITIONAL (variable)
381  *
382  * the HEADER is made of:
383  *
384  *   ID     : 16 : 16-bit unique query identification field
385  *
386  *   QR     :  1 : set to 0 for queries, and 1 for responses
387  *   Opcode :  4 : set to 0 for queries
388  *   AA     :  1 : set to 0 for queries
389  *   TC     :  1 : truncation flag, will be set to 0 in queries
390  *   RD     :  1 : recursion desired
391  *
392  *   RA     :  1 : recursion available (0 in queries)
393  *   Z      :  3 : three reserved zero bits
394  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
395  *
396  *   QDCount: 16 : question count
397  *   ANCount: 16 : Answer count (0 in queries)
398  *   NSCount: 16 : Authority Record count (0 in queries)
399  *   ARCount: 16 : Additional Record count (0 in queries)
400  *
401  * the QUESTION is made of QDCount Question Record (QRs)
402  * the ANSWER is made of ANCount RRs
403  * the AUTHORITY is made of NSCount RRs
404  * the ADDITIONAL is made of ARCount RRs
405  *
406  * Each Question Record (QR) is made of:
407  *
408  *   QNAME   : variable : Query DNS NAME
409  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
410  *   CLASS   : 16       : class of query (IN=1)
411  *
412  * Each Resource Record (RR) is made of:
413  *
414  *   NAME    : variable : DNS NAME
415  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
416  *   CLASS   : 16       : class of query (IN=1)
417  *   TTL     : 32       : seconds to cache this RR (0=none)
418  *   RDLENGTH: 16       : size of RDDATA in bytes
419  *   RDDATA  : variable : RR data (depends on TYPE)
420  *
421  * Each QNAME contains a domain name encoded as a sequence of 'labels'
422  * terminated by a zero. Each label has the following format:
423  *
424  *    LEN  : 8     : length of label (MUST be < 64)
425  *    NAME : 8*LEN : label characters (must exclude dots)
426  *
427  * A value of 0 in the encoding is interpreted as the 'root' domain and
428  * terminates the encoding. So 'www.android.com' will be encoded as:
429  *
430  *   <3>www<7>android<3>com<0>
431  *
432  * Where <n> represents the byte with value 'n'
433  *
434  * Each NAME reflects the QNAME of the question, but has a slightly more
435  * complex encoding in order to provide message compression. This is achieved
436  * by using a 2-byte pointer, with format:
437  *
438  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
439  *    OFFSET : 14 : offset to another part of the DNS packet
440  *
441  * The offset is relative to the start of the DNS packet and must point
442  * to a prior occurrence of the same name. A pointer terminates the encoding.
443  *
444  * The NAME can be encoded in one of the following formats:
445  *
446  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
447  *   - a single pointer
448  *   - a sequence of simple labels terminated by a pointer
449  *
450  * A pointer shall always point to either a pointer or a sequence of
451  * labels (which can themselves be terminated by either a 0 or a pointer)
452  *
453  * The expanded length of a given domain name should not exceed 255 bytes.
454  *
455  * NOTE: we don't parse the answer packets, so don't need to deal with NAME
456  *       records, only QNAMEs.
457  */
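
/* Illustrative sketch (not part of the original source): a minimal encoder
 * for the QNAME label format described above. '_encode_qname' is a
 * hypothetical helper shown only to make the encoding concrete; the resolver
 * itself receives names already encoded inside the query packet:
 *
 *     // returns the encoded length in bytes, or -1 if 'out' is too small
 *     // or a label is empty / 64 bytes or longer
 *     static int
 *     _encode_qname( const char*  name, uint8_t*  out, int  outlen )
 *     {
 *         int  pos = 0;
 *
 *         while (*name) {
 *             const char*  dot = strchr(name, '.');
 *             int          len = dot ? (int)(dot - name) : (int)strlen(name);
 *
 *             if (len <= 0 || len >= 64 || pos + 1 + len >= outlen)
 *                 return -1;
 *
 *             out[pos++] = (uint8_t) len;      // length byte
 *             memcpy(out + pos, name, len);    // label characters, no dot
 *             pos  += len;
 *             name += len + (dot ? 1 : 0);
 *         }
 *         if (pos >= outlen)
 *             return -1;
 *         out[pos++] = 0;                      // root label terminates the name
 *         return pos;    // "www.android.com" -> <3>www<7>android<3>com<0>
 *     }
 */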
458 
459 #define  DNS_HEADER_SIZE  12
460 
461 #define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
462 #define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
463 #define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
464 #define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
465 #define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
466 
467 #define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
468 
469 typedef struct {
470     const uint8_t*  base;
471     const uint8_t*  end;
472     const uint8_t*  cursor;
473 } DnsPacket;
474 
475 static void
476 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
477 {
478     packet->base   = buff;
479     packet->end    = buff + bufflen;
480     packet->cursor = buff;
481 }
482 
483 static void
484 _dnsPacket_rewind( DnsPacket*  packet )
485 {
486     packet->cursor = packet->base;
487 }
488 
489 static void
490 _dnsPacket_skip( DnsPacket*  packet, int  count )
491 {
492     const uint8_t*  p = packet->cursor + count;
493 
494     if (p > packet->end)
495         p = packet->end;
496 
497     packet->cursor = p;
498 }
499 
500 static int
501 _dnsPacket_readInt16( DnsPacket*  packet )
502 {
503     const uint8_t*  p = packet->cursor;
504 
505     if (p+2 > packet->end)
506         return -1;
507 
508     packet->cursor = p+2;
509     return (p[0]<< 8) | p[1];
510 }
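
/* Illustrative sketch (not part of the original source): the cursor-based
 * helpers above can be combined to read fixed-size header fields out of a
 * raw packet held in an arbitrary buffer 'buff' of 'bufflen' bytes
 * (placeholder names), e.g. the 16-bit query ID and QDCOUNT:
 *
 *     DnsPacket  pack[1];
 *     int        query_id, qdcount;
 *
 *     _dnsPacket_init(pack, buff, bufflen);
 *     query_id = _dnsPacket_readInt16(pack);   // bytes 0-1, or -1 if too short
 *     _dnsPacket_skip(pack, 2);                // skip the two flag bytes
 *     qdcount  = _dnsPacket_readInt16(pack);   // bytes 4-5
 */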
511 
512 /** QUERY CHECKING
513  **/
514 
515 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
516  * the cursor is only advanced in the case of success
517  */
518 static int
519 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
520 {
521     const uint8_t*  p = packet->cursor;
522 
523     if (p + numBytes > packet->end)
524         return 0;
525 
526     if (memcmp(p, bytes, numBytes) != 0)
527         return 0;
528 
529     packet->cursor = p + numBytes;
530     return 1;
531 }
532 
533 /* parse and skip a given QNAME stored in a query packet,
534  * from the current cursor position. returns 1 on success,
535  * or 0 for malformed data.
536  */
537 static int
538 _dnsPacket_checkQName( DnsPacket*  packet )
539 {
540     const uint8_t*  p   = packet->cursor;
541     const uint8_t*  end = packet->end;
542 
543     for (;;) {
544         int  c;
545 
546         if (p >= end)
547             break;
548 
549         c = *p++;
550 
551         if (c == 0) {
552             packet->cursor = p;
553             return 1;
554         }
555 
556         /* we don't expect label compression in QNAMEs */
557         if (c >= 64)
558             break;
559 
560         p += c;
561         /* we rely on the bound check at the start
562          * of the loop here */
563     }
564     /* malformed data */
565     XLOG("malformed QNAME");
566     return 0;
567 }
568 
569 /* parse and skip a given QR stored in a packet.
570  * returns 1 on success, and 0 on failure
571  */
572 static int
573 _dnsPacket_checkQR( DnsPacket*  packet )
574 {
575     if (!_dnsPacket_checkQName(packet))
576         return 0;
577 
578     /* TYPE must be one of the things we support */
579     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
580         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
581         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
582         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
583         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
584     {
585         XLOG("unsupported TYPE");
586         return 0;
587     }
588     /* CLASS must be IN */
589     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
590         XLOG("unsupported CLASS");
591         return 0;
592     }
593 
594     return 1;
595 }
596 
597 /* check the header of a DNS Query packet, return 1 if it is one
598  * type of query we can cache, or 0 otherwise
599  */
600 static int
601 _dnsPacket_checkQuery( DnsPacket*  packet )
602 {
603     const uint8_t*  p = packet->base;
604     int             qdCount, anCount, dnCount, arCount;
605 
606     if (p + DNS_HEADER_SIZE > packet->end) {
607         XLOG("query packet too small");
608         return 0;
609     }
610 
611     /* QR must be set to 0, opcode must be 0 and AA must be 0 */
612     /* RA, Z, and RCODE must be 0 */
613     if ((p[2] & 0xFC) != 0 || p[3] != 0) {
614         XLOG("query packet flags unsupported");
615         return 0;
616     }
617 
618     /* Note that we ignore the TC and RD bits here for the
619      * following reasons:
620      *
621      * - there is no point for a query packet sent to a server
622      *   to have the TC bit set, but the implementation might
623      *   set the bit in the query buffer for its own needs
624      *   between a _resolv_cache_lookup and a
625      *   _resolv_cache_add. We should not freak out if this
626      *   is the case.
627      *
628      * - we consider that the result from a RD=0 or a RD=1
629      *   query might be different, hence that the RD bit
630      *   should be used to differentiate cached result.
631      *
632      *   this implies that RD is checked when hashing or
633      *   comparing query packets, but not TC
634      */
635 
636     /* ANCOUNT, NSCOUNT and ARCOUNT must be 0 */
637     qdCount = (p[4] << 8) | p[5];
638     anCount = (p[6] << 8) | p[7];
639     dnCount = (p[8] << 8) | p[9];
640     arCount = (p[10]<< 8) | p[11];
641 
642     if (anCount != 0 || dnCount != 0 || arCount != 0) {
643         XLOG("query packet contains non-query records");
644         return 0;
645     }
646 
647     if (qdCount == 0) {
648         XLOG("query packet doesn't contain query record");
649         return 0;
650     }
651 
652     /* Check QDCOUNT QRs */
653     packet->cursor = p + DNS_HEADER_SIZE;
654 
655     for (;qdCount > 0; qdCount--)
656         if (!_dnsPacket_checkQR(packet))
657             return 0;
658 
659     return 1;
660 }
661 
662 /** QUERY DEBUGGING
663  **/
664 #if DEBUG
665 static char*
666 _dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
667 {
668     const uint8_t*  p   = packet->cursor;
669     const uint8_t*  end = packet->end;
670     int             first = 1;
671 
672     for (;;) {
673         int  c;
674 
675         if (p >= end)
676             break;
677 
678         c = *p++;
679 
680         if (c == 0) {
681             packet->cursor = p;
682             return bp;
683         }
684 
685         /* we don't expect label compression in QNAMEs */
686         if (c >= 64)
687             break;
688 
689         if (first)
690             first = 0;
691         else
692             bp = _bprint_c(bp, bend, '.');
693 
694         bp = _bprint_b(bp, bend, (const char*)p, c);
695 
696         p += c;
697         /* we rely on the bound check at the start
698          * of the loop here */
699     }
700     /* malformed data */
701     bp = _bprint_s(bp, bend, "<MALFORMED>");
702     return bp;
703 }
704 
705 static char*
706 _dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
707 {
708 #define  QQ(x)   { DNS_TYPE_##x, #x }
709     static const struct {
710         const char*  typeBytes;
711         const char*  typeString;
712     } qTypes[] =
713     {
714         QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
715         { NULL, NULL }
716     };
717     int          nn;
718     const char*  typeString = NULL;
719 
720     /* dump QNAME */
721     p = _dnsPacket_bprintQName(packet, p, end);
722 
723     /* dump TYPE */
724     p = _bprint_s(p, end, " (");
725 
726     for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
727         if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
728             typeString = qTypes[nn].typeString;
729             break;
730         }
731     }
732 
733     if (typeString != NULL)
734         p = _bprint_s(p, end, typeString);
735     else {
736         int  typeCode = _dnsPacket_readInt16(packet);
737         p = _bprint(p, end, "UNKNOWN-%d", typeCode);
738     }
739 
740     p = _bprint_c(p, end, ')');
741 
742     /* skip CLASS */
743     _dnsPacket_skip(packet, 2);
744     return p;
745 }
746 
747 /* this function assumes the packet has already been checked */
748 static char*
749 _dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
750 {
751     int   qdCount;
752 
753     if (packet->base[2] & 0x1) {
754         p = _bprint_s(p, end, "RECURSIVE ");
755     }
756 
757     _dnsPacket_skip(packet, 4);
758     qdCount = _dnsPacket_readInt16(packet);
759     _dnsPacket_skip(packet, 6);
760 
761     for ( ; qdCount > 0; qdCount-- ) {
762         p = _dnsPacket_bprintQR(packet, p, end);
763     }
764     return p;
765 }
766 #endif
767 
768 
769 /** QUERY HASHING SUPPORT
770  **
771  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
772  ** BEEN SUCCESSFULLY CHECKED.
773  **/
774 
775 /* use 32-bit FNV hash function */
776 #define  FNV_MULT   16777619U
777 #define  FNV_BASIS  2166136261U
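
/* For reference, the FNV-1 recurrence used by the hashing helpers below is:
 *
 *     hash = FNV_BASIS
 *     hash = (hash * FNV_MULT) ^ byte        for each input byte
 *
 * with all arithmetic performed modulo 2^32 (unsigned 32-bit wrap-around).
 */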
778 
779 static unsigned
780 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
781 {
782     const uint8_t*  p   = packet->cursor;
783     const uint8_t*  end = packet->end;
784 
785     while (numBytes > 0 && p < end) {
786         hash = hash*FNV_MULT ^ *p++; numBytes -= 1;
787     }
788     packet->cursor = p;
789     return hash;
790 }
791 
792 
793 static unsigned
794 _dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
795 {
796     const uint8_t*  p   = packet->cursor;
797     const uint8_t*  end = packet->end;
798 
799     for (;;) {
800         int  c;
801 
802         if (p >= end) {  /* should not happen */
803             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
804             break;
805         }
806 
807         c = *p++;
808 
809         if (c == 0)
810             break;
811 
812         if (c >= 64) {
813             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
814             break;
815         }
816         if (p + c >= end) {
817             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
818                     __FUNCTION__);
819             break;
820         }
821         while (c > 0) {
822             hash = hash*FNV_MULT ^ *p++;
823             c   -= 1;
824         }
825     }
826     packet->cursor = p;
827     return hash;
828 }
829 
830 static unsigned
831 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
832 {
833     hash = _dnsPacket_hashQName(packet, hash);
834     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
835     return hash;
836 }
837 
838 static unsigned
839 _dnsPacket_hashQuery( DnsPacket*  packet )
840 {
841     unsigned  hash = FNV_BASIS;
842     int       count;
843     _dnsPacket_rewind(packet);
844 
845     /* we ignore the TC bit for reasons explained in
846      * _dnsPacket_checkQuery().
847      *
848      * however we hash the RD bit to differentiate
849      * between answers for recursive and non-recursive
850      * queries.
851      */
852     hash = hash*FNV_MULT ^ (packet->base[2] & 1);
853 
854     /* assume: other flags are 0 */
855     _dnsPacket_skip(packet, 4);
856 
857     /* read QDCOUNT */
858     count = _dnsPacket_readInt16(packet);
859 
860     /* assume: ANcount, NScount, ARcount are 0 */
861     _dnsPacket_skip(packet, 6);
862 
863     /* hash QDCOUNT QRs */
864     for ( ; count > 0; count-- )
865         hash = _dnsPacket_hashQR(packet, hash);
866 
867     return hash;
868 }
869 
870 
871 /** QUERY COMPARISON
872  **
873  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
874  ** BEEN SUCCESSFULLY CHECKED.
875  **/
876 
877 static int
878 _dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
879 {
880     const uint8_t*  p1   = pack1->cursor;
881     const uint8_t*  end1 = pack1->end;
882     const uint8_t*  p2   = pack2->cursor;
883     const uint8_t*  end2 = pack2->end;
884 
885     for (;;) {
886         int  c1, c2;
887 
888         if (p1 >= end1 || p2 >= end2) {
889             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
890             break;
891         }
892         c1 = *p1++;
893         c2 = *p2++;
894         if (c1 != c2)
895             break;
896 
897         if (c1 == 0) {
898             pack1->cursor = p1;
899             pack2->cursor = p2;
900             return 1;
901         }
902         if (c1 >= 64) {
903             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
904             break;
905         }
906         if ((p1+c1 > end1) || (p2+c1 > end2)) {
907             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
908                     __FUNCTION__);
909             break;
910         }
911         if (memcmp(p1, p2, c1) != 0)
912             break;
913         p1 += c1;
914         p2 += c1;
915         /* we rely on the bound checks at the start of the loop */
916     }
917     /* not the same, or one is malformed */
918     XLOG("different DN");
919     return 0;
920 }
921 
922 static int
923 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
924 {
925     const uint8_t*  p1 = pack1->cursor;
926     const uint8_t*  p2 = pack2->cursor;
927 
928     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
929         return 0;
930 
931     if ( memcmp(p1, p2, numBytes) != 0 )
932         return 0;
933 
934     pack1->cursor += numBytes;
935     pack2->cursor += numBytes;
936     return 1;
937 }
938 
939 static int
940 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
941 {
942     /* compare domain name encoding + TYPE + CLASS */
943     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
944          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
945         return 0;
946 
947     return 1;
948 }
949 
950 static int
951 _dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
952 {
953     int  count1, count2;
954 
955     /* compare the headers, ignore most fields */
956     _dnsPacket_rewind(pack1);
957     _dnsPacket_rewind(pack2);
958 
959     /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
960     if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
961         XLOG("different RD");
962         return 0;
963     }
964 
965     /* assume: other flags are all 0 */
966     _dnsPacket_skip(pack1, 4);
967     _dnsPacket_skip(pack2, 4);
968 
969     /* compare QDCOUNT */
970     count1 = _dnsPacket_readInt16(pack1);
971     count2 = _dnsPacket_readInt16(pack2);
972     if (count1 != count2 || count1 < 0) {
973         XLOG("different QDCOUNT");
974         return 0;
975     }
976 
977     /* assume: ANcount, NScount and ARcount are all 0 */
978     _dnsPacket_skip(pack1, 6);
979     _dnsPacket_skip(pack2, 6);
980 
981     /* compare the QDCOUNT QRs */
982     for ( ; count1 > 0; count1-- ) {
983         if (!_dnsPacket_isEqualQR(pack1, pack2)) {
984             XLOG("different QR");
985             return 0;
986         }
987     }
988     return 1;
989 }
990 
991 /****************************************************************************/
992 /****************************************************************************/
993 /*****                                                                  *****/
994 /*****                                                                  *****/
995 /*****                                                                  *****/
996 /****************************************************************************/
997 /****************************************************************************/
998 
999 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
1000  * structure though they are conceptually part of the hash table.
1001  *
1002  * similarly, mru_next and mru_prev are part of the global MRU list
1003  */
1004 typedef struct Entry {
1005     unsigned int     hash;   /* hash value */
1006     struct Entry*    hlink;  /* next in collision chain */
1007     struct Entry*    mru_prev;
1008     struct Entry*    mru_next;
1009 
1010     const uint8_t*   query;
1011     int              querylen;
1012     const uint8_t*   answer;
1013     int              answerlen;
1014     time_t           expires;   /* time_t when the entry isn't valid any more */
1015     int              id;        /* for debugging purpose */
1016 } Entry;
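
/* Illustrative note (not part of the original source): entry_alloc() below
 * stores each cache node in a single calloc()'ed block, with the query and
 * answer packets placed right after the Entry header:
 *
 *     +---------+----------------------+------------------------+
 *     |  Entry  | query (querylen B)   | answer (answerlen B)   |
 *     +---------+----------------------+------------------------+
 *     ^ e       ^ e->query == (e+1)    ^ e->answer == e->query + e->querylen
 *
 * which is why entry_free() can release everything with one free() call.
 */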
1017 
1018 /**
1019  * Find the TTL for a negative DNS result.  This is defined as the minimum
1020  * of the SOA records TTL and the MINIMUM-TTL field (RFC-2308).
1021  *
1022  * Return 0 if not found.
1023  */
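/* For reference (RFC 1035), the SOA RDATA walked by this function is laid
 * out as follows; the parser skips the two names, checks that exactly
 * 5*NS_INT32SZ bytes remain, then reads the last 32-bit field:
 *
 *     MNAME    : variable : primary name server (skipped with dn_skipname)
 *     RNAME    : variable : responsible mailbox (skipped with dn_skipname)
 *     SERIAL   : 32
 *     REFRESH  : 32
 *     RETRY    : 32
 *     EXPIRE   : 32
 *     MINIMUM  : 32       : negative-caching TTL, read with ns_get32()
 */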
1024 static u_long
1025 answer_getNegativeTTL(ns_msg handle) {
1026     int n, nscount;
1027     u_long result = 0;
1028     ns_rr rr;
1029 
1030     nscount = ns_msg_count(handle, ns_s_ns);
1031     for (n = 0; n < nscount; n++) {
1032         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1033             const u_char *rdata = ns_rr_rdata(rr); // find the data
1034             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1035             int len;
1036             u_long ttl, rec_result = ns_rr_ttl(rr);
1037 
1038             // find the MINIMUM-TTL field from the blob of binary data for this record
1039             // skip the server name
1040             len = dn_skipname(rdata, edata);
1041             if (len == -1) continue; // error skipping
1042             rdata += len;
1043 
1044             // skip the admin name
1045             len = dn_skipname(rdata, edata);
1046             if (len == -1) continue; // error skipping
1047             rdata += len;
1048 
1049             if (edata - rdata != 5*NS_INT32SZ) continue;
1050             // skip: serial number + refresh interval + retry interval + expiry
1051             rdata += NS_INT32SZ * 4;
1052             // finally read the MINIMUM TTL
1053             ttl = ns_get32(rdata);
1054             if (ttl < rec_result) {
1055                 rec_result = ttl;
1056             }
1057             // Now that the record is read successfully, apply the new min TTL
1058             if (n == 0 || rec_result < result) {
1059                 result = rec_result;
1060             }
1061         }
1062     }
1063     return result;
1064 }
1065 
1066 /**
1067  * Parse the answer records and find the appropriate
1068  * smallest TTL among the records.  This might be from
1069  * the answer records if found or from the SOA record
1070  * if it's a negative result.
1071  *
1072  * The returned TTL is the number of seconds to
1073  * keep the answer in the cache.
1074  *
1075  * In case of parse error zero (0) is returned which
1076  * indicates that the answer shall not be cached.
1077  */
1078 static u_long
1079 answer_getTTL(const void* answer, int answerlen)
1080 {
1081     ns_msg handle;
1082     int ancount, n;
1083     u_long result, ttl;
1084     ns_rr rr;
1085 
1086     result = 0;
1087     if (ns_initparse(answer, answerlen, &handle) >= 0) {
1088         // get number of answer records
1089         ancount = ns_msg_count(handle, ns_s_an);
1090 
1091         if (ancount == 0) {
1092             // a response with no answers?  Cache this negative result.
1093             result = answer_getNegativeTTL(handle);
1094         } else {
1095             for (n = 0; n < ancount; n++) {
1096                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1097                     ttl = ns_rr_ttl(rr);
1098                     if (n == 0 || ttl < result) {
1099                         result = ttl;
1100                     }
1101                 } else {
1102                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1103                 }
1104             }
1105         }
1106     } else {
1107         XLOG("ns_parserr failed. %s\n", strerror(errno));
1108     }
1109 
1110     XLOG("TTL = %lu\n", result);
1111 
1112     return result;
1113 }
1114 
1115 static void
1116 entry_free( Entry*  e )
1117 {
1118     /* everything is allocated in a single memory block */
1119     if (e) {
1120         free(e);
1121     }
1122 }
1123 
1124 static __inline__ void
1125 entry_mru_remove( Entry*  e )
1126 {
1127     e->mru_prev->mru_next = e->mru_next;
1128     e->mru_next->mru_prev = e->mru_prev;
1129 }
1130 
1131 static __inline__ void
1132 entry_mru_add( Entry*  e, Entry*  list )
1133 {
1134     Entry*  first = list->mru_next;
1135 
1136     e->mru_next = first;
1137     e->mru_prev = list;
1138 
1139     list->mru_next  = e;
1140     first->mru_prev = e;
1141 }
1142 
1143 /* compute the hash of a given entry, this is a hash of most
1144  * data in the query (key) */
1145 static unsigned
1146 entry_hash( const Entry*  e )
1147 {
1148     DnsPacket  pack[1];
1149 
1150     _dnsPacket_init(pack, e->query, e->querylen);
1151     return _dnsPacket_hashQuery(pack);
1152 }
1153 
1154 /* initialize an Entry as a search key, this also checks the input query packet
1155  * returns 1 on success, or 0 in case of unsupported/malformed data */
1156 static int
1157 entry_init_key( Entry*  e, const void*  query, int  querylen )
1158 {
1159     DnsPacket  pack[1];
1160 
1161     memset(e, 0, sizeof(*e));
1162 
1163     e->query    = query;
1164     e->querylen = querylen;
1165     e->hash     = entry_hash(e);
1166 
1167     _dnsPacket_init(pack, query, querylen);
1168 
1169     return _dnsPacket_checkQuery(pack);
1170 }
1171 
1172 /* allocate a new entry as a cache node */
1173 static Entry*
1174 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
1175 {
1176     Entry*  e;
1177     int     size;
1178 
1179     size = sizeof(*e) + init->querylen + answerlen;
1180     e    = calloc(size, 1);
1181     if (e == NULL)
1182         return e;
1183 
1184     e->hash     = init->hash;
1185     e->query    = (const uint8_t*)(e+1);
1186     e->querylen = init->querylen;
1187 
1188     memcpy( (char*)e->query, init->query, e->querylen );
1189 
1190     e->answer    = e->query + e->querylen;
1191     e->answerlen = answerlen;
1192 
1193     memcpy( (char*)e->answer, answer, e->answerlen );
1194 
1195     return e;
1196 }
1197 
1198 static int
1199 entry_equals( const Entry*  e1, const Entry*  e2 )
1200 {
1201     DnsPacket  pack1[1], pack2[1];
1202 
1203     if (e1->querylen != e2->querylen) {
1204         return 0;
1205     }
1206     _dnsPacket_init(pack1, e1->query, e1->querylen);
1207     _dnsPacket_init(pack2, e2->query, e2->querylen);
1208 
1209     return _dnsPacket_isEqualQuery(pack1, pack2);
1210 }
1211 
1212 /****************************************************************************/
1213 /****************************************************************************/
1214 /*****                                                                  *****/
1215 /*****                                                                  *****/
1216 /*****                                                                  *****/
1217 /****************************************************************************/
1218 /****************************************************************************/
1219 
1220 /* We use a simple hash table with external collision lists.
1221  * for simplicity, the hash-table fields 'hash' and 'hlink' are
1222  * inlined in the Entry structure.
1223  */
1224 
1225 /* Maximum time, in seconds, for a thread to wait for a pending request */
1226 #define PENDING_REQUEST_TIMEOUT 20
1227 
1228 typedef struct pending_req_info {
1229     unsigned int                hash;
1230     pthread_cond_t              cond;
1231     struct pending_req_info*    next;
1232 } PendingReqInfo;
1233 
1234 typedef struct resolv_cache {
1235     int              max_entries;
1236     int              num_entries;
1237     Entry            mru_list;
1238     pthread_mutex_t  lock;
1239     unsigned         generation;
1240     int              last_id;
1241     Entry*           entries;
1242     PendingReqInfo   pending_requests;
1243 } Cache;
1244 
1245 typedef struct resolv_cache_info {
1246     char                        ifname[IF_NAMESIZE + 1];
1247     struct in_addr              ifaddr;
1248     Cache*                      cache;
1249     struct resolv_cache_info*   next;
1250     char*                       nameservers[MAXNS +1];
1251     struct addrinfo*            nsaddrinfo[MAXNS + 1];
1252     char                        defdname[256];
1253     int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
1254 } CacheInfo;
1255 
1256 typedef struct resolv_pidiface_info {
1257     int                             pid;
1258     char                            ifname[IF_NAMESIZE + 1];
1259     struct resolv_pidiface_info*    next;
1260 } PidIfaceInfo;
1261 
1262 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
1263 
1264 static void
1265 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1266 {
1267     struct pending_req_info *ri, *tmp;
1268     if (cache) {
1269         ri = cache->pending_requests.next;
1270 
1271         while (ri) {
1272             tmp = ri;
1273             ri = ri->next;
1274             pthread_cond_broadcast(&tmp->cond);
1275 
1276             pthread_cond_destroy(&tmp->cond);
1277             free(tmp);
1278         }
1279 
1280         cache->pending_requests.next = NULL;
1281     }
1282 }
1283 
1284 /* return 0 if no pending request is found matching the key.
1285  * if a matching request is found, the calling thread will wait
1286  * and return 1 when released */
1287 static int
1288 _cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
1289 {
1290     struct pending_req_info *ri, *prev;
1291     int exist = 0;
1292 
1293     if (cache && key) {
1294         ri = cache->pending_requests.next;
1295         prev = &cache->pending_requests;
1296         while (ri) {
1297             if (ri->hash == key->hash) {
1298                 exist = 1;
1299                 break;
1300             }
1301             prev = ri;
1302             ri = ri->next;
1303         }
1304 
1305         if (!exist) {
1306             ri = calloc(1, sizeof(struct pending_req_info));
1307             if (ri) {
1308                 ri->hash = key->hash;
1309                 pthread_cond_init(&ri->cond, NULL);
1310                 prev->next = ri;
1311             }
1312         } else {
1313             struct timespec ts = {0,0};
1314             XLOG("Waiting for previous request");
1315             ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
1316             pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
1317         }
1318     }
1319 
1320     return exist;
1321 }
1322 
1323 /* notify any thread waiting on a request matching the key that its
1324  * answer has been added to the cache */
1325 static void
1326 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1327 {
1328     struct pending_req_info *ri, *prev;
1329 
1330     if (cache && key) {
1331         ri = cache->pending_requests.next;
1332         prev = &cache->pending_requests;
1333         while (ri) {
1334             if (ri->hash == key->hash) {
1335                 pthread_cond_broadcast(&ri->cond);
1336                 break;
1337             }
1338             prev = ri;
1339             ri = ri->next;
1340         }
1341 
1342         // remove item from list and destroy
1343         if (ri) {
1344             prev->next = ri->next;
1345             pthread_cond_destroy(&ri->cond);
1346             free(ri);
1347         }
1348     }
1349 }
1350 
1351 /* notify the cache that the query failed */
1352 void
1353 _resolv_cache_query_failed( struct resolv_cache* cache,
1354                    const void* query,
1355                    int         querylen)
1356 {
1357     Entry    key[1];
1358 
1359     if (cache && entry_init_key(key, query, querylen)) {
1360         pthread_mutex_lock(&cache->lock);
1361         _cache_notify_waiting_tid_locked(cache, key);
1362         pthread_mutex_unlock(&cache->lock);
1363     }
1364 }
1365 
1366 static void
1367 _cache_flush_locked( Cache*  cache )
1368 {
1369     int     nn;
1370 
1371     for (nn = 0; nn < cache->max_entries; nn++)
1372     {
1373         Entry**  pnode = (Entry**) &cache->entries[nn];
1374 
1375         while (*pnode != NULL) {
1376             Entry*  node = *pnode;
1377             *pnode = node->hlink;
1378             entry_free(node);
1379         }
1380     }
1381 
1382     // flush pending request
1383     _cache_flush_pending_requests_locked(cache);
1384 
1385     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1386     cache->num_entries       = 0;
1387     cache->last_id           = 0;
1388 
1389     XLOG("*************************\n"
1390          "*** DNS CACHE FLUSHED ***\n"
1391          "*************************");
1392 }
1393 
1394 /* Return max number of entries allowed in the cache,
1395  * i.e. cache size. The cache size is either defined
1396  * by system property ro.net.dns_cache_size or by
1397  * CONFIG_MAX_ENTRIES if the system property is not set
1398  * or is set to an invalid value. */
1399 static int
1400 _res_cache_get_max_entries( void )
1401 {
1402     int result = -1;
1403     char cache_size[PROP_VALUE_MAX];
1404 
1405     const char* cache_mode = getenv("ANDROID_DNS_MODE");
1406 
1407     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1408         // The cache is only used in "local" mode, i.e. by the DNS proxy
1409         // itself; other processes get a zero-sized (disabled) cache.
1410         XLOG("setup cache for non-cache process. size=0, %s", cache_mode);
1411         return 0;
1412     }
1413 
1414     if (__system_property_get(DNS_CACHE_SIZE_PROP_NAME, cache_size) > 0) {
1415         result = atoi(cache_size);
1416     }
1417 
1418     // ro.net.dns_cache_size not set or set to negative value
1419     if (result <= 0) {
1420         result = CONFIG_MAX_ENTRIES;
1421     }
1422 
1423     XLOG("cache size: %d", result);
1424     return result;
1425 }
1426 
1427 static struct resolv_cache*
1428 _resolv_cache_create( void )
1429 {
1430     struct resolv_cache*  cache;
1431 
1432     cache = calloc(sizeof(*cache), 1);
1433     if (cache) {
1434         cache->max_entries = _res_cache_get_max_entries();
1435         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1436         if (cache->entries) {
1437             cache->generation = ~0U;
1438             pthread_mutex_init( &cache->lock, NULL );
1439             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1440             XLOG("%s: cache created\n", __FUNCTION__);
1441         } else {
1442             free(cache);
1443             cache = NULL;
1444         }
1445     }
1446     return cache;
1447 }
1448 
1449 
1450 #if DEBUG
1451 static void
1452 _dump_query( const uint8_t*  query, int  querylen )
1453 {
1454     char       temp[256], *p=temp, *end=p+sizeof(temp);
1455     DnsPacket  pack[1];
1456 
1457     _dnsPacket_init(pack, query, querylen);
1458     p = _dnsPacket_bprintQuery(pack, p, end);
1459     XLOG("QUERY: %s", temp);
1460 }
1461 
1462 static void
1463 _cache_dump_mru( Cache*  cache )
1464 {
1465     char    temp[512], *p=temp, *end=p+sizeof(temp);
1466     Entry*  e;
1467 
1468     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1469     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1470         p = _bprint(p, end, " %d", e->id);
1471 
1472     XLOG("%s", temp);
1473 }
1474 
1475 static void
1476 _dump_answer(const void* answer, int answerlen)
1477 {
1478     res_state statep;
1479     FILE* fp;
1480     char* buf;
1481     int fileLen;
1482 
1483     fp = fopen("/data/reslog.txt", "w+");
1484     if (fp != NULL) {
1485         statep = __res_get_state();
1486 
1487         res_pquery(statep, answer, answerlen, fp);
1488 
1489         //Get file length
1490         fseek(fp, 0, SEEK_END);
1491         fileLen=ftell(fp);
1492         fseek(fp, 0, SEEK_SET);
1493         buf = (char *)malloc(fileLen+1);
1494         if (buf != NULL) {
1495             //Read file contents into buffer
1496             fread(buf, fileLen, 1, fp); buf[fileLen] = 0;
1497             XLOG("%s\n", buf);
1498             free(buf);
1499         }
1500         fclose(fp);
1501         remove("/data/reslog.txt");
1502     }
1503     else {
1504         errno = 0; // else debug is introducing error signals
1505         XLOG("_dump_answer: can't open file\n");
1506     }
1507 }
1508 #endif
1509 
1510 #if DEBUG
1511 #  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
1512 #  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
1513 #else
1514 #  define  XLOG_QUERY(q,len)   ((void)0)
1515 #  define  XLOG_ANSWER(a,len)  ((void)0)
1516 #endif
1517 
1518 /* This function tries to find a key within the hash table
1519  * In case of success, it will return a *pointer* to the hashed key.
1520  * In case of failure, it will return a *pointer* to a NULL slot
1521  *
1522  * So, the caller must check '*result' to check for success/failure.
1523  *
1524  * The main idea is that the result can later be used directly in
1525  * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1526  * parameter. This makes the code simpler and avoids re-searching
1527  * for the key position in the htable.
1528  *
1529  * The result of a lookup_p is only valid until you alter the hash
1530  * table.
1531  */
1532 static Entry**
1533 _cache_lookup_p( Cache*   cache,
1534                  Entry*   key )
1535 {
1536     int      index = key->hash % cache->max_entries;
1537     Entry**  pnode = (Entry**) &cache->entries[ index ];
1538 
1539     while (*pnode != NULL) {
1540         Entry*  node = *pnode;
1541 
1542         if (node == NULL)
1543             break;
1544 
1545         if (node->hash == key->hash && entry_equals(node, key))
1546             break;
1547 
1548         pnode = &node->hlink;
1549     }
1550     return pnode;
1551 }
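
/* Illustrative sketch (not part of the original source): the intended
 * lookup-then-insert pattern, as used by _resolv_cache_add() later in this
 * file. 'key', 'answer' and 'answerlen' are placeholders; the 'lookup' slot
 * returned by a failed lookup is reused directly by _cache_add_p(), defined
 * below:
 *
 *     Entry**  lookup = _cache_lookup_p(cache, key);
 *
 *     if (*lookup != NULL) {
 *         // hit: *lookup points to the cached entry
 *     } else {
 *         Entry*  e = entry_alloc(key, answer, answerlen);
 *         if (e != NULL)
 *             _cache_add_p(cache, lookup, e);
 *     }
 */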
1552 
1553 /* Add a new entry to the hash table. 'lookup' must be the
1554  * result of an immediate previous failed _lookup_p() call
1555  * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1556  * newly created entry
1557  */
1558 static void
1559 _cache_add_p( Cache*   cache,
1560               Entry**  lookup,
1561               Entry*   e )
1562 {
1563     *lookup = e;
1564     e->id = ++cache->last_id;
1565     entry_mru_add(e, &cache->mru_list);
1566     cache->num_entries += 1;
1567 
1568     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1569          e->id, cache->num_entries);
1570 }
1571 
1572 /* Remove an existing entry from the hash table,
1573  * 'lookup' must be the result of an immediate previous
1574  * and successful _lookup_p() call.
1575  */
1576 static void
1577 _cache_remove_p( Cache*   cache,
1578                  Entry**  lookup )
1579 {
1580     Entry*  e  = *lookup;
1581 
1582     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1583          e->id, cache->num_entries-1);
1584 
1585     entry_mru_remove(e);
1586     *lookup = e->hlink;
1587     entry_free(e);
1588     cache->num_entries -= 1;
1589 }
1590 
1591 /* Remove the oldest entry from the hash table.
1592  */
1593 static void
1594 _cache_remove_oldest( Cache*  cache )
1595 {
1596     Entry*   oldest = cache->mru_list.mru_prev;
1597     Entry**  lookup = _cache_lookup_p(cache, oldest);
1598 
1599     if (*lookup == NULL) { /* should not happen */
1600         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1601         return;
1602     }
1603     if (DEBUG) {
1604         XLOG("Cache full - removing oldest");
1605         XLOG_QUERY(oldest->query, oldest->querylen);
1606     }
1607     _cache_remove_p(cache, lookup);
1608 }
1609 
1610 /* Remove all expired entries from the hash table.
1611  */
1612 static void _cache_remove_expired(Cache* cache) {
1613     Entry* e;
1614     time_t now = _time_now();
1615 
1616     for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
1617         // Entry is old, remove
1618         if (now >= e->expires) {
1619             Entry** lookup = _cache_lookup_p(cache, e);
1620             if (*lookup == NULL) { /* should not happen */
1621                 XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
1622                 return;
1623             }
1624             e = e->mru_next;
1625             _cache_remove_p(cache, lookup);
1626         } else {
1627             e = e->mru_next;
1628         }
1629     }
1630 }
1631 
1632 ResolvCacheStatus
1633 _resolv_cache_lookup( struct resolv_cache*  cache,
1634                       const void*           query,
1635                       int                   querylen,
1636                       void*                 answer,
1637                       int                   answersize,
1638                       int                  *answerlen )
1639 {
1640     Entry      key[1];
1641     Entry**    lookup;
1642     Entry*     e;
1643     time_t     now;
1644 
1645     ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
1646 
1647     XLOG("%s: lookup", __FUNCTION__);
1648     XLOG_QUERY(query, querylen);
1649 
1650     /* we don't cache malformed queries */
1651     if (!entry_init_key(key, query, querylen)) {
1652         XLOG("%s: unsupported query", __FUNCTION__);
1653         return RESOLV_CACHE_UNSUPPORTED;
1654     }
1655     /* lookup cache */
1656     pthread_mutex_lock( &cache->lock );
1657 
1658     /* see the description of _lookup_p to understand this.
1659      * the function always returns a non-NULL pointer.
1660      */
1661     lookup = _cache_lookup_p(cache, key);
1662     e      = *lookup;
1663 
1664     if (e == NULL) {
1665         XLOG( "NOT IN CACHE");
1666         // the calling thread will wait if an outstanding request
1667         // that matches this query is found
1668         if (!_cache_check_pending_request_locked(cache, key)) {
1669             goto Exit;
1670         } else {
1671             lookup = _cache_lookup_p(cache, key);
1672             e = *lookup;
1673             if (e == NULL) {
1674                 goto Exit;
1675             }
1676         }
1677     }
1678 
1679     now = _time_now();
1680 
1681     /* remove stale entries here */
1682     if (now >= e->expires) {
1683         XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
1684         XLOG_QUERY(e->query, e->querylen);
1685         _cache_remove_p(cache, lookup);
1686         goto Exit;
1687     }
1688 
1689     *answerlen = e->answerlen;
1690     if (e->answerlen > answersize) {
1691         /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
1692         result = RESOLV_CACHE_UNSUPPORTED;
1693         XLOG(" ANSWER TOO LONG");
1694         goto Exit;
1695     }
1696 
1697     memcpy( answer, e->answer, e->answerlen );
1698 
1699     /* bump up this entry to the top of the MRU list */
1700     if (e != cache->mru_list.mru_next) {
1701         entry_mru_remove( e );
1702         entry_mru_add( e, &cache->mru_list );
1703     }
1704 
1705     XLOG( "FOUND IN CACHE entry=%p", e );
1706     result = RESOLV_CACHE_FOUND;
1707 
1708 Exit:
1709     pthread_mutex_unlock( &cache->lock );
1710     return result;
1711 }
1712 
1713 
1714 void
1715 _resolv_cache_add( struct resolv_cache*  cache,
1716                    const void*           query,
1717                    int                   querylen,
1718                    const void*           answer,
1719                    int                   answerlen )
1720 {
1721     Entry    key[1];
1722     Entry*   e;
1723     Entry**  lookup;
1724     u_long   ttl;
1725 
1726     /* don't assume that the query has already been cached
1727      */
1728     if (!entry_init_key( key, query, querylen )) {
1729         XLOG( "%s: passed invalid query ?", __FUNCTION__);
1730         return;
1731     }
1732 
1733     pthread_mutex_lock( &cache->lock );
1734 
1735     XLOG( "%s: query:", __FUNCTION__ );
1736     XLOG_QUERY(query,querylen);
1737     XLOG_ANSWER(answer, answerlen);
1738 #if DEBUG_DATA
1739     XLOG( "answer:");
1740     XLOG_BYTES(answer,answerlen);
1741 #endif
1742 
1743     lookup = _cache_lookup_p(cache, key);
1744     e      = *lookup;
1745 
1746     if (e != NULL) { /* should not happen */
1747         XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1748              __FUNCTION__, e);
1749         goto Exit;
1750     }
1751 
1752     if (cache->num_entries >= cache->max_entries) {
1753         _cache_remove_expired(cache);
1754         if (cache->num_entries >= cache->max_entries) {
1755             _cache_remove_oldest(cache);
1756         }
1757         /* need to lookup again */
1758         lookup = _cache_lookup_p(cache, key);
1759         e      = *lookup;
1760         if (e != NULL) {
1761             XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1762                 __FUNCTION__, e);
1763             goto Exit;
1764         }
1765     }
1766 
1767     ttl = answer_getTTL(answer, answerlen);
1768     if (ttl > 0) {
1769         e = entry_alloc(key, answer, answerlen);
1770         if (e != NULL) {
1771             e->expires = ttl + _time_now();
1772             _cache_add_p(cache, lookup, e);
1773         }
1774     }
1775 #if DEBUG
1776     _cache_dump_mru(cache);
1777 #endif
1778 Exit:
1779     _cache_notify_waiting_tid_locked(cache, key);
1780     pthread_mutex_unlock( &cache->lock );
1781 }
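/* Example (illustrative sketch): how a caller would typically drive the two
 * functions above on the query path.  `send_query_to_server` is a hypothetical
 * placeholder for the network transaction, and failure notification is
 * omitted; only _resolv_cache_lookup(), _resolv_cache_add() and the
 * RESOLV_CACHE_* values come from this file and its headers.
 */
#if 0   /* illustrative only, never compiled */
static int
example_cached_query( struct resolv_cache*  cache,
                      const u_char*         query,  int  querylen,
                      u_char*               answer, int  answersize )
{
    int answerlen = 0;

    ResolvCacheStatus status = _resolv_cache_lookup(cache, query, querylen,
                                                    answer, answersize, &answerlen);
    if (status == RESOLV_CACHE_FOUND)
        return answerlen;                       /* cache hit: the answer was already copied */

    /* cache miss (or uncacheable query): ask the server; hypothetical call */
    answerlen = send_query_to_server(query, querylen, answer, answersize);

    if (status == RESOLV_CACHE_NOTFOUND && answerlen > 0) {
        /* caches the answer and wakes threads waiting on this pending query */
        _resolv_cache_add(cache, query, querylen, answer, answerlen);
    }
    return answerlen;
}
#endif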
1782 
1783 /****************************************************************************/
1784 /****************************************************************************/
1785 /*****                                                                  *****/
1786 /*****                                                                  *****/
1787 /*****                                                                  *****/
1788 /****************************************************************************/
1789 /****************************************************************************/
1790 
1791 static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
1792 
1793 // Head of the list of caches.  Protected by _res_cache_list_lock.
1794 static struct resolv_cache_info _res_cache_list;
1795 
1796 // List of pid-interface pairs
1797 static struct resolv_pidiface_info _res_pidiface_list;
1798 
1799 // name of the current default interface
1800 static char            _res_default_ifname[IF_NAMESIZE + 1];
1801 
1802 // lock protecting everything in the resolv_cache_info structs (next ptr, etc.)
1803 static pthread_mutex_t _res_cache_list_lock;
1804 
1805 // lock protecting _res_pidiface_list
1806 static pthread_mutex_t _res_pidiface_list_lock;
1807 
1808 /* lookup the default interface name */
1809 static char *_get_default_iface_locked();
1810 /* find the first cache that has an associated interface and return the name of the interface */
1811 static char* _find_any_iface_name_locked( void );
1812 
1813 /* insert resolv_cache_info into the list of resolv_cache_infos */
1814 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
1815 /* creates a resolv_cache_info */
1816 static struct resolv_cache_info* _create_cache_info( void );
1817 /* gets cache associated with an interface name, or NULL if none exists */
1818 static struct resolv_cache* _find_named_cache_locked(const char* ifname);
1819 /* gets a resolv_cache_info associated with an interface name, or NULL if not found */
1820 static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
1821 /* look up the named cache, creating it if needed */
1822 static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
1823 /* empty the named cache */
1824 static void _flush_cache_for_iface_locked(const char* ifname);
1825 /* empty the nameservers set for the named cache */
1826 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
1827 /* look up the nameserver for the named interface */
1828 static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
1829 /* lookup the addr of the nameserver for the named interface */
1830 static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
1831 /* look up the interface's address */
1832 static struct in_addr* _get_addr_locked(const char * ifname);
1833 /* return 1 if every name server in the provided list is already present in the list
1834  * of name servers currently attached to the provided cache_info */
1835 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1836         char** servers, int numservers);
1837 /* remove a resolv_pidiface_info structure from _res_pidiface_list */
1838 static void _remove_pidiface_info_locked(int pid);
1839 /* get a resolv_pidiface_info structure from _res_pidiface_list with a certain pid */
1840 static struct resolv_pidiface_info* _get_pid_iface_info_locked(int pid);
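/* Example (illustrative sketch): the order in which a configuration daemon
 * would typically exercise the per-interface API declared above and defined
 * below.  The interface name, server addresses and search domain are made up.
 */
#if 0   /* illustrative only, never compiled */
static void
example_configure_wlan0(void)
{
    char* servers[2] = { "8.8.8.8", "8.8.4.4" };

    /* make wlan0 the default resolver interface and attach its name servers */
    _resolv_set_default_iface("wlan0");
    _resolv_set_nameservers_for_iface("wlan0", servers, 2, "example.com");

    /* later, when the link goes down or its DNS settings change */
    _resolv_flush_cache_for_iface("wlan0");
}
#endif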
1841 
1842 static void
1843 _res_cache_init(void)
1844 {
1845     const char*  env = getenv(CONFIG_ENV);
1846 
1847     if (env && atoi(env) == 0) {
1848         /* the cache is disabled */
1849         return;
1850     }
1851 
1852     memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
1853     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1854     memset(&_res_pidiface_list, 0, sizeof(_res_pidiface_list));
1855     pthread_mutex_init(&_res_cache_list_lock, NULL);
1856     pthread_mutex_init(&_res_pidiface_list_lock, NULL);
1857 }
1858 
1859 struct resolv_cache*
1860 __get_res_cache(const char* ifname)
1861 {
1862     struct resolv_cache *cache;
1863 
1864     pthread_once(&_res_cache_once, _res_cache_init);
1865     pthread_mutex_lock(&_res_cache_list_lock);
1866 
1867     char* iface;
1868     if (ifname == NULL || ifname[0] == '\0') {
1869         iface = _get_default_iface_locked();
1870         if (iface[0] == '\0') {
1871             char* tmp = _find_any_iface_name_locked();
1872             if (tmp) {
1873                 iface = tmp;
1874             }
1875         }
1876     } else {
1877         iface = (char *) ifname;
1878     }
1879 
1880     cache = _get_res_cache_for_iface_locked(iface);
1881 
1882     pthread_mutex_unlock(&_res_cache_list_lock);
1883     XLOG("__get_res_cache: iface = %s, cache=%p\n", iface, cache);
1884     return cache;
1885 }
1886 
1887 static struct resolv_cache*
1888 _get_res_cache_for_iface_locked(const char* ifname)
1889 {
1890     if (ifname == NULL)
1891         return NULL;
1892 
1893     struct resolv_cache* cache = _find_named_cache_locked(ifname);
1894     if (!cache) {
1895         struct resolv_cache_info* cache_info = _create_cache_info();
1896         if (cache_info) {
1897             cache = _resolv_cache_create();
1898             if (cache) {
1899                 int len = sizeof(cache_info->ifname);
1900                 cache_info->cache = cache;
1901                 strncpy(cache_info->ifname, ifname, len - 1);
1902                 cache_info->ifname[len - 1] = '\0';
1903 
1904                 _insert_cache_info_locked(cache_info);
1905             } else {
1906                 free(cache_info);
1907             }
1908         }
1909     }
1910     return cache;
1911 }
1912 
1913 void
1914 _resolv_cache_reset(unsigned  generation)
1915 {
1916     XLOG("%s: generation=%d", __FUNCTION__, generation);
1917 
1918     pthread_once(&_res_cache_once, _res_cache_init);
1919     pthread_mutex_lock(&_res_cache_list_lock);
1920 
1921     char* ifname = _get_default_iface_locked();
1922     // if the default interface is not set, use the first cache
1923     // associated with an interface as the default one.
1924     // Note: this duplicates code from __get_res_cache, since this
1925     // method will become obsolete once the per-interface cache
1926     // is implemented everywhere
1927     if (ifname[0] == '\0') {
1928         struct resolv_cache_info* cache_info = _res_cache_list.next;
1929         while (cache_info) {
1930             if (cache_info->ifname[0] != '\0') {
1931                 ifname = cache_info->ifname;
1932                 break;
1933             }
1934 
1935             cache_info = cache_info->next;
1936         }
1937     }
1938     struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);
1939 
1940     if (cache != NULL) {
1941         pthread_mutex_lock( &cache->lock );
1942         if (cache->generation != generation) {
1943             _cache_flush_locked(cache);
1944             cache->generation = generation;
1945         }
1946         pthread_mutex_unlock( &cache->lock );
1947     }
1948 
1949     pthread_mutex_unlock(&_res_cache_list_lock);
1950 }
1951 
1952 void
1953 _resolv_flush_cache_for_default_iface(void)
1954 {
1955     char* ifname;
1956 
1957     pthread_once(&_res_cache_once, _res_cache_init);
1958     pthread_mutex_lock(&_res_cache_list_lock);
1959 
1960     ifname = _get_default_iface_locked();
1961     _flush_cache_for_iface_locked(ifname);
1962 
1963     pthread_mutex_unlock(&_res_cache_list_lock);
1964 }
1965 
1966 void
1967 _resolv_flush_cache_for_iface(const char* ifname)
1968 {
1969     pthread_once(&_res_cache_once, _res_cache_init);
1970     pthread_mutex_lock(&_res_cache_list_lock);
1971 
1972     _flush_cache_for_iface_locked(ifname);
1973 
1974     pthread_mutex_unlock(&_res_cache_list_lock);
1975 }
1976 
1977 static void
1978 _flush_cache_for_iface_locked(const char* ifname)
1979 {
1980     struct resolv_cache* cache = _find_named_cache_locked(ifname);
1981     if (cache) {
1982         pthread_mutex_lock(&cache->lock);
1983         _cache_flush_locked(cache);
1984         pthread_mutex_unlock(&cache->lock);
1985     }
1986 }
1987 
1988 static struct resolv_cache_info*
1989 _create_cache_info(void)
1990 {
1991     struct resolv_cache_info*  cache_info;
1992 
1993     cache_info = calloc(1, sizeof(*cache_info));
1994     return cache_info;
1995 }
1996 
1997 static void
1998 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
1999 {
2000     struct resolv_cache_info* last;
2001 
2002     for (last = &_res_cache_list; last->next; last = last->next);
2003 
2004     last->next = cache_info;
2005 
2006 }
2007 
2008 static struct resolv_cache*
2009 _find_named_cache_locked(const char* ifname) {
2010 
2011     struct resolv_cache_info* info = _find_cache_info_locked(ifname);
2012 
2013     if (info != NULL) return info->cache;
2014 
2015     return NULL;
2016 }
2017 
2018 static struct resolv_cache_info*
2019 _find_cache_info_locked(const char* ifname)
2020 {
2021     if (ifname == NULL)
2022         return NULL;
2023 
2024     struct resolv_cache_info* cache_info = _res_cache_list.next;
2025 
2026     while (cache_info) {
2027         if (strcmp(cache_info->ifname, ifname) == 0) {
2028             break;
2029         }
2030 
2031         cache_info = cache_info->next;
2032     }
2033     return cache_info;
2034 }
2035 
2036 static char*
2037 _get_default_iface_locked(void)
2038 {
2039 
2040     char* iface = _res_default_ifname;
2041 
2042     return iface;
2043 }
2044 
2045 static char*
2046 _find_any_iface_name_locked( void ) {
2047     char* ifname = NULL;
2048 
2049     struct resolv_cache_info* cache_info = _res_cache_list.next;
2050     while (cache_info) {
2051         if (cache_info->ifname[0] != '\0') {
2052             ifname = cache_info->ifname;
2053             break;
2054         }
2055 
2056         cache_info = cache_info->next;
2057     }
2058 
2059     return ifname;
2060 }
2061 
2062 void
2063 _resolv_set_default_iface(const char* ifname)
2064 {
2065     XLOG("_resolv_set_default_iface: ifname %s\n", ifname);
2066 
2067     pthread_once(&_res_cache_once, _res_cache_init);
2068     pthread_mutex_lock(&_res_cache_list_lock);
2069 
2070     int size = sizeof(_res_default_ifname);
2071     memset(_res_default_ifname, 0, size);
2072     strncpy(_res_default_ifname, ifname, size - 1);
2073     _res_default_ifname[size - 1] = '\0';
2074 
2075     pthread_mutex_unlock(&_res_cache_list_lock);
2076 }
2077 
2078 void
2079 _resolv_set_nameservers_for_iface(const char* ifname, char** servers, int numservers,
2080         const char *domains)
2081 {
2082     int i, rt, index;
2083     struct addrinfo hints;
2084     char sbuf[NI_MAXSERV];
2085     register char *cp;
2086     int *offset;
2087 
2088     pthread_once(&_res_cache_once, _res_cache_init);
2089     pthread_mutex_lock(&_res_cache_list_lock);
2090 
2091     // create the cache if it does not exist yet
2092     _get_res_cache_for_iface_locked(ifname);
2093 
2094     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2095 
2096     if (cache_info != NULL &&
2097             !_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
2098         // free current before adding new
2099         _free_nameservers_locked(cache_info);
2100 
2101         memset(&hints, 0, sizeof(hints));
2102         hints.ai_family = PF_UNSPEC;
2103         hints.ai_socktype = SOCK_DGRAM; /*dummy*/
2104         hints.ai_flags = AI_NUMERICHOST;
2105         sprintf(sbuf, "%u", NAMESERVER_PORT);
2106 
2107         index = 0;
2108         for (i = 0; i < numservers && i < MAXNS; i++) {
2109             rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
2110             if (rt == 0) {
2111                 cache_info->nameservers[index] = strdup(servers[i]);
2112                 index++;
2113                 XLOG("_resolv_set_nameservers_for_iface: iface = %s, addr = %s\n",
2114                         ifname, servers[i]);
2115             } else {
2116                 cache_info->nsaddrinfo[index] = NULL;
2117             }
2118         }
2119 
2120         // code moved from res_init.c, load_domain_search_list
2121         strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
2122         if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
2123             *cp = '\0';
2124         cp = cache_info->defdname;
2125         offset = cache_info->dnsrch_offset;
2126         while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
2127             while (*cp == ' ' || *cp == '\t') /* skip leading white space */
2128                 cp++;
2129             if (*cp == '\0') /* stop if nothing more to do */
2130                 break;
2131             *offset++ = cp - cache_info->defdname; /* record this search domain */
2132             while (*cp) { /* zero-terminate it */
2133                 if (*cp == ' '|| *cp == '\t') {
2134                     *cp++ = '\0';
2135                     break;
2136                 }
2137                 cp++;
2138             }
2139         }
2140         *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
2141 
2142         // flush the cache since the settings changed
2143         _flush_cache_for_iface_locked(ifname);
2144 
2145     }
2146 
2147     pthread_mutex_unlock(&_res_cache_list_lock);
2148 }
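/* Worked example for the search-domain parsing above (made-up input): with
 * domains = "example.com corp.example.com", defdname ends up holding the two
 * names back to back, each NUL-terminated, and dnsrch_offset records the byte
 * offset of each name within defdname, terminated by -1:
 *
 *   defdname      : "example.com\0corp.example.com\0"
 *   dnsrch_offset : { 0, 12, -1 }
 *
 * _resolv_populate_res_for_iface() below turns these offsets back into
 * statp->dnsrch pointers.
 */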
2149 
2150 static int
2151 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
2152         char** servers, int numservers)
2153 {
2154     int i;
2155     char** ns;
2156     int equal = 1;
2157 
2158     // compare each name server against current name servers
2159     if (numservers > MAXNS) numservers = MAXNS;
2160     for (i = 0; i < numservers && equal; i++) {
2161         ns = cache_info->nameservers;
2162         equal = 0;
2163         while(*ns) {
2164             if (strcmp(*ns, servers[i]) == 0) {
2165                 equal = 1;
2166                 break;
2167             }
2168             ns++;
2169         }
2170     }
2171 
2172     return equal;
2173 }
2174 
2175 static void
2176 _free_nameservers_locked(struct resolv_cache_info* cache_info)
2177 {
2178     int i;
2179     for (i = 0; i <= MAXNS; i++) {
2180         free(cache_info->nameservers[i]);
2181         cache_info->nameservers[i] = NULL;
2182         if (cache_info->nsaddrinfo[i] != NULL) {
2183             freeaddrinfo(cache_info->nsaddrinfo[i]);
2184             cache_info->nsaddrinfo[i] = NULL;
2185         }
2186     }
2187 }
2188 
2189 int
2190 _resolv_cache_get_nameserver(int n, char* addr, int addrLen)
2191 {
2192     char *ifname;
2193     int result = 0;
2194 
2195     pthread_once(&_res_cache_once, _res_cache_init);
2196     pthread_mutex_lock(&_res_cache_list_lock);
2197 
2198     ifname = _get_default_iface_locked();
2199     result = _get_nameserver_locked(ifname, n, addr, addrLen);
2200 
2201     pthread_mutex_unlock(&_res_cache_list_lock);
2202     return result;
2203 }
2204 
2205 static int
2206 _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
2207 {
2208     int len = 0;
2209     char* ns;
2210     struct resolv_cache_info* cache_info;
2211 
2212     if (n < 1 || n > MAXNS || !addr)
2213         return 0;
2214 
2215     cache_info = _find_cache_info_locked(ifname);
2216     if (cache_info) {
2217         ns = cache_info->nameservers[n - 1];
2218         if (ns) {
2219             len = strlen(ns);
2220             if (len < addrLen) {
2221                 strncpy(addr, ns, len);
2222                 addr[len] = '\0';
2223             } else {
2224                 len = 0;
2225             }
2226         }
2227     }
2228 
2229     return len;
2230 }
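/* Example (illustrative sketch): enumerating the name servers of the default
 * interface through the accessors above.  Note the 1-based index; a return
 * value of 0 means there is no server in that slot (or the buffer is too small).
 */
#if 0   /* illustrative only, never compiled */
static void
example_dump_nameservers(void)
{
    char  addr[INET6_ADDRSTRLEN];
    int   n;

    for (n = 1; n <= MAXNS; n++) {
        if (_resolv_cache_get_nameserver(n, addr, sizeof(addr)) == 0)
            break;
        XLOG("nameserver %d: %s", n, addr);
    }
}
#endif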
2231 
2232 struct addrinfo*
2233 _cache_get_nameserver_addr(int n)
2234 {
2235     struct addrinfo *result;
2236     char* ifname;
2237 
2238     pthread_once(&_res_cache_once, _res_cache_init);
2239     pthread_mutex_lock(&_res_cache_list_lock);
2240 
2241     ifname = _get_default_iface_locked();
2242 
2243     result = _get_nameserver_addr_locked(ifname, n);
2244     pthread_mutex_unlock(&_res_cache_list_lock);
2245     return result;
2246 }
2247 
2248 static struct addrinfo*
2249 _get_nameserver_addr_locked(const char* ifname, int n)
2250 {
2251     struct addrinfo* ai = NULL;
2252     struct resolv_cache_info* cache_info;
2253 
2254     if (n < 1 || n > MAXNS)
2255         return NULL;
2256 
2257     cache_info = _find_cache_info_locked(ifname);
2258     if (cache_info) {
2259         ai = cache_info->nsaddrinfo[n - 1];
2260     }
2261     return ai;
2262 }
2263 
2264 void
2265 _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
2266 {
2267     pthread_once(&_res_cache_once, _res_cache_init);
2268     pthread_mutex_lock(&_res_cache_list_lock);
2269     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2270     if (cache_info) {
2271         memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
2272 
2273         if (DEBUG) {
2274             char* addr_s = inet_ntoa(cache_info->ifaddr);
2275             XLOG("address of interface %s is %s\n", ifname, addr_s);
2276         }
2277     }
2278     pthread_mutex_unlock(&_res_cache_list_lock);
2279 }
2280 
2281 struct in_addr*
2282 _resolv_get_addr_of_default_iface(void)
2283 {
2284     struct in_addr* ai = NULL;
2285     char* ifname;
2286 
2287     pthread_once(&_res_cache_once, _res_cache_init);
2288     pthread_mutex_lock(&_res_cache_list_lock);
2289     ifname = _get_default_iface_locked();
2290     ai = _get_addr_locked(ifname);
2291     pthread_mutex_unlock(&_res_cache_list_lock);
2292 
2293     return ai;
2294 }
2295 
2296 struct in_addr*
2297 _resolv_get_addr_of_iface(const char* ifname)
2298 {
2299     struct in_addr* ai = NULL;
2300 
2301     pthread_once(&_res_cache_once, _res_cache_init);
2302     pthread_mutex_lock(&_res_cache_list_lock);
2303     ai =_get_addr_locked(ifname);
2304     pthread_mutex_unlock(&_res_cache_list_lock);
2305     return ai;
2306 }
2307 
2308 static struct in_addr*
2309 _get_addr_locked(const char * ifname)
2310 {
2311     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2312     if (cache_info) {
2313         return &cache_info->ifaddr;
2314     }
2315     return NULL;
2316 }
2317 
2318 static void
2319 _remove_pidiface_info_locked(int pid) {
2320     struct resolv_pidiface_info* result = &_res_pidiface_list;
2321     struct resolv_pidiface_info* prev = NULL;
2322 
2323     while (result != NULL && result->pid != pid) {
2324         prev = result;
2325         result = result->next;
2326     }
2327     if (prev != NULL && result != NULL) {
2328         prev->next = result->next;
2329         free(result);
2330     }
2331 }
2332 
2333 static struct resolv_pidiface_info*
2334 _get_pid_iface_info_locked(int pid)
2335 {
2336     struct resolv_pidiface_info* result = &_res_pidiface_list;
2337     while (result != NULL && result->pid != pid) {
2338         result = result->next;
2339     }
2340 
2341     return result;
2342 }
2343 
2344 void
2345 _resolv_set_iface_for_pid(const char* ifname, int pid)
2346 {
2347     // make sure the pid iface list is created
2348     pthread_once(&_res_cache_once, _res_cache_init);
2349     pthread_mutex_lock(&_res_pidiface_list_lock);
2350 
2351     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2352     if (!pidiface_info) {
2353         pidiface_info = calloc(1, sizeof(*pidiface_info));
2354         if (pidiface_info) {
2355             pidiface_info->pid = pid;
2356             int len = sizeof(pidiface_info->ifname);
2357             strncpy(pidiface_info->ifname, ifname, len - 1);
2358             pidiface_info->ifname[len - 1] = '\0';
2359 
2360             pidiface_info->next = _res_pidiface_list.next;
2361             _res_pidiface_list.next = pidiface_info;
2362 
2363             XLOG("_resolv_set_iface_for_pid: pid %d , iface %s\n", pid, ifname);
2364         } else {
2365             XLOG("_resolv_set_iface_for_pid: calloc failed");
2366         }
2367     }
2368 
2369     pthread_mutex_unlock(&_res_pidiface_list_lock);
2370 }
2371 
2372 void
2373 _resolv_clear_iface_for_pid(int pid)
2374 {
2375     pthread_once(&_res_cache_once, _res_cache_init);
2376     pthread_mutex_lock(&_res_pidiface_list_lock);
2377 
2378     _remove_pidiface_info_locked(pid);
2379 
2380     XLOG("_resolv_clear_iface_for_pid: pid %d\n", pid);
2381 
2382     pthread_mutex_unlock(&_res_pidiface_list_lock);
2383 }
2384 
2385 int
2386 _resolv_get_pids_associated_interface(int pid, char* buff, int buffLen)
2387 {
2388     int len = 0;
2389 
2390     if (!buff) {
2391         return -1;
2392     }
2393 
2394     pthread_once(&_res_cache_once, _res_cache_init);
2395     pthread_mutex_lock(&_res_pidiface_list_lock);
2396 
2397     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2398     buff[0] = '\0';
2399     if (pidiface_info) {
2400         len = strlen(pidiface_info->ifname);
2401         if (len < buffLen) {
2402             strncpy(buff, pidiface_info->ifname, len);
2403             buff[len] = '\0';
2404         }
2405     }
2406 
2407     XLOG("_resolv_get_pids_associated_interface buff: %s\n", buff);
2408 
2409     pthread_mutex_unlock(&_res_pidiface_list_lock);
2410 
2411     return len;
2412 }
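/* Example (illustrative sketch): the round trip a caller would make through
 * the pid-to-interface binding above.  The pid and interface name are made up.
 */
#if 0   /* illustrative only, never compiled */
static void
example_pid_binding(void)
{
    char iface[IF_NAMESIZE + 1];

    _resolv_set_iface_for_pid("rmnet0", 1234);       /* bind pid 1234 to rmnet0 */
    _resolv_get_pids_associated_interface(1234, iface, sizeof(iface));
    XLOG("pid 1234 -> %s", iface);                   /* "rmnet0" */
    _resolv_clear_iface_for_pid(1234);               /* remove the binding */
}
#endif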
2413 
2414 int
2415 _resolv_get_default_iface(char* buff, int buffLen)
2416 {
2417     char* ifname;
2418     int len = 0;
2419 
2420     if (!buff || buffLen == 0) {
2421         return -1;
2422     }
2423 
2424     pthread_once(&_res_cache_once, _res_cache_init);
2425     pthread_mutex_lock(&_res_cache_list_lock);
2426 
2427     ifname = _get_default_iface_locked(); // never null, but may be empty
2428 
2429     // if the default interface is not set, get the first cache that has an interface
2430     if (ifname[0] == '\0') {
2431         ifname = _find_any_iface_name_locked(); // may be null
2432     }
2433 
2434     // use the default iface if set, or the interface the find-any call returned (may be NULL)
2435     if (ifname) {
2436         len = strlen(ifname);
2437         if (len < buffLen) {
2438             strncpy(buff, ifname, len);
2439             buff[len] = '\0';
2440         }
2441     } else {
2442         buff[0] = '\0';
2443     }
2444 
2445     pthread_mutex_unlock(&_res_cache_list_lock);
2446 
2447     return len;
2448 }
2449 
2450 int
2451 _resolv_populate_res_for_iface(res_state statp)
2452 {
2453     int nserv = 0;  /* stays 0 when statp is NULL */
2454     struct resolv_cache_info* info = NULL;
2455 
2456     if (statp) {
2457         struct addrinfo* ai;
2458 
2459         if (statp->iface[0] == '\0') { // no interface set; assign the default
2460             _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
2461         }
2462 
2463         pthread_once(&_res_cache_once, _res_cache_init);
2464         pthread_mutex_lock(&_res_cache_list_lock);
2465         info = _find_cache_info_locked(statp->iface);
2466 
2467         if (info == NULL) {
2468             pthread_mutex_unlock(&_res_cache_list_lock);
2469             return 0;
2470         }
2471 
2472         XLOG("_resolv_populate_res_for_iface: %s\n", statp->iface);
2473         for (nserv = 0; nserv < MAXNS; nserv++) {
2474             ai = info->nsaddrinfo[nserv];
2475             if (ai == NULL) {
2476                 break;
2477             }
2478 
2479             if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
2480                 if (statp->_u._ext.ext != NULL) {
2481                     memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
2482                     statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2483                 } else {
2484                     if ((size_t) ai->ai_addrlen
2485                             <= sizeof(statp->nsaddr_list[0])) {
2486                         memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
2487                                 ai->ai_addrlen);
2488                     } else {
2489                         statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2490                     }
2491                 }
2492             } else {
2493                 XLOG("_resolv_populate_res_for_iface found too long addrlen");
2494             }
2495         }
2496         statp->nscount = nserv;
2497         // now do search domains.  Note that we cache the offsets since this code runs a lot,
2498         // while the offset computation only runs when the settings are set/changed
2499         strlcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
2500         register char **pp = statp->dnsrch;
2501         register int *p = info->dnsrch_offset;
2502         while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
2503             *pp++ = statp->defdname + *p++;  /* dnsrch_offset holds byte offsets into defdname */
2504         }
2505 
2506         pthread_mutex_unlock(&_res_cache_list_lock);
2507     }
2508     return nserv;
2509 }
2510