1 /*
2  * Copyright (C) 2008 The Android Open Source Project
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  *  * Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer.
10  *  * Redistributions in binary form must reproduce the above copyright
11  *    notice, this list of conditions and the following disclaimer in
12  *    the documentation and/or other materials provided with the
13  *    distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
16  * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
17  * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
18  * FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
19  * COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
20  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
21  * BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS
22  * OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED
23  * AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
24  * OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT
25  * OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28 
29 #include "resolv_cache.h"
30 #include <resolv.h>
31 #include <stdlib.h>
32 #include <string.h>
33 #include <time.h>
34 #include "pthread.h"
35 
36 #include <errno.h>
37 #include "arpa_nameser.h"
38 #include <sys/system_properties.h>
39 #include <net/if.h>
40 #include <netdb.h>
41 #include <linux/if.h>
42 
43 #include <arpa/inet.h>
44 #include "resolv_private.h"
45 #include "resolv_iface.h"
46 #include "res_private.h"
47 
48 /* This code implements a small and *simple* DNS resolver cache.
49  *
50  * It is only used to cache DNS answers for a time defined by the smallest TTL
51  * among the answer records in order to reduce DNS traffic. It is not supposed
52  * to be a full DNS cache, since we plan to implement that in the future in a
53  * dedicated process running on the system.
54  *
55  * Note that its design is kept simple very intentionally, i.e.:
56  *
57  *  - it takes raw DNS query packet data as input, and returns raw DNS
58  *    answer packet data as output
59  *
60  *    (this means that two similar queries that encode the DNS name
61  *     differently will be treated distinctly).
62  *
63  *    the smallest TTL value among the answer records is used as the time
64  *    to keep an answer in the cache.
65  *
66  *    this is bad, but we absolutely want to avoid parsing the answer packets
67  *    (and should be solved by the later full DNS cache process).
68  *
69  *  - the implementation is just a (query-data) => (answer-data) hash table
70  *    with a trivial least-recently-used expiration policy.
71  *
72  * Doing this keeps the code simple and avoids having to deal with a lot of things
73  * that a full DNS cache is expected to do.
74  *
75  * The API is also very simple:
76  *
77  *   - the client calls _resolv_cache_get() to obtain a handle to the cache.
78  *     this will initialize the cache on first usage. the result can be NULL
79  *     if the cache is disabled.
80  *
81  *   - the client calls _resolv_cache_lookup() before performing a query
82  *
83  *     if the function returns RESOLV_CACHE_FOUND, the answer data has
84  *     been copied into the client-provided answer buffer.
85  *
86  *     if the function returns RESOLV_CACHE_NOTFOUND, the client should perform
87  *     a request normally, *then* call _resolv_cache_add() to add the received
88  *     answer to the cache.
89  *
90  *     if the function returns RESOLV_CACHE_UNSUPPORTED, the client should
91  *     perform a request normally, and *not* call _resolv_cache_add()
92  *
93  *     note that RESOLV_CACHE_UNSUPPORTED is also returned if the answer buffer
94  *     is too short to accommodate the cached result.
95  *
96  *  - when network settings change, the cache must be flushed since the list
97  *    of DNS servers probably changed. this is done by calling
98  *    _resolv_cache_reset()
99  *
100  *    the parameter to this function must be an ever-increasing generation
101  *    number corresponding to the current network settings state.
102  *
103  *    This is done because several threads could detect the same network
104  *    settings change (but at different times) and will all end up calling the
105  *    same function. Comparing with the last used generation number ensures
106  *    that the cache is only flushed once per network change.
107  */
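/* a minimal usage sketch of the flow described above (illustrative only:
 * 'cache', 'query'/'querylen' and the 'answer' buffer are assumed to come
 * from the caller, send_query_to_server() is a hypothetical placeholder for
 * the actual network transaction, and error handling is elided):
 */
#if 0
    int answerlen = 0;
    switch (_resolv_cache_lookup(cache, query, querylen,
                                 answer, sizeof(answer), &answerlen)) {
    case RESOLV_CACHE_FOUND:
        /* 'answer' already holds a copy of the cached response */
        break;
    case RESOLV_CACHE_NOTFOUND:
        /* perform the request normally, then populate the cache */
        answerlen = send_query_to_server(query, querylen, answer, sizeof(answer));
        if (answerlen > 0)
            _resolv_cache_add(cache, query, querylen, answer, answerlen);
        else
            _resolv_cache_query_failed(cache, query, querylen);
        break;
    case RESOLV_CACHE_UNSUPPORTED:
        /* perform the request normally, do *not* call _resolv_cache_add() */
        break;
    }
#endif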
108 
109 /* the name of an environment variable that will be checked the first time
110  * this code is called. if its value is "0", the resolver cache is
111  * disabled.
112  */
113 #define  CONFIG_ENV  "BIONIC_DNSCACHE"
114 
115 /* entries older than CONFIG_SECONDS seconds are always discarded.
116  */
117 #define  CONFIG_SECONDS    (60*10)    /* 10 minutes */
118 
119 /* default number of entries kept in the cache. This value has been
120  * determined by browsing through various sites and counting the number
121  * of corresponding requests. Keep in mind that our framework is currently
122  * performing two requests per name lookup (one for IPv4, the other for IPv6)
123  *
124  *    www.google.com      4
125  *    www.ysearch.com     6
126  *    www.amazon.com      8
127  *    www.nytimes.com     22
128  *    www.espn.com        28
129  *    www.msn.com         28
130  *    www.lemonde.fr      35
131  *
132  * (determined on 2009-02-17 from Paris, France; results may vary depending
133  *  on location)
134  *
135  * most high-level websites use lots of media/ad servers with different names
136  * but these are generally reused when browsing through the site.
137  *
138  * As such, a value of 64 should be relatively comfortable at the moment.
139  *
140  * The system property ro.net.dns_cache_size can be used to override the default
141  * value with a custom value
142  *
143  *
144  * ******************************************
145  * * NOTE - this has changed.
146  * * 1) we've added IPv6 support so each dns query results in 2 responses
147  * * 2) we've made this a system-wide cache, so the cost is less (it's not
148  * *    duplicated in each process) and the need is greater (more processes
149  * *    making different requests).
150  * * Upping by 2x for IPv6
151  * * Upping by another 5x for the centralized nature
152  * *****************************************
153  */
154 #define  CONFIG_MAX_ENTRIES    (64 * 2 * 5)    /* i.e. 640 entries */
155 /* name of the system property that can be used to set the cache size */
156 #define  DNS_CACHE_SIZE_PROP_NAME   "ro.net.dns_cache_size"
157 
158 /****************************************************************************/
159 /****************************************************************************/
160 /*****                                                                  *****/
161 /*****                                                                  *****/
162 /*****                                                                  *****/
163 /****************************************************************************/
164 /****************************************************************************/
165 
166 /* set to 1 to debug cache operations */
167 #define  DEBUG       0
168 
169 /* set to 1 to debug query data */
170 #define  DEBUG_DATA  0
171 
172 #undef XLOG
173 #if DEBUG
174 #  include "libc_logging.h"
175 #  define XLOG(...)  __libc_format_log(ANDROID_LOG_DEBUG,"libc",__VA_ARGS__)
176 
177 #include <stdio.h>
178 #include <stdarg.h>
179 
180 /** BOUNDED BUFFER FORMATTING
181  **/
182 
183 /* technical note:
184  *
185  *   the following debugging routines are used to append data to a bounded
186  *   buffer. they take two parameters:
187  *
188  *   - p : a pointer to the current cursor position in the buffer;
189  *         this value is initially set to the buffer's address.
190  *
191  *   - end : the address of the buffer's limit, i.e. of the first byte
192  *           after the buffer. this address should never be touched.
193  *
194  *           IMPORTANT: it is assumed that end > buffer_address, i.e.
195  *                      that the buffer is at least one byte long.
196  *
197  *   the _bprint_() functions return the new value of 'p' after the data
198  *   has been appended, and also ensure the following:
199  *
200  *   - the returned value will never be strictly greater than 'end'
201  *
202  *   - a return value equal to 'end' means that truncation occurred
203  *     (in which case, end[-1] will be set to 0)
204  *
205  *   - after returning from a _bprint_() function, the content of the buffer
206  *     is always 0-terminated, even in the event of truncation.
207  *
208  *  these conventions allow you to call _bprint_ functions multiple times and
209  *  only check for truncation at the end of the sequence, as in:
210  *
211  *     char  buff[1000], *p = buff, *end = p + sizeof(buff);
212  *
213  *     p = _bprint_c(p, end, '"');
214  *     p = _bprint_s(p, end, my_string);
215  *     p = _bprint_c(p, end, '"');
216  *
217  *     if (p >= end) {
218  *        // buffer was too small
219  *     }
220  *
221  *     printf( "%s", buff );
222  */
223 
224 /* add a char to a bounded buffer */
225 static char*
226 _bprint_c( char*  p, char*  end, int  c )
227 {
228     if (p < end) {
229         if (p+1 == end)
230             *p++ = 0;
231         else {
232             *p++ = (char) c;
233             *p   = 0;
234         }
235     }
236     return p;
237 }
238 
239 /* add a sequence of bytes to a bounded buffer */
240 static char*
241 _bprint_b( char*  p, char*  end, const char*  buf, int  len )
242 {
243     int  avail = end - p;
244 
245     if (avail <= 0 || len <= 0)
246         return p;
247 
248     if (avail > len)
249         avail = len;
250 
251     memcpy( p, buf, avail );
252     p += avail;
253 
254     if (p < end)
255         p[0] = 0;
256     else
257         end[-1] = 0;
258 
259     return p;
260 }
261 
262 /* add a string to a bounded buffer */
263 static char*
264 _bprint_s( char*  p, char*  end, const char*  str )
265 {
266     return _bprint_b(p, end, str, strlen(str));
267 }
268 
269 /* add a formatted string to a bounded buffer */
270 static char*
271 _bprint( char*  p, char*  end, const char*  format, ... )
272 {
273     int      avail, n;
274     va_list  args;
275 
276     avail = end - p;
277 
278     if (avail <= 0)
279         return p;
280 
281     va_start(args, format);
282     n = vsnprintf( p, avail, format, args);
283     va_end(args);
284 
285     /* certain C libraries return -1 in case of truncation */
286     if (n < 0 || n > avail)
287         n = avail;
288 
289     p += n;
290     /* certain C libraries do not zero-terminate in case of truncation */
291     if (p == end)
292         p[-1] = 0;
293 
294     return p;
295 }
296 
297 /* add a hex value to a bounded buffer, up to 8 digits */
298 static char*
299 _bprint_hex( char*  p, char*  end, unsigned  value, int  numDigits )
300 {
301     char   text[sizeof(unsigned)*2];
302     int    nn = 0;
303 
304     while (numDigits-- > 0) {
305         text[nn++] = "0123456789abcdef"[(value >> (numDigits*4)) & 15];
306     }
307     return _bprint_b(p, end, text, nn);
308 }
309 
310 /* add the hexadecimal dump of some memory area to a bounded buffer */
311 static char*
312 _bprint_hexdump( char*  p, char*  end, const uint8_t*  data, int  datalen )
313 {
314     int   lineSize = 16;
315 
316     while (datalen > 0) {
317         int  avail = datalen;
318         int  nn;
319 
320         if (avail > lineSize)
321             avail = lineSize;
322 
323         for (nn = 0; nn < avail; nn++) {
324             if (nn > 0)
325                 p = _bprint_c(p, end, ' ');
326             p = _bprint_hex(p, end, data[nn], 2);
327         }
328         for ( ; nn < lineSize; nn++ ) {
329             p = _bprint_s(p, end, "   ");
330         }
331         p = _bprint_s(p, end, "  ");
332 
333         for (nn = 0; nn < avail; nn++) {
334             int  c = data[nn];
335 
336             if (c < 32 || c > 127)
337                 c = '.';
338 
339             p = _bprint_c(p, end, c);
340         }
341         p = _bprint_c(p, end, '\n');
342 
343         data    += avail;
344         datalen -= avail;
345     }
346     return p;
347 }
348 
349 /* dump the content of a query packet to the log */
350 static void
351 XLOG_BYTES( const void*  base, int  len )
352 {
353     char  buff[1024];
354     char*  p = buff, *end = p + sizeof(buff);
355 
356     p = _bprint_hexdump(p, end, base, len);
357     XLOG("%s",buff);
358 }
359 
360 #else /* !DEBUG */
361 #  define  XLOG(...)        ((void)0)
362 #  define  XLOG_BYTES(a,b)  ((void)0)
363 #endif
364 
365 static time_t
366 _time_now( void )
367 {
368     struct timeval  tv;
369 
370     gettimeofday( &tv, NULL );
371     return tv.tv_sec;
372 }
373 
374 /* reminder: the general format of a DNS packet is the following:
375  *
376  *    HEADER  (12 bytes)
377  *    QUESTION  (variable)
378  *    ANSWER (variable)
379  *    AUTHORITY (variable)
380  *    ADDITIONAL (variable)
381  *
382  * the HEADER is made of:
383  *
384  *   ID     : 16 : 16-bit unique query identification field
385  *
386  *   QR     :  1 : set to 0 for queries, and 1 for responses
387  *   Opcode :  4 : set to 0 for queries
388  *   AA     :  1 : set to 0 for queries
389  *   TC     :  1 : truncation flag, will be set to 0 in queries
390  *   RD     :  1 : recursion desired
391  *
392  *   RA     :  1 : recursion available (0 in queries)
393  *   Z      :  3 : three reserved zero bits
394  *   RCODE  :  4 : response code (always 0=NOERROR in queries)
395  *
396  *   QDCount: 16 : question count
397  *   ANCount: 16 : Answer count (0 in queries)
398  *   NSCount: 16 : Authority Record count (0 in queries)
399  *   ARCount: 16 : Additional Record count (0 in queries)
400  *
401  * the QUESTION is made of QDCount Question Records (QRs)
402  * the ANSWER is made of ANCount RRs
403  * the AUTHORITY is made of NSCount RRs
404  * the ADDITIONAL is made of ARCount RRs
405  *
406  * Each Question Record (QR) is made of:
407  *
408  *   QNAME   : variable : Query DNS NAME
409  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
410  *   CLASS   : 16       : class of query (IN=1)
411  *
412  * Each Resource Record (RR) is made of:
413  *
414  *   NAME    : variable : DNS NAME
415  *   TYPE    : 16       : type of query (A=1, PTR=12, MX=15, AAAA=28, ALL=255)
416  *   CLASS   : 16       : class of query (IN=1)
417  *   TTL     : 32       : seconds to cache this RR (0=none)
418  *   RDLENGTH: 16       : size of RDDATA in bytes
419  *   RDDATA  : variable : RR data (depends on TYPE)
420  *
421  * Each QNAME contains a domain name encoded as a sequence of 'labels'
422  * terminated by a zero. Each label has the following format:
423  *
424  *    LEN  : 8     : length of the label (MUST be < 64)
425  *    NAME : 8*LEN : the label's characters (must not contain dots)
426  *
427  * A value of 0 in the encoding is interpreted as the 'root' domain and
428  * terminates the encoding. So 'www.android.com' will be encoded as:
429  *
430  *   <3>www<7>android<3>com<0>
431  *
432  * Where <n> represents the byte with value 'n'
433  *
434  * Each NAME reflects the QNAME of the question, but has a slightly more
435  * complex encoding in order to provide message compression. This is achieved
436  * by using a 2-byte pointer, with format:
437  *
438  *    TYPE   : 2  : 0b11 to indicate a pointer, 0b01 and 0b10 are reserved
439  *    OFFSET : 14 : offset to another part of the DNS packet
440  *
441  * The offset is relative to the start of the DNS packet and must point to
442  * a prior occurrence of a name within the packet. A pointer terminates the encoding.
443  *
444  * The NAME can be encoded in one of the following formats:
445  *
446  *   - a sequence of simple labels terminated by 0 (like QNAMEs)
447  *   - a single pointer
448  *   - a sequence of simple labels terminated by a pointer
449  *
450  * A pointer shall always point to either a pointer or a sequence of
451  * labels (which can themselves be terminated by either a 0 or a pointer)
452  *
453  * The expanded length of a given domain name should not exceed 255 bytes.
454  *
455  * NOTE: we don't parse the answer packets, so we don't need to deal with NAME
456  *       records, only QNAMEs.
457  */
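/* as a concrete illustration of the layout above (not from the original
 * sources), a query for the A record of 'www.android.com' with RD=1 would
 * be encoded as follows (ID arbitrary, all fields big-endian):
 *
 *    12 34  01 00  00 01  00 00  00 00  00 00      HEADER (ID, RD=1,
 *                                                   QDCOUNT=1, others 0)
 *    <3> w w w <7> a n d r o i d <3> c o m <0>     QNAME
 *    00 01                                         TYPE  = A  (1)
 *    00 01                                         CLASS = IN (1)
 */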
458 
459 #define  DNS_HEADER_SIZE  12
460 
461 #define  DNS_TYPE_A   "\00\01"   /* big-endian decimal 1 */
462 #define  DNS_TYPE_PTR "\00\014"  /* big-endian decimal 12 */
463 #define  DNS_TYPE_MX  "\00\017"  /* big-endian decimal 15 */
464 #define  DNS_TYPE_AAAA "\00\034" /* big-endian decimal 28 */
465 #define  DNS_TYPE_ALL "\00\377"  /* big-endian decimal 255 */
466 
467 #define  DNS_CLASS_IN "\00\01"   /* big-endian decimal 1 */
468 
469 typedef struct {
470     const uint8_t*  base;
471     const uint8_t*  end;
472     const uint8_t*  cursor;
473 } DnsPacket;
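/* 'base' and 'end' delimit the raw packet data, while 'cursor' is the
 * current read position; the _dnsPacket_xxx() helpers below advance the
 * cursor and never move it past 'end'. */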
474 
475 static void
476 _dnsPacket_init( DnsPacket*  packet, const uint8_t*  buff, int  bufflen )
477 {
478     packet->base   = buff;
479     packet->end    = buff + bufflen;
480     packet->cursor = buff;
481 }
482 
483 static void
484 _dnsPacket_rewind( DnsPacket*  packet )
485 {
486     packet->cursor = packet->base;
487 }
488 
489 static void
490 _dnsPacket_skip( DnsPacket*  packet, int  count )
491 {
492     const uint8_t*  p = packet->cursor + count;
493 
494     if (p > packet->end)
495         p = packet->end;
496 
497     packet->cursor = p;
498 }
499 
500 static int
501 _dnsPacket_readInt16( DnsPacket*  packet )
502 {
503     const uint8_t*  p = packet->cursor;
504 
505     if (p+2 > packet->end)
506         return -1;
507 
508     packet->cursor = p+2;
509     return (p[0]<< 8) | p[1];
510 }
511 
512 /** QUERY CHECKING
513  **/
514 
515 /* check bytes in a dns packet. returns 1 on success, 0 on failure.
516  * the cursor is only advanced in the case of success
517  */
518 static int
519 _dnsPacket_checkBytes( DnsPacket*  packet, int  numBytes, const void*  bytes )
520 {
521     const uint8_t*  p = packet->cursor;
522 
523     if (p + numBytes > packet->end)
524         return 0;
525 
526     if (memcmp(p, bytes, numBytes) != 0)
527         return 0;
528 
529     packet->cursor = p + numBytes;
530     return 1;
531 }
532 
533 /* parse and skip a given QNAME stored in a query packet,
534  * from the current cursor position. returns 1 on success,
535  * or 0 for malformed data.
536  */
537 static int
538 _dnsPacket_checkQName( DnsPacket*  packet )
539 {
540     const uint8_t*  p   = packet->cursor;
541     const uint8_t*  end = packet->end;
542 
543     for (;;) {
544         int  c;
545 
546         if (p >= end)
547             break;
548 
549         c = *p++;
550 
551         if (c == 0) {
552             packet->cursor = p;
553             return 1;
554         }
555 
556         /* we don't expect label compression in QNAMEs */
557         if (c >= 64)
558             break;
559 
560         p += c;
561         /* we rely on the bound check at the start
562          * of the loop here */
563     }
564     /* malformed data */
565     XLOG("malformed QNAME");
566     return 0;
567 }
568 
569 /* parse and skip a given QR stored in a packet.
570  * returns 1 on success, and 0 on failure
571  */
572 static int
573 _dnsPacket_checkQR( DnsPacket*  packet )
574 {
575     if (!_dnsPacket_checkQName(packet))
576         return 0;
577 
578     /* TYPE must be one of the things we support */
579     if (!_dnsPacket_checkBytes(packet, 2, DNS_TYPE_A) &&
580         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_PTR) &&
581         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_MX) &&
582         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_AAAA) &&
583         !_dnsPacket_checkBytes(packet, 2, DNS_TYPE_ALL))
584     {
585         XLOG("unsupported TYPE");
586         return 0;
587     }
588     /* CLASS must be IN */
589     if (!_dnsPacket_checkBytes(packet, 2, DNS_CLASS_IN)) {
590         XLOG("unsupported CLASS");
591         return 0;
592     }
593 
594     return 1;
595 }
596 
597 /* check the header of a DNS Query packet, return 1 if it is one
598  * type of query we can cache, or 0 otherwise
599  */
600 static int
601 _dnsPacket_checkQuery( DnsPacket*  packet )
602 {
603     const uint8_t*  p = packet->base;
604     int             qdCount, anCount, nsCount, arCount;
605 
606     if (p + DNS_HEADER_SIZE > packet->end) {
607         XLOG("query packet too small");
608         return 0;
609     }
610 
611     /* QR must be set to 0, opcode must be 0 and AA must be 0 */
612     /* RA, Z, and RCODE must be 0 */
613     if ((p[2] & 0xFC) != 0 || p[3] != 0) {
614         XLOG("query packet flags unsupported");
615         return 0;
616     }
617 
618     /* Note that we ignore the TC and RD bits here for the
619      * following reasons:
620      *
621      * - there is no point for a query packet sent to a server
622      *   to have the TC bit set, but the implementation might
623      *   set the bit in the query buffer for its own needs
624      *   between a _resolv_cache_lookup and a
625      *   _resolv_cache_add. We should not freak out if this
626      *   is the case.
627      *
628      * - we consider that the result from a RD=0 or a RD=1
629      *   query might be different, hence that the RD bit
630      *   should be used to differentiate cached results.
631      *
632      *   this implies that RD is checked when hashing or
633      *   comparing query packets, but not TC
634      */
635 
636     /* ANCOUNT, NSCOUNT and ARCOUNT must be 0 */
637     qdCount = (p[4] << 8) | p[5];
638     anCount = (p[6] << 8) | p[7];
639     nsCount = (p[8] << 8) | p[9];
640     arCount = (p[10]<< 8) | p[11];
641 
642     if (anCount != 0 || nsCount != 0 || arCount != 0) {
643         XLOG("query packet contains non-query records");
644         return 0;
645     }
646 
647     if (qdCount == 0) {
648         XLOG("query packet doesn't contain query record");
649         return 0;
650     }
651 
652     /* Check QDCOUNT QRs */
653     packet->cursor = p + DNS_HEADER_SIZE;
654 
655     for (;qdCount > 0; qdCount--)
656         if (!_dnsPacket_checkQR(packet))
657             return 0;
658 
659     return 1;
660 }
661 
662 /** QUERY DEBUGGING
663  **/
664 #if DEBUG
665 static char*
666 _dnsPacket_bprintQName(DnsPacket*  packet, char*  bp, char*  bend)
667 {
668     const uint8_t*  p   = packet->cursor;
669     const uint8_t*  end = packet->end;
670     int             first = 1;
671 
672     for (;;) {
673         int  c;
674 
675         if (p >= end)
676             break;
677 
678         c = *p++;
679 
680         if (c == 0) {
681             packet->cursor = p;
682             return bp;
683         }
684 
685         /* we don't expect label compression in QNAMEs */
686         if (c >= 64)
687             break;
688 
689         if (first)
690             first = 0;
691         else
692             bp = _bprint_c(bp, bend, '.');
693 
694         bp = _bprint_b(bp, bend, (const char*)p, c);
695 
696         p += c;
697         /* we rely on the bound check at the start
698          * of the loop here */
699     }
700     /* malformed data */
701     bp = _bprint_s(bp, bend, "<MALFORMED>");
702     return bp;
703 }
704 
705 static char*
706 _dnsPacket_bprintQR(DnsPacket*  packet, char*  p, char*  end)
707 {
708 #define  QQ(x)   { DNS_TYPE_##x, #x }
709     static const struct {
710         const char*  typeBytes;
711         const char*  typeString;
712     } qTypes[] =
713     {
714         QQ(A), QQ(PTR), QQ(MX), QQ(AAAA), QQ(ALL),
715         { NULL, NULL }
716     };
717     int          nn;
718     const char*  typeString = NULL;
719 
720     /* dump QNAME */
721     p = _dnsPacket_bprintQName(packet, p, end);
722 
723     /* dump TYPE */
724     p = _bprint_s(p, end, " (");
725 
726     for (nn = 0; qTypes[nn].typeBytes != NULL; nn++) {
727         if (_dnsPacket_checkBytes(packet, 2, qTypes[nn].typeBytes)) {
728             typeString = qTypes[nn].typeString;
729             break;
730         }
731     }
732 
733     if (typeString != NULL)
734         p = _bprint_s(p, end, typeString);
735     else {
736         int  typeCode = _dnsPacket_readInt16(packet);
737         p = _bprint(p, end, "UNKNOWN-%d", typeCode);
738     }
739 
740     p = _bprint_c(p, end, ')');
741 
742     /* skip CLASS */
743     _dnsPacket_skip(packet, 2);
744     return p;
745 }
746 
747 /* this function assumes the packet has already been checked */
748 static char*
749 _dnsPacket_bprintQuery( DnsPacket*  packet, char*  p, char*  end )
750 {
751     int   qdCount;
752 
753     if (packet->base[2] & 0x1) {
754         p = _bprint_s(p, end, "RECURSIVE ");
755     }
756 
757     _dnsPacket_skip(packet, 4);
758     qdCount = _dnsPacket_readInt16(packet);
759     _dnsPacket_skip(packet, 6);
760 
761     for ( ; qdCount > 0; qdCount-- ) {
762         p = _dnsPacket_bprintQR(packet, p, end);
763     }
764     return p;
765 }
766 #endif
767 
768 
769 /** QUERY HASHING SUPPORT
770  **
771  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKET HAS ALREADY
772  ** BEEN SUCCESSFULLY CHECKED.
773  **/
774 
775 /* use 32-bit FNV hash function */
776 #define  FNV_MULT   16777619U
777 #define  FNV_BASIS  2166136261U
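/* for reference, the recurrence implemented by the helpers below is plain
 * 32-bit FNV-1 (multiply first, then XOR; FNV-1a would XOR first):
 *
 *     unsigned hash = FNV_BASIS;
 *     for (n = 0; n < len; n++)
 *         hash = hash*FNV_MULT ^ data[n];
 *
 * only the bytes that identify a query (the RD bit, QDCOUNT and each QR)
 * are fed into it. */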
778 
779 static unsigned
780 _dnsPacket_hashBytes( DnsPacket*  packet, int  numBytes, unsigned  hash )
781 {
782     const uint8_t*  p   = packet->cursor;
783     const uint8_t*  end = packet->end;
784 
785     while (numBytes-- > 0 && p < end) {   /* hash at most numBytes bytes */
786         hash = hash*FNV_MULT ^ *p++;
787     }
788     packet->cursor = p;
789     return hash;
790 }
791 
792 
793 static unsigned
794 _dnsPacket_hashQName( DnsPacket*  packet, unsigned  hash )
795 {
796     const uint8_t*  p   = packet->cursor;
797     const uint8_t*  end = packet->end;
798 
799     for (;;) {
800         int  c;
801 
802         if (p >= end) {  /* should not happen */
803             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
804             break;
805         }
806 
807         c = *p++;
808 
809         if (c == 0)
810             break;
811 
812         if (c >= 64) {
813             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
814             break;
815         }
816         if (p + c >= end) {
817             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
818                     __FUNCTION__);
819             break;
820         }
821         while (c > 0) {
822             hash = hash*FNV_MULT ^ *p++;
823             c   -= 1;
824         }
825     }
826     packet->cursor = p;
827     return hash;
828 }
829 
830 static unsigned
831 _dnsPacket_hashQR( DnsPacket*  packet, unsigned  hash )
832 {
833     hash = _dnsPacket_hashQName(packet, hash);
834     hash = _dnsPacket_hashBytes(packet, 4, hash); /* TYPE and CLASS */
835     return hash;
836 }
837 
838 static unsigned
839 _dnsPacket_hashQuery( DnsPacket*  packet )
840 {
841     unsigned  hash = FNV_BASIS;
842     int       count;
843     _dnsPacket_rewind(packet);
844 
845     /* we ignore the TC bit for reasons explained in
846      * _dnsPacket_checkQuery().
847      *
848      * however we hash the RD bit to differentiate
849      * between answers for recursive and non-recursive
850      * queries.
851      */
852     hash = hash*FNV_MULT ^ (packet->base[2] & 1);
853 
854     /* assume: other flags are 0 */
855     _dnsPacket_skip(packet, 4);
856 
857     /* read QDCOUNT */
858     count = _dnsPacket_readInt16(packet);
859 
860     /* assume: ANcount, NScount, ARcount are 0 */
861     _dnsPacket_skip(packet, 6);
862 
863     /* hash QDCOUNT QRs */
864     for ( ; count > 0; count-- )
865         hash = _dnsPacket_hashQR(packet, hash);
866 
867     return hash;
868 }
869 
870 
871 /** QUERY COMPARISON
872  **
873  ** THE FOLLOWING CODE ASSUMES THAT THE INPUT PACKETS HAVE ALREADY
874  ** BEEN SUCCESSFULLY CHECKED.
875  **/
876 
877 static int
878 _dnsPacket_isEqualDomainName( DnsPacket*  pack1, DnsPacket*  pack2 )
879 {
880     const uint8_t*  p1   = pack1->cursor;
881     const uint8_t*  end1 = pack1->end;
882     const uint8_t*  p2   = pack2->cursor;
883     const uint8_t*  end2 = pack2->end;
884 
885     for (;;) {
886         int  c1, c2;
887 
888         if (p1 >= end1 || p2 >= end2) {
889             XLOG("%s: INTERNAL_ERROR: read-overflow !!\n", __FUNCTION__);
890             break;
891         }
892         c1 = *p1++;
893         c2 = *p2++;
894         if (c1 != c2)
895             break;
896 
897         if (c1 == 0) {
898             pack1->cursor = p1;
899             pack2->cursor = p2;
900             return 1;
901         }
902         if (c1 >= 64) {
903             XLOG("%s: INTERNAL_ERROR: malformed domain !!\n", __FUNCTION__);
904             break;
905         }
906         if ((p1+c1 > end1) || (p2+c1 > end2)) {
907             XLOG("%s: INTERNAL_ERROR: simple label read-overflow !!\n",
908                     __FUNCTION__);
909             break;
910         }
911         if (memcmp(p1, p2, c1) != 0)
912             break;
913         p1 += c1;
914         p2 += c1;
915         /* we rely on the bound checks at the start of the loop */
916     }
917     /* not the same, or one is malformed */
918     XLOG("different DN");
919     return 0;
920 }
921 
922 static int
923 _dnsPacket_isEqualBytes( DnsPacket*  pack1, DnsPacket*  pack2, int  numBytes )
924 {
925     const uint8_t*  p1 = pack1->cursor;
926     const uint8_t*  p2 = pack2->cursor;
927 
928     if ( p1 + numBytes > pack1->end || p2 + numBytes > pack2->end )
929         return 0;
930 
931     if ( memcmp(p1, p2, numBytes) != 0 )
932         return 0;
933 
934     pack1->cursor += numBytes;
935     pack2->cursor += numBytes;
936     return 1;
937 }
938 
939 static int
940 _dnsPacket_isEqualQR( DnsPacket*  pack1, DnsPacket*  pack2 )
941 {
942     /* compare domain name encoding + TYPE + CLASS */
943     if ( !_dnsPacket_isEqualDomainName(pack1, pack2) ||
944          !_dnsPacket_isEqualBytes(pack1, pack2, 2+2) )
945         return 0;
946 
947     return 1;
948 }
949 
950 static int
951 _dnsPacket_isEqualQuery( DnsPacket*  pack1, DnsPacket*  pack2 )
952 {
953     int  count1, count2;
954 
955     /* compare the headers, ignore most fields */
956     _dnsPacket_rewind(pack1);
957     _dnsPacket_rewind(pack2);
958 
959     /* compare RD, ignore TC, see comment in _dnsPacket_checkQuery */
960     if ((pack1->base[2] & 1) != (pack2->base[2] & 1)) {
961         XLOG("different RD");
962         return 0;
963     }
964 
965     /* assume: other flags are all 0 */
966     _dnsPacket_skip(pack1, 4);
967     _dnsPacket_skip(pack2, 4);
968 
969     /* compare QDCOUNT */
970     count1 = _dnsPacket_readInt16(pack1);
971     count2 = _dnsPacket_readInt16(pack2);
972     if (count1 != count2 || count1 < 0) {
973         XLOG("different QDCOUNT");
974         return 0;
975     }
976 
977     /* assume: ANcount, NScount and ARcount are all 0 */
978     _dnsPacket_skip(pack1, 6);
979     _dnsPacket_skip(pack2, 6);
980 
981     /* compare the QDCOUNT QRs */
982     for ( ; count1 > 0; count1-- ) {
983         if (!_dnsPacket_isEqualQR(pack1, pack2)) {
984             XLOG("different QR");
985             return 0;
986         }
987     }
988     return 1;
989 }
990 
991 /****************************************************************************/
992 /****************************************************************************/
993 /*****                                                                  *****/
994 /*****                                                                  *****/
995 /*****                                                                  *****/
996 /****************************************************************************/
997 /****************************************************************************/
998 
999 /* cache entry. for simplicity, 'hash' and 'hlink' are inlined in this
1000  * structure though they are conceptually part of the hash table.
1001  *
1002  * similarly, mru_next and mru_prev are part of the global MRU list
1003  */
1004 typedef struct Entry {
1005     unsigned int     hash;   /* hash value */
1006     struct Entry*    hlink;  /* next in collision chain */
1007     struct Entry*    mru_prev;
1008     struct Entry*    mru_next;
1009 
1010     const uint8_t*   query;
1011     int              querylen;
1012     const uint8_t*   answer;
1013     int              answerlen;
1014     time_t           expires;   /* time_t when the entry isn't valid any more */
1015     int              id;        /* for debugging purposes */
1016 } Entry;
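/* note on memory layout: entry_alloc() below stores the query and answer
 * bytes in the same allocation as the Entry itself, i.e.
 *
 *    [ Entry | querylen bytes of query | answerlen bytes of answer ]
 *
 * which is why entry_free() only needs a single free(). */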
1017 
1018 /**
1019  * Find the TTL for a negative DNS result.  This is defined as the minimum
1020  * of the SOA record's TTL and its MINIMUM-TTL field (RFC 2308).
1021  *
1022  * Return 0 if not found.
1023  */
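/* for reference (RFC 1035), the SOA RDATA walked below consists of two
 * variable-length domain names (MNAME, RNAME) followed by five 32-bit
 * integers: SERIAL, REFRESH, RETRY, EXPIRE and MINIMUM -- hence the two
 * dn_skipname() calls, the 5*NS_INT32SZ length check, and the 4*NS_INT32SZ
 * skip before reading MINIMUM. */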
1024 static u_long
1025 answer_getNegativeTTL(ns_msg handle) {
1026     int n, nscount;
1027     u_long result = 0;
1028     ns_rr rr;
1029 
1030     nscount = ns_msg_count(handle, ns_s_ns);
1031     for (n = 0; n < nscount; n++) {
1032         if ((ns_parserr(&handle, ns_s_ns, n, &rr) == 0) && (ns_rr_type(rr) == ns_t_soa)) {
1033             const u_char *rdata = ns_rr_rdata(rr); // find the data
1034             const u_char *edata = rdata + ns_rr_rdlen(rr); // add the len to find the end
1035             int len;
1036             u_long ttl, rec_result = ns_rr_ttl(rr);
1037 
1038             // find the MINIMUM-TTL field from the blob of binary data for this record
1039             // skip the server name
1040             len = dn_skipname(rdata, edata);
1041             if (len == -1) continue; // error skipping
1042             rdata += len;
1043 
1044             // skip the admin name
1045             len = dn_skipname(rdata, edata);
1046             if (len == -1) continue; // error skipping
1047             rdata += len;
1048 
1049             if (edata - rdata != 5*NS_INT32SZ) continue;
1050             // skip: serial number + refresh interval + retry interval + expiry
1051             rdata += NS_INT32SZ * 4;
1052             // finally read the MINIMUM TTL
1053             ttl = ns_get32(rdata);
1054             if (ttl < rec_result) {
1055                 rec_result = ttl;
1056             }
1057             // Now that the record is read successfully, apply the new min TTL
1058             if (n == 0 || rec_result < result) {
1059                 result = rec_result;
1060             }
1061         }
1062     }
1063     return result;
1064 }
1065 
1066 /**
1067  * Parse the answer records and find the smallest
1068  * TTL among them.  This might be from
1069  * the answer records if found or from the SOA record
1070  * if it's a negative result.
1071  *
1072  * The returned TTL is the number of seconds to
1073  * keep the answer in the cache.
1074  *
1075  * In case of a parse error, zero (0) is returned, which
1076  * indicates that the answer shall not be cached.
1077  */
1078 static u_long
1079 answer_getTTL(const void* answer, int answerlen)
1080 {
1081     ns_msg handle;
1082     int ancount, n;
1083     u_long result, ttl;
1084     ns_rr rr;
1085 
1086     result = 0;
1087     if (ns_initparse(answer, answerlen, &handle) >= 0) {
1088         // get number of answer records
1089         ancount = ns_msg_count(handle, ns_s_an);
1090 
1091         if (ancount == 0) {
1092             // a response with no answers?  Cache this negative result.
1093             result = answer_getNegativeTTL(handle);
1094         } else {
1095             for (n = 0; n < ancount; n++) {
1096                 if (ns_parserr(&handle, ns_s_an, n, &rr) == 0) {
1097                     ttl = ns_rr_ttl(rr);
1098                     if (n == 0 || ttl < result) {
1099                         result = ttl;
1100                     }
1101                 } else {
1102                     XLOG("ns_parserr failed ancount no = %d. errno = %s\n", n, strerror(errno));
1103                 }
1104             }
1105         }
1106     } else {
1107         XLOG("ns_parserr failed. %s\n", strerror(errno));
1108     }
1109 
1110     XLOG("TTL = %lu\n", result);
1111 
1112     return result;
1113 }
1114 
1115 static void
1116 entry_free( Entry*  e )
1117 {
1118     /* everything is allocated in a single memory block */
1119     if (e) {
1120         free(e);
1121     }
1122 }
1123 
1124 static __inline__ void
1125 entry_mru_remove( Entry*  e )
1126 {
1127     e->mru_prev->mru_next = e->mru_next;
1128     e->mru_next->mru_prev = e->mru_prev;
1129 }
1130 
1131 static __inline__ void
1132 entry_mru_add( Entry*  e, Entry*  list )
1133 {
1134     Entry*  first = list->mru_next;
1135 
1136     e->mru_next = first;
1137     e->mru_prev = list;
1138 
1139     list->mru_next  = e;
1140     first->mru_prev = e;
1141 }
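/* the Cache's 'mru_list' field (see below) acts as the sentinel of this
 * circular doubly-linked list: mru_list.mru_next is the most recently used
 * entry, mru_list.mru_prev the least recently used one (the eviction victim
 * in _cache_remove_oldest), and an empty list has both pointers aimed at
 * the sentinel itself. */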
1142 
1143 /* compute the hash of a given entry, this is a hash of most
1144  * data in the query (key) */
1145 static unsigned
1146 entry_hash( const Entry*  e )
1147 {
1148     DnsPacket  pack[1];
1149 
1150     _dnsPacket_init(pack, e->query, e->querylen);
1151     return _dnsPacket_hashQuery(pack);
1152 }
1153 
1154 /* initialize an Entry as a search key, this also checks the input query packet
1155  * returns 1 on success, or 0 in case of unsupported/malformed data */
1156 static int
1157 entry_init_key( Entry*  e, const void*  query, int  querylen )
1158 {
1159     DnsPacket  pack[1];
1160 
1161     memset(e, 0, sizeof(*e));
1162 
1163     e->query    = query;
1164     e->querylen = querylen;
1165     e->hash     = entry_hash(e);
1166 
1167     _dnsPacket_init(pack, query, querylen);
1168 
1169     return _dnsPacket_checkQuery(pack);
1170 }
1171 
1172 /* allocate a new entry as a cache node */
1173 static Entry*
1174 entry_alloc( const Entry*  init, const void*  answer, int  answerlen )
1175 {
1176     Entry*  e;
1177     int     size;
1178 
1179     size = sizeof(*e) + init->querylen + answerlen;
1180     e    = calloc(size, 1);
1181     if (e == NULL)
1182         return e;
1183 
1184     e->hash     = init->hash;
1185     e->query    = (const uint8_t*)(e+1);
1186     e->querylen = init->querylen;
1187 
1188     memcpy( (char*)e->query, init->query, e->querylen );
1189 
1190     e->answer    = e->query + e->querylen;
1191     e->answerlen = answerlen;
1192 
1193     memcpy( (char*)e->answer, answer, e->answerlen );
1194 
1195     return e;
1196 }
1197 
1198 static int
1199 entry_equals( const Entry*  e1, const Entry*  e2 )
1200 {
1201     DnsPacket  pack1[1], pack2[1];
1202 
1203     if (e1->querylen != e2->querylen) {
1204         return 0;
1205     }
1206     _dnsPacket_init(pack1, e1->query, e1->querylen);
1207     _dnsPacket_init(pack2, e2->query, e2->querylen);
1208 
1209     return _dnsPacket_isEqualQuery(pack1, pack2);
1210 }
1211 
1212 /****************************************************************************/
1213 /****************************************************************************/
1214 /*****                                                                  *****/
1215 /*****                                                                  *****/
1216 /*****                                                                  *****/
1217 /****************************************************************************/
1218 /****************************************************************************/
1219 
1220 /* We use a simple hash table with external collision lists
1221  * for simplicity, the hash-table fields 'hash' and 'hlink' are
1222  * inlined in the Entry structure.
1223  */
1224 
1225 /* Maximum time (in seconds) for a thread to wait for a pending request */
1226 #define PENDING_REQUEST_TIMEOUT 20
1227 
1228 typedef struct pending_req_info {
1229     unsigned int                hash;
1230     pthread_cond_t              cond;
1231     struct pending_req_info*    next;
1232 } PendingReqInfo;
1233 
1234 typedef struct resolv_cache {
1235     int              max_entries;
1236     int              num_entries;
1237     Entry            mru_list;
1238     pthread_mutex_t  lock;
1239     unsigned         generation;
1240     int              last_id;
1241     Entry*           entries;
1242     PendingReqInfo   pending_requests;
1243 } Cache;
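/* all fields of a Cache are protected by its 'lock'; helpers whose name
 * ends in _locked expect the caller to already hold it, while the public
 * _resolv_cache_lookup / _resolv_cache_add / _resolv_cache_query_failed
 * entry points acquire and release it themselves. */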
1244 
1245 typedef struct resolv_cache_info {
1246     char                        ifname[IF_NAMESIZE + 1];
1247     struct in_addr              ifaddr;
1248     Cache*                      cache;
1249     struct resolv_cache_info*   next;
1250     char*                       nameservers[MAXNS +1];
1251     struct addrinfo*            nsaddrinfo[MAXNS + 1];
1252     char                        defdname[256];
1253     int                         dnsrch_offset[MAXDNSRCH+1];  // offsets into defdname
1254 } CacheInfo;
1255 
1256 typedef struct resolv_pidiface_info {
1257     int                             pid;
1258     char                            ifname[IF_NAMESIZE + 1];
1259     struct resolv_pidiface_info*    next;
1260 } PidIfaceInfo;
1261 typedef struct resolv_uidiface_info {
1262     int                             uid_start;
1263     int                             uid_end;
1264     char                            ifname[IF_NAMESIZE + 1];
1265     struct resolv_uidiface_info*    next;
1266 } UidIfaceInfo;
1267 
1268 #define  HTABLE_VALID(x)  ((x) != NULL && (x) != HTABLE_DELETED)
1269 
1270 static void
1271 _cache_flush_pending_requests_locked( struct resolv_cache* cache )
1272 {
1273     struct pending_req_info *ri, *tmp;
1274     if (cache) {
1275         ri = cache->pending_requests.next;
1276 
1277         while (ri) {
1278             tmp = ri;
1279             ri = ri->next;
1280             pthread_cond_broadcast(&tmp->cond);
1281 
1282             pthread_cond_destroy(&tmp->cond);
1283             free(tmp);
1284         }
1285 
1286         cache->pending_requests.next = NULL;
1287     }
1288 }
1289 
1290 /* return 0 if no pending request is found matching the key.
1291  * if a matching request is found, the calling thread will wait
1292  * and 1 is returned once it has been released */
1293 static int
1294 _cache_check_pending_request_locked( struct resolv_cache* cache, Entry* key )
1295 {
1296     struct pending_req_info *ri, *prev;
1297     int exist = 0;
1298 
1299     if (cache && key) {
1300         ri = cache->pending_requests.next;
1301         prev = &cache->pending_requests;
1302         while (ri) {
1303             if (ri->hash == key->hash) {
1304                 exist = 1;
1305                 break;
1306             }
1307             prev = ri;
1308             ri = ri->next;
1309         }
1310 
1311         if (!exist) {
1312             ri = calloc(1, sizeof(struct pending_req_info));
1313             if (ri) {
1314                 ri->hash = key->hash;
1315                 pthread_cond_init(&ri->cond, NULL);
1316                 prev->next = ri;
1317             }
1318         } else {
1319             struct timespec ts = {0,0};
1320             XLOG("Waiting for previous request");
1321             ts.tv_sec = _time_now() + PENDING_REQUEST_TIMEOUT;
1322             pthread_cond_timedwait(&ri->cond, &cache->lock, &ts);
1323         }
1324     }
1325 
1326     return exist;
1327 }
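/* note that pthread_cond_timedwait() releases cache->lock while the thread
 * is blocked and re-acquires it before returning, which is why callers such
 * as _resolv_cache_lookup() redo the hash-table lookup after this function
 * returns 1. */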
1328 
1329 /* notify any thread waiting on a request matching the key
1330  * that the corresponding answer has been added to the cache */
1331 static void
1332 _cache_notify_waiting_tid_locked( struct resolv_cache* cache, Entry* key )
1333 {
1334     struct pending_req_info *ri, *prev;
1335 
1336     if (cache && key) {
1337         ri = cache->pending_requests.next;
1338         prev = &cache->pending_requests;
1339         while (ri) {
1340             if (ri->hash == key->hash) {
1341                 pthread_cond_broadcast(&ri->cond);
1342                 break;
1343             }
1344             prev = ri;
1345             ri = ri->next;
1346         }
1347 
1348         // remove item from list and destroy
1349         if (ri) {
1350             prev->next = ri->next;
1351             pthread_cond_destroy(&ri->cond);
1352             free(ri);
1353         }
1354     }
1355 }
1356 
1357 /* notify the cache that the query failed */
1358 void
1359 _resolv_cache_query_failed( struct resolv_cache* cache,
1360                    const void* query,
1361                    int         querylen)
1362 {
1363     Entry    key[1];
1364 
1365     if (cache && entry_init_key(key, query, querylen)) {
1366         pthread_mutex_lock(&cache->lock);
1367         _cache_notify_waiting_tid_locked(cache, key);
1368         pthread_mutex_unlock(&cache->lock);
1369     }
1370 }
1371 
1372 static void
1373 _cache_flush_locked( Cache*  cache )
1374 {
1375     int     nn;
1376 
1377     for (nn = 0; nn < cache->max_entries; nn++)
1378     {
1379         Entry**  pnode = (Entry**) &cache->entries[nn];
1380 
1381         while (*pnode != NULL) {
1382             Entry*  node = *pnode;
1383             *pnode = node->hlink;
1384             entry_free(node);
1385         }
1386     }
1387 
1388     // flush pending request
1389     _cache_flush_pending_requests_locked(cache);
1390 
1391     cache->mru_list.mru_next = cache->mru_list.mru_prev = &cache->mru_list;
1392     cache->num_entries       = 0;
1393     cache->last_id           = 0;
1394 
1395     XLOG("*************************\n"
1396          "*** DNS CACHE FLUSHED ***\n"
1397          "*************************");
1398 }
1399 
1400 /* Return the maximum number of entries allowed in the cache,
1401  * i.e. the cache size. The cache size is defined by the
1402  * system property ro.net.dns_cache_size, or by
1403  * CONFIG_MAX_ENTRIES if the property is not set
1404  * or is set to an invalid value. */
1405 static int
1406 _res_cache_get_max_entries( void )
1407 {
1408     int result = -1;
1409     char cache_size[PROP_VALUE_MAX];
1410 
1411     const char* cache_mode = getenv("ANDROID_DNS_MODE");
1412 
1413     if (cache_mode == NULL || strcmp(cache_mode, "local") != 0) {
1414         // Don't use the cache in local mode.  This is used by the
1415         // proxy itself.
1416         XLOG("setup cache for non-cache process. size=0, %s", cache_mode);
1417         return 0;
1418     }
1419 
1420     if (__system_property_get(DNS_CACHE_SIZE_PROP_NAME, cache_size) > 0) {
1421         result = atoi(cache_size);
1422     }
1423 
1424     // ro.net.dns_cache_size not set, or set to a non-positive value
1425     if (result <= 0) {
1426         result = CONFIG_MAX_ENTRIES;
1427     }
1428 
1429     XLOG("cache size: %d", result);
1430     return result;
1431 }
1432 
1433 static struct resolv_cache*
1434 _resolv_cache_create( void )
1435 {
1436     struct resolv_cache*  cache;
1437 
1438     cache = calloc(sizeof(*cache), 1);
1439     if (cache) {
1440         cache->max_entries = _res_cache_get_max_entries();
1441         cache->entries = calloc(sizeof(*cache->entries), cache->max_entries);
1442         if (cache->entries) {
1443             cache->generation = ~0U;
1444             pthread_mutex_init( &cache->lock, NULL );
1445             cache->mru_list.mru_prev = cache->mru_list.mru_next = &cache->mru_list;
1446             XLOG("%s: cache created\n", __FUNCTION__);
1447         } else {
1448             free(cache);
1449             cache = NULL;
1450         }
1451     }
1452     return cache;
1453 }
1454 
1455 
1456 #if DEBUG
1457 static void
1458 _dump_query( const uint8_t*  query, int  querylen )
1459 {
1460     char       temp[256], *p=temp, *end=p+sizeof(temp);
1461     DnsPacket  pack[1];
1462 
1463     _dnsPacket_init(pack, query, querylen);
1464     p = _dnsPacket_bprintQuery(pack, p, end);
1465     XLOG("QUERY: %s", temp);
1466 }
1467 
1468 static void
1469 _cache_dump_mru( Cache*  cache )
1470 {
1471     char    temp[512], *p=temp, *end=p+sizeof(temp);
1472     Entry*  e;
1473 
1474     p = _bprint(temp, end, "MRU LIST (%2d): ", cache->num_entries);
1475     for (e = cache->mru_list.mru_next; e != &cache->mru_list; e = e->mru_next)
1476         p = _bprint(p, end, " %d", e->id);
1477 
1478     XLOG("%s", temp);
1479 }
1480 
1481 static void
1482 _dump_answer(const void* answer, int answerlen)
1483 {
1484     res_state statep;
1485     FILE* fp;
1486     char* buf;
1487     int fileLen;
1488 
1489     fp = fopen("/data/reslog.txt", "w+");
1490     if (fp != NULL) {
1491         statep = __res_get_state();
1492 
1493         res_pquery(statep, answer, answerlen, fp);
1494 
1495         //Get file length
1496         fseek(fp, 0, SEEK_END);
1497         fileLen=ftell(fp);
1498         fseek(fp, 0, SEEK_SET);
1499         buf = (char *)malloc(fileLen+1);
1500         if (buf != NULL) {
1501             //Read file contents into buffer
1502             buf[fread(buf, 1, fileLen, fp)] = '\0';  // read and 0-terminate
1503             XLOG("%s\n", buf);
1504             free(buf);
1505         }
1506         fclose(fp);
1507         remove("/data/reslog.txt");
1508     }
1509     else {
1510         errno = 0; // otherwise this debug path would leave a spurious errno set
1511         XLOG("_dump_answer: can't open file\n");
1512     }
1513 }
1514 #endif
1515 
1516 #if DEBUG
1517 #  define  XLOG_QUERY(q,len)   _dump_query((q), (len))
1518 #  define  XLOG_ANSWER(a, len) _dump_answer((a), (len))
1519 #else
1520 #  define  XLOG_QUERY(q,len)   ((void)0)
1521 #  define  XLOG_ANSWER(a,len)  ((void)0)
1522 #endif
1523 
1524 /* This function tries to find a key within the hash table
1525  * In case of success, it will return a *pointer* to the hashed key.
1526  * In case of failure, it will return a *pointer* to NULL
1527  *
1528  * So, the caller must check '*result' to check for success/failure.
1529  *
1530  * The main idea is that the result can later be used directly in
1531  * calls to _resolv_cache_add or _resolv_cache_remove as the 'lookup'
1532  * parameter. This makes the code simpler and avoids re-searching
1533  * for the key position in the htable.
1534  *
1535  * The result of a lookup_p is only valid until you alter the hash
1536  * table.
1537  */
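/* a sketch of the intended usage pattern (compare _resolv_cache_add below):
 *
 *     Entry**  lookup = _cache_lookup_p(cache, key);
 *     if (*lookup == NULL) {                         // miss
 *         Entry*  e = entry_alloc(key, answer, answerlen);
 *         if (e != NULL)
 *             _cache_add_p(cache, lookup, e);
 *     } else {                                       // hit
 *         // read (*lookup)->answer, or _cache_remove_p(cache, lookup)
 *     }
 */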
1538 static Entry**
1539 _cache_lookup_p( Cache*   cache,
1540                  Entry*   key )
1541 {
1542     int      index = key->hash % cache->max_entries;
1543     Entry**  pnode = (Entry**) &cache->entries[ index ];
1544 
1545     while (*pnode != NULL) {
1546         Entry*  node = *pnode;
1547 
1548         if (node == NULL)
1549             break;
1550 
1551         if (node->hash == key->hash && entry_equals(node, key))
1552             break;
1553 
1554         pnode = &node->hlink;
1555     }
1556     return pnode;
1557 }
1558 
1559 /* Add a new entry to the hash table. 'lookup' must be the
1560  * result of an immediate previous failed _lookup_p() call
1561  * (i.e. with *lookup == NULL), and 'e' is the pointer to the
1562  * newly created entry
1563  */
1564 static void
1565 _cache_add_p( Cache*   cache,
1566               Entry**  lookup,
1567               Entry*   e )
1568 {
1569     *lookup = e;
1570     e->id = ++cache->last_id;
1571     entry_mru_add(e, &cache->mru_list);
1572     cache->num_entries += 1;
1573 
1574     XLOG("%s: entry %d added (count=%d)", __FUNCTION__,
1575          e->id, cache->num_entries);
1576 }
1577 
1578 /* Remove an existing entry from the hash table,
1579  * 'lookup' must be the result of an immediate previous
1580  * and successful _lookup_p() call.
1581  */
1582 static void
1583 _cache_remove_p( Cache*   cache,
1584                  Entry**  lookup )
1585 {
1586     Entry*  e  = *lookup;
1587 
1588     XLOG("%s: entry %d removed (count=%d)", __FUNCTION__,
1589          e->id, cache->num_entries-1);
1590 
1591     entry_mru_remove(e);
1592     *lookup = e->hlink;
1593     entry_free(e);
1594     cache->num_entries -= 1;
1595 }
1596 
1597 /* Remove the oldest entry from the hash table.
1598  */
1599 static void
1600 _cache_remove_oldest( Cache*  cache )
1601 {
1602     Entry*   oldest = cache->mru_list.mru_prev;
1603     Entry**  lookup = _cache_lookup_p(cache, oldest);
1604 
1605     if (*lookup == NULL) { /* should not happen */
1606         XLOG("%s: OLDEST NOT IN HTABLE ?", __FUNCTION__);
1607         return;
1608     }
1609     if (DEBUG) {
1610         XLOG("Cache full - removing oldest");
1611         XLOG_QUERY(oldest->query, oldest->querylen);
1612     }
1613     _cache_remove_p(cache, lookup);
1614 }
1615 
1616 /* Remove all expired entries from the hash table.
1617  */
1618 static void _cache_remove_expired(Cache* cache) {
1619     Entry* e;
1620     time_t now = _time_now();
1621 
1622     for (e = cache->mru_list.mru_next; e != &cache->mru_list;) {
1623         // Entry is old, remove
1624         if (now >= e->expires) {
1625             Entry** lookup = _cache_lookup_p(cache, e);
1626             if (*lookup == NULL) { /* should not happen */
1627                 XLOG("%s: ENTRY NOT IN HTABLE ?", __FUNCTION__);
1628                 return;
1629             }
1630             e = e->mru_next;
1631             _cache_remove_p(cache, lookup);
1632         } else {
1633             e = e->mru_next;
1634         }
1635     }
1636 }
1637 
1638 ResolvCacheStatus
1639 _resolv_cache_lookup( struct resolv_cache*  cache,
1640                       const void*           query,
1641                       int                   querylen,
1642                       void*                 answer,
1643                       int                   answersize,
1644                       int                  *answerlen )
1645 {
1646     Entry      key[1];
1647     Entry**    lookup;
1648     Entry*     e;
1649     time_t     now;
1650 
1651     ResolvCacheStatus  result = RESOLV_CACHE_NOTFOUND;
1652 
1653     XLOG("%s: lookup", __FUNCTION__);
1654     XLOG_QUERY(query, querylen);
1655 
1656     /* we don't cache malformed queries */
1657     if (!entry_init_key(key, query, querylen)) {
1658         XLOG("%s: unsupported query", __FUNCTION__);
1659         return RESOLV_CACHE_UNSUPPORTED;
1660     }
1661     /* lookup cache */
1662     pthread_mutex_lock( &cache->lock );
1663 
1664     /* see the description of _lookup_p to understand this.
1665      * the function always returns a non-NULL pointer.
1666      */
1667     lookup = _cache_lookup_p(cache, key);
1668     e      = *lookup;
1669 
1670     if (e == NULL) {
1671         XLOG( "NOT IN CACHE");
1672         // the calling thread will wait if an outstanding request
1673         // matching this query is found
1674         if (!_cache_check_pending_request_locked(cache, key)) {
1675             goto Exit;
1676         } else {
1677             lookup = _cache_lookup_p(cache, key);
1678             e = *lookup;
1679             if (e == NULL) {
1680                 goto Exit;
1681             }
1682         }
1683     }
1684 
1685     now = _time_now();
1686 
1687     /* remove stale entries here */
1688     if (now >= e->expires) {
1689         XLOG( " NOT IN CACHE (STALE ENTRY %p DISCARDED)", *lookup );
1690         XLOG_QUERY(e->query, e->querylen);
1691         _cache_remove_p(cache, lookup);
1692         goto Exit;
1693     }
1694 
1695     *answerlen = e->answerlen;
1696     if (e->answerlen > answersize) {
1697         /* NOTE: we return UNSUPPORTED if the answer buffer is too short */
1698         result = RESOLV_CACHE_UNSUPPORTED;
1699         XLOG(" ANSWER TOO LONG");
1700         goto Exit;
1701     }
1702 
1703     memcpy( answer, e->answer, e->answerlen );
1704 
1705     /* bump up this entry to the top of the MRU list */
1706     if (e != cache->mru_list.mru_next) {
1707         entry_mru_remove( e );
1708         entry_mru_add( e, &cache->mru_list );
1709     }
1710 
1711     XLOG( "FOUND IN CACHE entry=%p", e );
1712     result = RESOLV_CACHE_FOUND;
1713 
1714 Exit:
1715     pthread_mutex_unlock( &cache->lock );
1716     return result;
1717 }
1718 
1719 
1720 void
1721 _resolv_cache_add( struct resolv_cache*  cache,
1722                    const void*           query,
1723                    int                   querylen,
1724                    const void*           answer,
1725                    int                   answerlen )
1726 {
1727     Entry    key[1];
1728     Entry*   e;
1729     Entry**  lookup;
1730     u_long   ttl;
1731 
1732     /* don't assume that the query has already been cached
1733      */
1734     if (!entry_init_key( key, query, querylen )) {
1735         XLOG( "%s: passed invalid query ?", __FUNCTION__);
1736         return;
1737     }
1738 
1739     pthread_mutex_lock( &cache->lock );
1740 
1741     XLOG( "%s: query:", __FUNCTION__ );
1742     XLOG_QUERY(query,querylen);
1743     XLOG_ANSWER(answer, answerlen);
1744 #if DEBUG_DATA
1745     XLOG( "answer:");
1746     XLOG_BYTES(answer,answerlen);
1747 #endif
1748 
1749     lookup = _cache_lookup_p(cache, key);
1750     e      = *lookup;
1751 
1752     if (e != NULL) { /* should not happen */
1753         XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1754              __FUNCTION__, e);
1755         goto Exit;
1756     }
1757 
1758     if (cache->num_entries >= cache->max_entries) {
1759         _cache_remove_expired(cache);
1760         if (cache->num_entries >= cache->max_entries) {
1761             _cache_remove_oldest(cache);
1762         }
1763         /* need to lookup again */
1764         lookup = _cache_lookup_p(cache, key);
1765         e      = *lookup;
1766         if (e != NULL) {
1767             XLOG("%s: ALREADY IN CACHE (%p) ? IGNORING ADD",
1768                 __FUNCTION__, e);
1769             goto Exit;
1770         }
1771     }
1772 
1773     ttl = answer_getTTL(answer, answerlen);
1774     if (ttl > 0) {
1775         e = entry_alloc(key, answer, answerlen);
1776         if (e != NULL) {
1777             e->expires = ttl + _time_now();
1778             _cache_add_p(cache, lookup, e);
1779         }
1780     }
1781 #if DEBUG
1782     _cache_dump_mru(cache);
1783 #endif
1784 Exit:
1785     _cache_notify_waiting_tid_locked(cache, key);
1786     pthread_mutex_unlock( &cache->lock );
1787 }
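
/* Illustrative sketch (not part of the original source): how a resolver
 * client is expected to combine _resolv_cache_lookup() and
 * _resolv_cache_add().  send_dns_query() is a hypothetical transport helper
 * standing in for the code that actually talks to a name server; everything
 * else is the API defined above.
 */
extern int send_dns_query(const void* query, int querylen,
                          void* answer, int answersize);   /* hypothetical */

static int
example_cached_resolve( struct resolv_cache*  cache,
                        const void*           query,
                        int                   querylen,
                        void*                 answer,
                        int                   answersize )
{
    int                answerlen = 0;
    ResolvCacheStatus  status;

    /* try the cache first */
    status = _resolv_cache_lookup(cache, query, querylen,
                                  answer, answersize, &answerlen);
    if (status == RESOLV_CACHE_FOUND)
        return answerlen;

    /* cache miss (or unsupported query): go to the network */
    answerlen = send_dns_query(query, querylen, answer, answersize);

    /* only cache answers for queries the cache understood; adding also
     * notifies any threads blocked in _resolv_cache_lookup() waiting on
     * this pending query */
    if (answerlen > 0 && status == RESOLV_CACHE_NOTFOUND)
        _resolv_cache_add(cache, query, querylen, answer, answerlen);

    return answerlen;
}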
1788 
1789 /****************************************************************************/
1790 /****************************************************************************/
1791 /*****                                                                  *****/
1792 /*****                                                                  *****/
1793 /*****                                                                  *****/
1794 /****************************************************************************/
1795 /****************************************************************************/
1796 
1797 static pthread_once_t        _res_cache_once = PTHREAD_ONCE_INIT;
1798 
1799 // Head of the list of caches.  Protected by _res_cache_list_lock.
1800 static struct resolv_cache_info _res_cache_list;
1801 
1802 // List of pid iface pairs
1803 static struct resolv_pidiface_info _res_pidiface_list;
1804 
1805 // List of uid iface pairs
1806 static struct resolv_uidiface_info _res_uidiface_list;
1807 
1808 // name of the current default interface
1809 static char            _res_default_ifname[IF_NAMESIZE + 1];
1810 
1811 // lock protecting everything in the _resolv_cache_info structs (next ptr, etc)
1812 static pthread_mutex_t _res_cache_list_lock;
1813 
1814 // lock protecting the _res_pidiface_list
1815 static pthread_mutex_t _res_pidiface_list_lock;
1816 
1817 // lock protecting the _res_uidiface_list
1818 static pthread_mutex_t _res_uidiface_list_lock;
1819 
1820 /* lookup the default interface name */
1821 static char *_get_default_iface_locked();
1822 /* find the first cache that has an associated interface and return the name of the interface */
1823 static char* _find_any_iface_name_locked( void );
1824 
1825 /* insert resolv_cache_info into the list of resolv_cache_infos */
1826 static void _insert_cache_info_locked(struct resolv_cache_info* cache_info);
1827 /* creates a resolv_cache_info */
1828 static struct resolv_cache_info* _create_cache_info( void );
1829 /* gets cache associated with an interface name, or NULL if none exists */
1830 static struct resolv_cache* _find_named_cache_locked(const char* ifname);
1831 /* gets a resolv_cache_info associated with an interface name, or NULL if not found */
1832 static struct resolv_cache_info* _find_cache_info_locked(const char* ifname);
1833 /* look up the named cache, creating it if needed */
1834 static struct resolv_cache* _get_res_cache_for_iface_locked(const char* ifname);
1835 /* empty the named cache */
1836 static void _flush_cache_for_iface_locked(const char* ifname);
1837 /* empty the nameservers set for the named cache */
1838 static void _free_nameservers_locked(struct resolv_cache_info* cache_info);
1839 /* look up the nameserver for the named interface */
1840 static int _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen);
1841 /* lookup the addr of the nameserver for the named interface */
1842 static struct addrinfo* _get_nameserver_addr_locked(const char* ifname, int n);
1843 /* look up the interface's address */
1844 static struct in_addr* _get_addr_locked(const char * ifname);
1845 /* return 1 if every name server in the provided list is already present in the list
1846  * of name servers currently attached to the provided cache_info (i.e. considered equal) */
1847 static int _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
1848         const char** servers, int numservers);
1849 /* remove a resolv_pidiface_info structure from _res_pidiface_list */
1850 static void _remove_pidiface_info_locked(int pid);
1851 /* get a resolv_pidiface_info structure from _res_pidiface_list with a certain pid */
1852 static struct resolv_pidiface_info* _get_pid_iface_info_locked(int pid);
1853 
1854 /* remove a resolv_pidiface_info structure from _res_uidiface_list */
1855 static int _remove_uidiface_info_locked(const char* iface, int uid_start, int uid_end);
1856 /* get a resolv_uidiface_info structure from _res_uidiface_list with a certain uid */
1857 static struct resolv_uidiface_info* _get_uid_iface_info_locked(int uid);
1858 
1859 static void
1860 _res_cache_init(void)
1861 {
1862     const char*  env = getenv(CONFIG_ENV);
1863 
1864     if (env && atoi(env) == 0) {
1865         /* the cache is disabled */
1866         return;
1867     }
1868 
1869     memset(&_res_default_ifname, 0, sizeof(_res_default_ifname));
1870     memset(&_res_cache_list, 0, sizeof(_res_cache_list));
1871     memset(&_res_pidiface_list, 0, sizeof(_res_pidiface_list));
1872     memset(&_res_uidiface_list, 0, sizeof(_res_uidiface_list));
1873     pthread_mutex_init(&_res_cache_list_lock, NULL);
1874     pthread_mutex_init(&_res_pidiface_list_lock, NULL);
1875     pthread_mutex_init(&_res_uidiface_list_lock, NULL);
1876 }
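
/* Illustrative note (assumption, not from the original source): because
 * _res_cache_init() honours CONFIG_ENV, a test process can disable caching
 * by setting that environment variable to "0" before the first resolver
 * call, e.g.:
 *
 *     setenv(CONFIG_ENV, "0", 1);   // atoi(env) == 0 => cache disabled
 */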
1877 
1878 struct resolv_cache*
1879 __get_res_cache(const char* ifname)
1880 {
1881     struct resolv_cache *cache;
1882 
1883     pthread_once(&_res_cache_once, _res_cache_init);
1884     pthread_mutex_lock(&_res_cache_list_lock);
1885 
1886     char* iface;
1887     if (ifname == NULL || ifname[0] == '\0') {
1888         iface = _get_default_iface_locked();
1889         if (iface[0] == '\0') {
1890             char* tmp = _find_any_iface_name_locked();
1891             if (tmp) {
1892                 iface = tmp;
1893             }
1894         }
1895     } else {
1896         iface = (char *) ifname;
1897     }
1898 
1899     cache = _get_res_cache_for_iface_locked(iface);
1900 
1901     pthread_mutex_unlock(&_res_cache_list_lock);
1902     XLOG("_get_res_cache: iface = %s, cache=%p\n", iface, cache);
1903     return cache;
1904 }
1905 
1906 static struct resolv_cache*
1907 _get_res_cache_for_iface_locked(const char* ifname)
1908 {
1909     if (ifname == NULL)
1910         return NULL;
1911 
1912     struct resolv_cache* cache = _find_named_cache_locked(ifname);
1913     if (!cache) {
1914         struct resolv_cache_info* cache_info = _create_cache_info();
1915         if (cache_info) {
1916             cache = _resolv_cache_create();
1917             if (cache) {
1918                 int len = sizeof(cache_info->ifname);
1919                 cache_info->cache = cache;
1920                 strncpy(cache_info->ifname, ifname, len - 1);
1921                 cache_info->ifname[len - 1] = '\0';
1922 
1923                 _insert_cache_info_locked(cache_info);
1924             } else {
1925                 free(cache_info);
1926             }
1927         }
1928     }
1929     return cache;
1930 }
1931 
1932 void
1933 _resolv_cache_reset(unsigned  generation)
1934 {
1935     XLOG("%s: generation=%d", __FUNCTION__, generation);
1936 
1937     pthread_once(&_res_cache_once, _res_cache_init);
1938     pthread_mutex_lock(&_res_cache_list_lock);
1939 
1940     char* ifname = _get_default_iface_locked();
1941     // if the default interface is not set, use the first cache
1942     // associated with an interface as the default one.
1943     // Note: this duplicates code from __get_res_cache since this
1944     // method will become obsolete once per-interface caching is
1945     // implemented everywhere
1946     if (ifname[0] == '\0') {
1947         struct resolv_cache_info* cache_info = _res_cache_list.next;
1948         while (cache_info) {
1949             if (cache_info->ifname[0] != '\0') {
1950                 ifname = cache_info->ifname;
1951                 break;
1952             }
1953 
1954             cache_info = cache_info->next;
1955         }
1956     }
1957     struct resolv_cache* cache = _get_res_cache_for_iface_locked(ifname);
1958 
1959     if (cache != NULL) {
1960         pthread_mutex_lock( &cache->lock );
1961         if (cache->generation != generation) {
1962             _cache_flush_locked(cache);
1963             cache->generation = generation;
1964         }
1965         pthread_mutex_unlock( &cache->lock );
1966     }
1967 
1968     pthread_mutex_unlock(&_res_cache_list_lock);
1969 }
1970 
1971 void
1972 _resolv_flush_cache_for_default_iface(void)
1973 {
1974     char* ifname;
1975 
1976     pthread_once(&_res_cache_once, _res_cache_init);
1977     pthread_mutex_lock(&_res_cache_list_lock);
1978 
1979     ifname = _get_default_iface_locked();
1980     _flush_cache_for_iface_locked(ifname);
1981 
1982     pthread_mutex_unlock(&_res_cache_list_lock);
1983 }
1984 
1985 void
1986 _resolv_flush_cache_for_iface(const char* ifname)
1987 {
1988     pthread_once(&_res_cache_once, _res_cache_init);
1989     pthread_mutex_lock(&_res_cache_list_lock);
1990 
1991     _flush_cache_for_iface_locked(ifname);
1992 
1993     pthread_mutex_unlock(&_res_cache_list_lock);
1994 }
1995 
1996 static void
1997 _flush_cache_for_iface_locked(const char* ifname)
1998 {
1999     struct resolv_cache* cache = _find_named_cache_locked(ifname);
2000     if (cache) {
2001         pthread_mutex_lock(&cache->lock);
2002         _cache_flush_locked(cache);
2003         pthread_mutex_unlock(&cache->lock);
2004     }
2005 }
2006 
2007 static struct resolv_cache_info*
2008 _create_cache_info(void)
2009 {
2010     struct resolv_cache_info*  cache_info;
2011 
2012     cache_info = calloc(sizeof(*cache_info), 1);
2013     return cache_info;
2014 }
2015 
2016 static void
2017 _insert_cache_info_locked(struct resolv_cache_info* cache_info)
2018 {
2019     struct resolv_cache_info* last;
2020 
2021     for (last = &_res_cache_list; last->next; last = last->next);
2022 
2023     last->next = cache_info;
2024 
2025 }
2026 
2027 static struct resolv_cache*
2028 _find_named_cache_locked(const char* ifname) {
2029 
2030     struct resolv_cache_info* info = _find_cache_info_locked(ifname);
2031 
2032     if (info != NULL) return info->cache;
2033 
2034     return NULL;
2035 }
2036 
2037 static struct resolv_cache_info*
2038 _find_cache_info_locked(const char* ifname)
2039 {
2040     if (ifname == NULL)
2041         return NULL;
2042 
2043     struct resolv_cache_info* cache_info = _res_cache_list.next;
2044 
2045     while (cache_info) {
2046         if (strcmp(cache_info->ifname, ifname) == 0) {
2047             break;
2048         }
2049 
2050         cache_info = cache_info->next;
2051     }
2052     return cache_info;
2053 }
2054 
2055 static char*
2056 _get_default_iface_locked(void)
2057 {
2058 
2059     char* iface = _res_default_ifname;
2060 
2061     return iface;
2062 }
2063 
2064 static char*
2065 _find_any_iface_name_locked( void ) {
2066     char* ifname = NULL;
2067 
2068     struct resolv_cache_info* cache_info = _res_cache_list.next;
2069     while (cache_info) {
2070         if (cache_info->ifname[0] != '\0') {
2071             ifname = cache_info->ifname;
2072             break;
2073         }
2074 
2075         cache_info = cache_info->next;
2076     }
2077 
2078     return ifname;
2079 }
2080 
2081 void
2082 _resolv_set_default_iface(const char* ifname)
2083 {
2084     XLOG("_resolv_set_default_if ifname %s\n",ifname);
2085 
2086     pthread_once(&_res_cache_once, _res_cache_init);
2087     pthread_mutex_lock(&_res_cache_list_lock);
2088 
2089     int size = sizeof(_res_default_ifname);
2090     memset(_res_default_ifname, 0, size);
2091     strncpy(_res_default_ifname, ifname, size - 1);
2092     _res_default_ifname[size - 1] = '\0';
2093 
2094     pthread_mutex_unlock(&_res_cache_list_lock);
2095 }
2096 
2097 void
2098 _resolv_set_nameservers_for_iface(const char* ifname, const char** servers, int numservers,
2099         const char *domains)
2100 {
2101     int i, rt, index;
2102     struct addrinfo hints;
2103     char sbuf[NI_MAXSERV];
2104     register char *cp;
2105     int *offset;
2106 
2107     pthread_once(&_res_cache_once, _res_cache_init);
2108     pthread_mutex_lock(&_res_cache_list_lock);
2109 
2110     // create the cache if it does not exist yet
2111     _get_res_cache_for_iface_locked(ifname);
2112 
2113     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2114 
2115     if (cache_info != NULL &&
2116             !_resolv_is_nameservers_equal_locked(cache_info, servers, numservers)) {
2117         // free current before adding new
2118         _free_nameservers_locked(cache_info);
2119 
2120         memset(&hints, 0, sizeof(hints));
2121         hints.ai_family = PF_UNSPEC;
2122         hints.ai_socktype = SOCK_DGRAM; /*dummy*/
2123         hints.ai_flags = AI_NUMERICHOST;
2124         sprintf(sbuf, "%u", NAMESERVER_PORT);
2125 
2126         index = 0;
2127         for (i = 0; i < numservers && i < MAXNS; i++) {
2128             rt = getaddrinfo(servers[i], sbuf, &hints, &cache_info->nsaddrinfo[index]);
2129             if (rt == 0) {
2130                 cache_info->nameservers[index] = strdup(servers[i]);
2131                 index++;
2132                 XLOG("_resolv_set_nameservers_for_iface: iface = %s, addr = %s\n",
2133                         ifname, servers[i]);
2134             } else {
2135                 cache_info->nsaddrinfo[index] = NULL;
2136             }
2137         }
2138 
2139         // code moved from res_init.c, load_domain_search_list
2140         strlcpy(cache_info->defdname, domains, sizeof(cache_info->defdname));
2141         if ((cp = strchr(cache_info->defdname, '\n')) != NULL)
2142             *cp = '\0';
2143         cp = cache_info->defdname;
2144         offset = cache_info->dnsrch_offset;
2145         while (offset < cache_info->dnsrch_offset + MAXDNSRCH) {
2146             while (*cp == ' ' || *cp == '\t') /* skip leading white space */
2147                 cp++;
2148             if (*cp == '\0') /* stop if nothing more to do */
2149                 break;
2150             *offset++ = cp - cache_info->defdname; /* record this search domain */
2151             while (*cp) { /* zero-terminate it */
2152                 if (*cp == ' '|| *cp == '\t') {
2153                     *cp++ = '\0';
2154                     break;
2155                 }
2156                 cp++;
2157             }
2158         }
2159         *offset = -1; /* cache_info->dnsrch_offset has MAXDNSRCH+1 items */
2160 
2161         // flush the cache since the settings changed
2162         _flush_cache_for_iface_locked(ifname);
2163 
2164     }
2165 
2166     pthread_mutex_unlock(&_res_cache_list_lock);
2167 }
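
/* Illustrative sketch (interface name, servers and domains invented): the
 * typical configuration sequence is to pick a default interface, then attach
 * its name servers and search domains.  With the inputs below, the parsing
 * loop above leaves
 *
 *     defdname      = "example.com\0corp.example.com"
 *     dnsrch_offset = { 0, 12, -1, ... }
 *
 * i.e. each offset points at the start of one search domain inside defdname,
 * and -1 terminates the list (consumed by _resolv_populate_res_for_iface()).
 */
static void
example_configure_iface(void)
{
    const char* servers[] = { "8.8.8.8", "8.8.4.4" };   /* hypothetical servers */

    _resolv_set_default_iface("wlan0");                 /* hypothetical interface */
    _resolv_set_nameservers_for_iface("wlan0", servers, 2,
                                      "example.com corp.example.com");
}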
2168 
2169 static int
2170 _resolv_is_nameservers_equal_locked(struct resolv_cache_info* cache_info,
2171         const char** servers, int numservers)
2172 {
2173     int i;
2174     char** ns;
2175     int equal = 1;
2176 
2177     // compare each name server against current name servers
2178     if (numservers > MAXNS) numservers = MAXNS;
2179     for (i = 0; i < numservers && equal; i++) {
2180         ns = cache_info->nameservers;
2181         equal = 0;
2182         while(*ns) {
2183             if (strcmp(*ns, servers[i]) == 0) {
2184                 equal = 1;
2185                 break;
2186             }
2187             ns++;
2188         }
2189     }
2190 
2191     return equal;
2192 }
2193 
2194 static void
2195 _free_nameservers_locked(struct resolv_cache_info* cache_info)
2196 {
2197     int i;
2198     for (i = 0; i <= MAXNS; i++) {
2199         free(cache_info->nameservers[i]);
2200         cache_info->nameservers[i] = NULL;
2201         if (cache_info->nsaddrinfo[i] != NULL) {
2202             freeaddrinfo(cache_info->nsaddrinfo[i]);
2203             cache_info->nsaddrinfo[i] = NULL;
2204         }
2205     }
2206 }
2207 
2208 int
2209 _resolv_cache_get_nameserver(int n, char* addr, int addrLen)
2210 {
2211     char *ifname;
2212     int result = 0;
2213 
2214     pthread_once(&_res_cache_once, _res_cache_init);
2215     pthread_mutex_lock(&_res_cache_list_lock);
2216 
2217     ifname = _get_default_iface_locked();
2218     result = _get_nameserver_locked(ifname, n, addr, addrLen);
2219 
2220     pthread_mutex_unlock(&_res_cache_list_lock);
2221     return result;
2222 }
2223 
2224 static int
2225 _get_nameserver_locked(const char* ifname, int n, char* addr, int addrLen)
2226 {
2227     int len = 0;
2228     char* ns;
2229     struct resolv_cache_info* cache_info;
2230 
2231     if (n < 1 || n > MAXNS || !addr)
2232         return 0;
2233 
2234     cache_info = _find_cache_info_locked(ifname);
2235     if (cache_info) {
2236         ns = cache_info->nameservers[n - 1];
2237         if (ns) {
2238             len = strlen(ns);
2239             if (len < addrLen) {
2240                 strncpy(addr, ns, len);
2241                 addr[len] = '\0';
2242             } else {
2243                 len = 0;
2244             }
2245         }
2246     }
2247 
2248     return len;
2249 }
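
/* Illustrative sketch (not from the original source): callers typically walk
 * the name servers of the default interface with 1-based indices until an
 * empty result is returned.  The buffer size is an assumption; any buffer
 * large enough for a printable address works.
 */
static void
example_dump_default_nameservers(void)
{
    char  addr[NI_MAXHOST];
    int   n;

    for (n = 1; n <= MAXNS; n++) {
        if (_resolv_cache_get_nameserver(n, addr, sizeof(addr)) == 0)
            break;                    /* no more servers configured */
        XLOG("ns%d = %s", n, addr);
    }
}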
2250 
2251 struct addrinfo*
2252 _cache_get_nameserver_addr(int n)
2253 {
2254     struct addrinfo *result;
2255     char* ifname;
2256 
2257     pthread_once(&_res_cache_once, _res_cache_init);
2258     pthread_mutex_lock(&_res_cache_list_lock);
2259 
2260     ifname = _get_default_iface_locked();
2261 
2262     result = _get_nameserver_addr_locked(ifname, n);
2263     pthread_mutex_unlock(&_res_cache_list_lock);
2264     return result;
2265 }
2266 
2267 static struct addrinfo*
2268 _get_nameserver_addr_locked(const char* ifname, int n)
2269 {
2270     struct addrinfo* ai = NULL;
2271     struct resolv_cache_info* cache_info;
2272 
2273     if (n < 1 || n > MAXNS)
2274         return NULL;
2275 
2276     cache_info = _find_cache_info_locked(ifname);
2277     if (cache_info) {
2278         ai = cache_info->nsaddrinfo[n - 1];
2279     }
2280     return ai;
2281 }
2282 
2283 void
2284 _resolv_set_addr_of_iface(const char* ifname, struct in_addr* addr)
2285 {
2286     pthread_once(&_res_cache_once, _res_cache_init);
2287     pthread_mutex_lock(&_res_cache_list_lock);
2288     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2289     if (cache_info) {
2290         memcpy(&cache_info->ifaddr, addr, sizeof(*addr));
2291 
2292         if (DEBUG) {
2293             XLOG("address of interface %s is %s\n",
2294                     ifname, inet_ntoa(cache_info->ifaddr));
2295         }
2296     }
2297     pthread_mutex_unlock(&_res_cache_list_lock);
2298 }
2299 
2300 struct in_addr*
2301 _resolv_get_addr_of_default_iface(void)
2302 {
2303     struct in_addr* ai = NULL;
2304     char* ifname;
2305 
2306     pthread_once(&_res_cache_once, _res_cache_init);
2307     pthread_mutex_lock(&_res_cache_list_lock);
2308     ifname = _get_default_iface_locked();
2309     ai = _get_addr_locked(ifname);
2310     pthread_mutex_unlock(&_res_cache_list_lock);
2311 
2312     return ai;
2313 }
2314 
2315 struct in_addr*
2316 _resolv_get_addr_of_iface(const char* ifname)
2317 {
2318     struct in_addr* ai = NULL;
2319 
2320     pthread_once(&_res_cache_once, _res_cache_init);
2321     pthread_mutex_lock(&_res_cache_list_lock);
2322     ai =_get_addr_locked(ifname);
2323     pthread_mutex_unlock(&_res_cache_list_lock);
2324     return ai;
2325 }
2326 
2327 static struct in_addr*
2328 _get_addr_locked(const char * ifname)
2329 {
2330     struct resolv_cache_info* cache_info = _find_cache_info_locked(ifname);
2331     if (cache_info) {
2332         return &cache_info->ifaddr;
2333     }
2334     return NULL;
2335 }
2336 
2337 static void
2338 _remove_pidiface_info_locked(int pid) {
2339     struct resolv_pidiface_info* result = &_res_pidiface_list;
2340     struct resolv_pidiface_info* prev = NULL;
2341 
2342     while (result != NULL && result->pid != pid) {
2343         prev = result;
2344         result = result->next;
2345     }
2346     if (prev != NULL && result != NULL) {
2347         prev->next = result->next;
2348         free(result);
2349     }
2350 }
2351 
2352 static struct resolv_pidiface_info*
2353 _get_pid_iface_info_locked(int pid)
2354 {
2355     struct resolv_pidiface_info* result = &_res_pidiface_list;
2356     while (result != NULL && result->pid != pid) {
2357         result = result->next;
2358     }
2359 
2360     return result;
2361 }
2362 
2363 void
2364 _resolv_set_iface_for_pid(const char* ifname, int pid)
2365 {
2366     // make sure the pid iface list is created
2367     pthread_once(&_res_cache_once, _res_cache_init);
2368     pthread_mutex_lock(&_res_pidiface_list_lock);
2369 
2370     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2371     if (!pidiface_info) {
2372         pidiface_info = calloc(sizeof(*pidiface_info), 1);
2373         if (pidiface_info) {
2374             pidiface_info->pid = pid;
2375             int len = sizeof(pidiface_info->ifname);
2376             strncpy(pidiface_info->ifname, ifname, len - 1);
2377             pidiface_info->ifname[len - 1] = '\0';
2378 
2379             pidiface_info->next = _res_pidiface_list.next;
2380             _res_pidiface_list.next = pidiface_info;
2381 
2382             XLOG("_resolv_set_iface_for_pid: pid %d , iface %s\n", pid, ifname);
2383         } else {
2384             XLOG("_resolv_set_iface_for_pid failing calloc");
2385         }
2386     }
2387 
2388     pthread_mutex_unlock(&_res_pidiface_list_lock);
2389 }
2390 
2391 void
2392 _resolv_clear_iface_for_pid(int pid)
2393 {
2394     pthread_once(&_res_cache_once, _res_cache_init);
2395     pthread_mutex_lock(&_res_pidiface_list_lock);
2396 
2397     _remove_pidiface_info_locked(pid);
2398 
2399     XLOG("_resolv_clear_iface_for_pid: pid %d\n", pid);
2400 
2401     pthread_mutex_unlock(&_res_pidiface_list_lock);
2402 }
2403 
2404 int
2405 _resolv_get_pids_associated_interface(int pid, char* buff, int buffLen)
2406 {
2407     int len = 0;
2408 
2409     if (!buff) {
2410         return -1;
2411     }
2412 
2413     pthread_once(&_res_cache_once, _res_cache_init);
2414     pthread_mutex_lock(&_res_pidiface_list_lock);
2415 
2416     struct resolv_pidiface_info* pidiface_info = _get_pid_iface_info_locked(pid);
2417     buff[0] = '\0';
2418     if (pidiface_info) {
2419         len = strlen(pidiface_info->ifname);
2420         if (len < buffLen) {
2421             strncpy(buff, pidiface_info->ifname, len);
2422             buff[len] = '\0';
2423         }
2424     }
2425 
2426     XLOG("_resolv_get_pids_associated_interface buff: %s\n", buff);
2427 
2428     pthread_mutex_unlock(&_res_pidiface_list_lock);
2429 
2430     return len;
2431 }
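
/* Illustrative sketch (pid and interface name invented): the pid<->iface
 * mapping is used as a set / query / clear sequence around the lifetime of
 * a process whose DNS traffic must be pinned to one interface.
 */
static void
example_pid_iface_mapping(void)
{
    char iface[IF_NAMESIZE + 1];

    _resolv_set_iface_for_pid("wlan0", 1234);
    if (_resolv_get_pids_associated_interface(1234, iface, sizeof(iface)) > 0) {
        XLOG("pid 1234 resolves through %s", iface);
    }
    _resolv_clear_iface_for_pid(1234);
}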
2432 
2433 static int
2434 _remove_uidiface_info_locked(const char* ifname, int uid_start, int uid_end) {
2435     struct resolv_uidiface_info* result = _res_uidiface_list.next;
2436     struct resolv_uidiface_info* prev = &_res_uidiface_list;
2437     while (result != NULL && !(result->uid_start == uid_start && result->uid_end == uid_end &&
2438             !strcmp(result->ifname, ifname))) {
2439         prev = result;
2440         result = result->next;
2441     }
2442     if (prev != NULL && result != NULL) {
2443         prev->next = result->next;
2444         free(result);
2445         return 0;
2446     }
2447     errno = EINVAL;
2448     return -1;
2449 }
2450 
2451 static struct resolv_uidiface_info*
2452 _get_uid_iface_info_locked(int uid)
2453 {
2454     struct resolv_uidiface_info* result = _res_uidiface_list.next;
2455     while (result != NULL && !(result->uid_start <= uid && result->uid_end >= uid)) {
2456         result = result->next;
2457     }
2458 
2459     return result;
2460 }
2461 
2462 void
2463 _resolv_clear_iface_uid_range_mapping()
2464 {
2465     pthread_once(&_res_cache_once, _res_cache_init);
2466     pthread_mutex_lock(&_res_uidiface_list_lock);
2467     struct resolv_uidiface_info *current = _res_uidiface_list.next;
2468     struct resolv_uidiface_info *next;
2469     while (current != NULL) {
2470         next = current->next;
2471         free(current);
2472         current = next;
2473     }
2474     _res_uidiface_list.next = NULL;
2475     pthread_mutex_unlock(&_res_uidiface_list_lock);
2476 }
2477 
2478 void
2479 _resolv_clear_iface_pid_mapping()
2480 {
2481     pthread_once(&_res_cache_once, _res_cache_init);
2482     pthread_mutex_lock(&_res_pidiface_list_lock);
2483     struct resolv_pidiface_info *current = _res_pidiface_list.next;
2484     struct resolv_pidiface_info *next;
2485     while (current != NULL) {
2486         next = current->next;
2487         free(current);
2488         current = next;
2489     }
2490     _res_pidiface_list.next = NULL;
2491     pthread_mutex_unlock(&_res_pidiface_list_lock);
2492 }
2493 
2494 int
2495 _resolv_set_iface_for_uid_range(const char* ifname, int uid_start, int uid_end)
2496 {
2497     int rv = 0;
2498     struct resolv_uidiface_info* uidiface_info;
2499     // make sure the uid iface list is created
2500     pthread_once(&_res_cache_once, _res_cache_init);
2501     if (uid_start > uid_end) {
2502         errno = EINVAL;
2503         return -1;
2504     }
2505     pthread_mutex_lock(&_res_uidiface_list_lock);
2506     uidiface_info = calloc(sizeof(*uidiface_info), 1);
2507     if (uidiface_info) {
2508         uidiface_info->uid_start = uid_start;
2509         uidiface_info->uid_end = uid_end;
2510         int len = sizeof(uidiface_info->ifname);
2511         strncpy(uidiface_info->ifname, ifname, len - 1);
2512         uidiface_info->ifname[len - 1] = '\0';
2513 
2514         uidiface_info->next = _res_uidiface_list.next;
2515         _res_uidiface_list.next = uidiface_info;
2516 
2517         XLOG("_resolv_set_iface_for_uid_range: [%d,%d], iface %s\n", uid_start, uid_end,
2518                 ifname);
2519     } else {
2520         XLOG("_resolv_set_iface_for_uid_range failing calloc\n");
2521         rv = -1;
2522         errno = EINVAL;
2523     }
2524 
2525     pthread_mutex_unlock(&_res_uidiface_list_lock);
2526     return rv;
2527 }
2528 
2529 int
2530 _resolv_clear_iface_for_uid_range(const char* ifname, int uid_start, int uid_end)
2531 {
2532     pthread_once(&_res_cache_once, _res_cache_init);
2533     pthread_mutex_lock(&_res_uidiface_list_lock);
2534 
2535     int rv = _remove_uidiface_info_locked(ifname, uid_start, uid_end);
2536 
2537     XLOG("_resolv_clear_iface_for_uid_range: [%d,%d] iface %s\n", uid_start, uid_end, ifname);
2538 
2539     pthread_mutex_unlock(&_res_uidiface_list_lock);
2540 
2541     return rv;
2542 }
2543 
2544 int
2545 _resolv_get_uids_associated_interface(int uid, char* buff, int buffLen)
2546 {
2547     int len = 0;
2548 
2549     if (!buff) {
2550         return -1;
2551     }
2552 
2553     pthread_once(&_res_cache_once, _res_cache_init);
2554     pthread_mutex_lock(&_res_uidiface_list_lock);
2555 
2556     struct resolv_uidiface_info* uidiface_info = _get_uid_iface_info_locked(uid);
2557     buff[0] = '\0';
2558     if (uidiface_info) {
2559         len = strlen(uidiface_info->ifname);
2560         if (len < buffLen) {
2561             strncpy(buff, uidiface_info->ifname, len);
2562             buff[len] = '\0';
2563         }
2564     }
2565 
2566     XLOG("_resolv_get_uids_associated_interface buff: %s\n", buff);
2567 
2568     pthread_mutex_unlock(&_res_uidiface_list_lock);
2569 
2570     return len;
2571 }
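
/* Illustrative sketch (uid range and interface name invented): uid ranges
 * are attached to an interface and later resolved per uid.  Overlapping
 * ranges are not rejected by the code above; the first match in the list
 * wins.
 */
static void
example_uid_range_mapping(void)
{
    char iface[IF_NAMESIZE + 1];

    if (_resolv_set_iface_for_uid_range("rmnet0", 10000, 10999) == 0) {
        if (_resolv_get_uids_associated_interface(10500, iface, sizeof(iface)) > 0) {
            XLOG("uid 10500 resolves through %s", iface);
        }
        _resolv_clear_iface_for_uid_range("rmnet0", 10000, 10999);
    }
}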
2572 
2573 size_t
2574 _resolv_get_default_iface(char* buff, size_t buffLen)
2575 {
2576     if (!buff || buffLen == 0) {
2577         return 0;
2578     }
2579 
2580     pthread_once(&_res_cache_once, _res_cache_init);
2581     pthread_mutex_lock(&_res_cache_list_lock);
2582 
2583     char* ifname = _get_default_iface_locked(); // never null, but may be empty
2584 
2585     // if the default interface is not set, give up.
2586     if (ifname[0] == '\0') {
2587         pthread_mutex_unlock(&_res_cache_list_lock);
2588         return 0;
2589     }
2590 
2591     size_t len = strlen(ifname);
2592     if (len < buffLen) {
2593         strncpy(buff, ifname, len);
2594         buff[len] = '\0';
2595     } else {
2596         buff[0] = '\0';
2597     }
2598 
2599     pthread_mutex_unlock(&_res_cache_list_lock);
2600 
2601     return len;
2602 }
2603 
2604 void
2605 _resolv_populate_res_for_iface(res_state statp)
2606 {
2607     if (statp == NULL) {
2608         return;
2609     }
2610 
2611     if (statp->iface[0] == '\0') { // no interface set, assign the default
2612         size_t if_len = _resolv_get_default_iface(statp->iface, sizeof(statp->iface));
2613         if (if_len + 1 > sizeof(statp->iface)) {
2614             XLOG("%s: INTERNAL_ERROR: can't fit interface name into statp->iface.\n", __FUNCTION__);
2615             return;
2616         }
2617         if (if_len == 0) {
2618             XLOG("%s: INTERNAL_ERROR: can't find any suitable interfaces.\n", __FUNCTION__);
2619             return;
2620         }
2621     }
2622 
2623     pthread_once(&_res_cache_once, _res_cache_init);
2624     pthread_mutex_lock(&_res_cache_list_lock);
2625 
2626     struct resolv_cache_info* info = _find_cache_info_locked(statp->iface);
2627     if (info != NULL) {
2628         int nserv;
2629         struct addrinfo* ai;
2630         XLOG("_resolv_populate_res_for_iface: %s\n", statp->iface);
2631         for (nserv = 0; nserv < MAXNS; nserv++) {
2632             ai = info->nsaddrinfo[nserv];
2633             if (ai == NULL) {
2634                 break;
2635             }
2636 
2637             if ((size_t) ai->ai_addrlen <= sizeof(statp->_u._ext.ext->nsaddrs[0])) {
2638                 if (statp->_u._ext.ext != NULL) {
2639                     memcpy(&statp->_u._ext.ext->nsaddrs[nserv], ai->ai_addr, ai->ai_addrlen);
2640                     statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2641                 } else {
2642                     if ((size_t) ai->ai_addrlen
2643                             <= sizeof(statp->nsaddr_list[0])) {
2644                         memcpy(&statp->nsaddr_list[nserv], ai->ai_addr,
2645                                 ai->ai_addrlen);
2646                     } else {
2647                         statp->nsaddr_list[nserv].sin_family = AF_UNSPEC;
2648                     }
2649                 }
2650             } else {
2651                 XLOG("_resolv_populate_res_for_iface found too long addrlen");
2652             }
2653         }
2654         statp->nscount = nserv;
2655         // now do search domains.  Note that we cache the offsets as this code runs a lot,
2656         // but the code that computes the offsets only runs when the settings change
2657         strlcpy(statp->defdname, info->defdname, sizeof(statp->defdname));
2658         register char **pp = statp->dnsrch;
2659         register int *p = info->dnsrch_offset;
2660         while (pp < statp->dnsrch + MAXDNSRCH && *p != -1) {
2661             *pp++ = statp->defdname + *p++; /* each offset indexes into defdname */
2662         }
2663     }
2664     pthread_mutex_unlock(&_res_cache_list_lock);
2665 }
2666